1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
5  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
6  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7  * EMULEX and SLI are trademarks of Emulex.                        *
8  * www.broadcom.com                                                *
9  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10  *                                                                 *
11  * This program is free software; you can redistribute it and/or   *
12  * modify it under the terms of version 2 of the GNU General       *
13  * Public License as published by the Free Software Foundation.    *
14  * This program is distributed in the hope that it will be useful. *
15  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20  * more details, a copy of which can be found in the file COPYING  *
21  * included with this package.                                     *
22  *******************************************************************/
23
24 #include <linux/blkdev.h>
25 #include <linux/pci.h>
26 #include <linux/interrupt.h>
27 #include <linux/delay.h>
28 #include <linux/slab.h>
29 #include <linux/lockdep.h>
30
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_cmnd.h>
33 #include <scsi/scsi_device.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_transport_fc.h>
36 #include <scsi/fc/fc_fs.h>
37 #include <linux/aer.h>
38 #include <linux/crash_dump.h>
39 #ifdef CONFIG_X86
40 #include <asm/set_memory.h>
41 #endif
42
43 #include "lpfc_hw4.h"
44 #include "lpfc_hw.h"
45 #include "lpfc_sli.h"
46 #include "lpfc_sli4.h"
47 #include "lpfc_nl.h"
48 #include "lpfc_disc.h"
49 #include "lpfc.h"
50 #include "lpfc_scsi.h"
51 #include "lpfc_nvme.h"
52 #include "lpfc_crtn.h"
53 #include "lpfc_logmsg.h"
54 #include "lpfc_compat.h"
55 #include "lpfc_debugfs.h"
56 #include "lpfc_vport.h"
57 #include "lpfc_version.h"
58
59 /* There are only four IOCB completion types. */
60 typedef enum _lpfc_iocb_type {
61         LPFC_UNKNOWN_IOCB,
62         LPFC_UNSOL_IOCB,
63         LPFC_SOL_IOCB,
64         LPFC_ABORT_IOCB
65 } lpfc_iocb_type;
66
67
68 /* Provide function prototypes local to this module. */
69 static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
70                                   uint32_t);
71 static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
72                               uint8_t *, uint32_t *);
73 static struct lpfc_iocbq *
74 lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
75                                   struct lpfc_iocbq *rspiocbq);
76 static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
77                                       struct hbq_dmabuf *);
78 static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
79                                           struct hbq_dmabuf *dmabuf);
80 static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
81                                    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
82 static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
83                                        int);
84 static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
85                                      struct lpfc_queue *eq,
86                                      struct lpfc_eqe *eqe);
87 static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
88 static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
89 static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
90 static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
91                                     struct lpfc_queue *cq,
92                                     struct lpfc_cqe *cqe);
93 static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba,
94                                  struct lpfc_iocbq *pwqeq,
95                                  struct lpfc_sglq *sglq);
96
97 union lpfc_wqe128 lpfc_iread_cmd_template;
98 union lpfc_wqe128 lpfc_iwrite_cmd_template;
99 union lpfc_wqe128 lpfc_icmnd_cmd_template;
100
101 /* Setup WQE templates for IOs */
102 void lpfc_wqe_cmd_template(void)
103 {
104         union lpfc_wqe128 *wqe;
105
106         /* IREAD template */
107         wqe = &lpfc_iread_cmd_template;
108         memset(wqe, 0, sizeof(union lpfc_wqe128));
109
110         /* Word 0, 1, 2 - BDE is variable */
111
112         /* Word 3 - cmd_buff_len, payload_offset_len is zero */
113
114         /* Word 4 - total_xfer_len is variable */
115
116         /* Word 5 - is zero */
117
118         /* Word 6 - ctxt_tag, xri_tag is variable */
119
120         /* Word 7 */
121         bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
122         bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
123         bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
124         bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);
125
126         /* Word 8 - abort_tag is variable */
127
128         /* Word 9  - reqtag is variable */
129
130         /* Word 10 - dbde, wqes is variable */
131         bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
132         bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
133         bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
134         bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
135         bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
136
137         /* Word 11 - pbde is variable */
138         bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
139         bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
140         bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
141
142         /* Word 12 - is zero */
143
144         /* Word 13, 14, 15 - PBDE is variable */
145
146         /* IWRITE template */
147         wqe = &lpfc_iwrite_cmd_template;
148         memset(wqe, 0, sizeof(union lpfc_wqe128));
149
150         /* Word 0, 1, 2 - BDE is variable */
151
152         /* Word 3 - cmd_buff_len, payload_offset_len is zero */
153
154         /* Word 4 - total_xfer_len is variable */
155
156         /* Word 5 - initial_xfer_len is variable */
157
158         /* Word 6 - ctxt_tag, xri_tag is variable */
159
160         /* Word 7 */
161         bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
162         bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
163         bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
164         bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);
165
166         /* Word 8 - abort_tag is variable */
167
168         /* Word 9  - reqtag is variable */
169
170         /* Word 10 - dbde, wqes is variable */
171         bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
172         bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
173         bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
174         bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
175         bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
176
177         /* Word 11 - pbde is variable */
178         bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
179         bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
180         bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
181
182         /* Word 12 - is zero */
183
184         /* Word 13, 14, 15 - PBDE is variable */
185
186         /* ICMND template */
187         wqe = &lpfc_icmnd_cmd_template;
188         memset(wqe, 0, sizeof(union lpfc_wqe128));
189
190         /* Word 0, 1, 2 - BDE is variable */
191
192         /* Word 3 - payload_offset_len is variable */
193
194         /* Word 4, 5 - is zero */
195
196         /* Word 6 - ctxt_tag, xri_tag is variable */
197
198         /* Word 7 */
199         bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
200         bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
201         bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
202         bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);
203
204         /* Word 8 - abort_tag is variable */
205
206         /* Word 9  - reqtag is variable */
207
208         /* Word 10 - dbde, wqes is variable */
209         bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
210         bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
211         bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
212         bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
213         bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
214
215         /* Word 11 */
216         bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
217         bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
218         bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);
219
220         /* Word 12, 13, 14, 15 - is zero */
221 }
222
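/*
 * Editor's sketch (not part of the driver): fast-path setup code is
 * expected to start from one of the precomputed templates above and then
 * fill in only the per-I/O variable words called out in the comments.
 * Under that assumption, building an IREAD WQE might look roughly like
 * the following; the function name is hypothetical, the fields are the
 * same ones set by the template code above.
 */
#if 0
static void example_prep_iread(union lpfc_wqe128 *wqe, u16 xri_tag,
			       u16 req_tag, u32 xfer_len)
{
	/* Clone the precomputed template ... */
	memcpy(wqe, &lpfc_iread_cmd_template, sizeof(union lpfc_wqe128));
	/* ... then set only the per-I/O variable words. */
	wqe->fcp_iread.total_xfer_len = xfer_len;		/* Word 4 */
	bf_set(wqe_xri_tag, &wqe->fcp_iread.wqe_com, xri_tag);	/* Word 6 */
	bf_set(wqe_reqtag, &wqe->fcp_iread.wqe_com, req_tag);	/* Word 9 */
}
#endif
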
223 #if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
224 /**
225  * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
226  * @srcp: Source memory pointer.
227  * @destp: Destination memory pointer.
228  * @cnt: Number of bytes to copy.
229  *       Must be a multiple of sizeof(uint64_t)
230  *
231  * This function is used for copying data between driver memory
232  * and the SLI WQ. On 64-bit little-endian hosts no byte swapping
233  * is needed, so this is a straight 64-bit copy; other configs
234  * fall back to lpfc_sli_pcimem_bcopy(), which swaps each word.
235  * This function can be called with or without a lock.
236  **/
237 static void
238 lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
239 {
240         uint64_t *src = srcp;
241         uint64_t *dest = destp;
242         int i;
243
244         for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
245                 *dest++ = *src++;
246 }
247 #else
248 #define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
249 #endif
250
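/*
 * Editor's note with a sketch: on configurations other than 64-bit
 * little-endian, the macro above falls back to lpfc_sli_pcimem_bcopy(),
 * which must byte-swap each 32-bit word into the little-endian order the
 * SLI interface expects. A minimal equivalent, with a hypothetical name,
 * would be:
 */
#if 0
static void example_swap_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src++;
		ldata = le32_to_cpu(ldata);	/* no-op on LE hosts */
		*dest++ = ldata;
	}
}
#endif
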
251 /**
252  * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
253  * @q: The Work Queue to operate on.
254  * @wqe: The work Queue Entry to put on the Work queue.
255  *
256  * This routine will copy the contents of @wqe to the next available entry on
257  * the @q. This function will then ring the Work Queue Doorbell to signal the
258  * HBA to start processing the Work Queue Entry. This function returns 0 if
259  * successful. If no entries are available on @q then this function will return
260  * -EBUSY; if @q is invalid it returns -ENOMEM.
261  * The caller is expected to hold the hbalock when calling this routine.
262  **/
263 static int
264 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
265 {
266         union lpfc_wqe *temp_wqe;
267         struct lpfc_register doorbell;
268         uint32_t host_index;
269         uint32_t idx;
270         uint32_t i = 0;
271         uint8_t *tmp;
272         u32 if_type;
273
274         /* sanity check on queue memory */
275         if (unlikely(!q))
276                 return -ENOMEM;
277
278         temp_wqe = lpfc_sli4_qe(q, q->host_index);
279
280         /* If the host has not yet processed the next entry then we are done */
281         idx = ((q->host_index + 1) % q->entry_count);
282         if (idx == q->hba_index) {
283                 q->WQ_overflow++;
284                 return -EBUSY;
285         }
286         q->WQ_posted++;
287         /* set consumption flag every once in a while */
288         if (!((q->host_index + 1) % q->notify_interval))
289                 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
290         else
291                 bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
292         if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
293                 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
294         lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
295         if (q->dpp_enable && q->phba->cfg_enable_dpp) {
296         /* write to DPP aperture taking advantage of Combined Writes */
297                 tmp = (uint8_t *)temp_wqe;
298 #ifdef __raw_writeq
299                 for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
300                         __raw_writeq(*((uint64_t *)(tmp + i)),
301                                         q->dpp_regaddr + i);
302 #else
303                 for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
304                         __raw_writel(*((uint32_t *)(tmp + i)),
305                                         q->dpp_regaddr + i);
306 #endif
307         }
308         /* ensure WQE bcopy and DPP flushed before doorbell write */
309         wmb();
310
311         /* Update the host index before invoking device */
312         host_index = q->host_index;
313
314         q->host_index = idx;
315
316         /* Ring Doorbell */
317         doorbell.word0 = 0;
318         if (q->db_format == LPFC_DB_LIST_FORMAT) {
319                 if (q->dpp_enable && q->phba->cfg_enable_dpp) {
320                         bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
321                         bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
322                         bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
323                             q->dpp_id);
324                         bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
325                             q->queue_id);
326                 } else {
327                         bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
328                         bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
329
330                         /* Leave bits <23:16> clear for if_type 6 dpp */
331                         if_type = bf_get(lpfc_sli_intf_if_type,
332                                          &q->phba->sli4_hba.sli_intf);
333                         if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
334                                 bf_set(lpfc_wq_db_list_fm_index, &doorbell,
335                                        host_index);
336                 }
337         } else if (q->db_format == LPFC_DB_RING_FORMAT) {
338                 bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
339                 bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
340         } else {
341                 return -EINVAL;
342         }
343         writel(doorbell.word0, q->db_regaddr);
344
345         return 0;
346 }
347
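/*
 * Editor's usage sketch (hypothetical caller): per the kernel-doc above,
 * submitters serialize on hbalock and must be prepared for a full queue.
 */
#if 0
static int example_submit_wqe(struct lpfc_hba *phba, struct lpfc_queue *wq,
			      union lpfc_wqe128 *wqe)
{
	unsigned long iflags;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflags);
	rc = lpfc_sli4_wq_put(wq, wqe);	/* -EBUSY when the WQ is full */
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return rc;
}
#endif
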
348 /**
349  * lpfc_sli4_wq_release - Updates internal hba index for WQ
350  * @q: The Work Queue to operate on.
351  * @index: The index to advance the hba index to.
352  *
353  * This routine will update the HBA index of a queue to reflect consumption of
354  * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
355  * an entry the host calls this function to update the queue's internal
356  * pointers.
357  **/
358 static void
359 lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
360 {
361         /* sanity check on queue memory */
362         if (unlikely(!q))
363                 return;
364
365         q->hba_index = index;
366 }
367
368 /**
369  * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
370  * @q: The Mailbox Queue to operate on.
371  * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
372  *
373  * This routine will copy the contents of @mqe to the next available entry on
374  * the @q. This function will then ring the Mailbox Queue Doorbell to signal
375  * the HBA to start processing the Mailbox Queue Entry. This function returns
376  * 0 if successful. If no entries are available on @q then this function will
377  * return -ENOMEM.
378  * The caller is expected to hold the hbalock when calling this routine.
379  **/
380 static int
381 lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
382 {
383         struct lpfc_mqe *temp_mqe;
384         struct lpfc_register doorbell;
385
386         /* sanity check on queue memory */
387         if (unlikely(!q))
388                 return -ENOMEM;
389         temp_mqe = lpfc_sli4_qe(q, q->host_index);
390
391         /* If the host has not yet processed the next entry then we are done */
392         if (((q->host_index + 1) % q->entry_count) == q->hba_index)
393                 return -ENOMEM;
394         lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
395         /* Save off the mailbox pointer for completion */
396         q->phba->mbox = (MAILBOX_t *)temp_mqe;
397
398         /* Update the host index before invoking device */
399         q->host_index = ((q->host_index + 1) % q->entry_count);
400
401         /* Ring Doorbell */
402         doorbell.word0 = 0;
403         bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
404         bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
405         writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
406         return 0;
407 }
408
409 /**
410  * lpfc_sli4_mq_release - Updates internal hba index for MQ
411  * @q: The Mailbox Queue to operate on.
412  *
413  * This routine will update the HBA index of a queue to reflect consumption of
414  * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
415  * an entry the host calls this function to update the queue's internal
416  * pointers. This routine returns the number of entries that were consumed by
417  * the HBA.
418  **/
419 static uint32_t
420 lpfc_sli4_mq_release(struct lpfc_queue *q)
421 {
422         /* sanity check on queue memory */
423         if (unlikely(!q))
424                 return 0;
425
426         /* Clear the mailbox pointer for completion */
427         q->phba->mbox = NULL;
428         q->hba_index = ((q->hba_index + 1) % q->entry_count);
429         return 1;
430 }
431
432 /**
433  * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
434  * @q: The Event Queue to get the first valid EQE from
435  *
436  * This routine will get the first valid Event Queue Entry from @q, update
437  * the queue's internal hba index, and return the EQE. If no valid EQEs are in
438  * the Queue (no more work to do), or the Queue is full of EQEs that have been
439  * processed, but not popped back to the HBA then this routine will return NULL.
440  **/
441 static struct lpfc_eqe *
442 lpfc_sli4_eq_get(struct lpfc_queue *q)
443 {
444         struct lpfc_eqe *eqe;
445
446         /* sanity check on queue memory */
447         if (unlikely(!q))
448                 return NULL;
449         eqe = lpfc_sli4_qe(q, q->host_index);
450
451         /* If the next EQE is not valid then we are done */
452         if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
453                 return NULL;
454
455         /*
456          * insert barrier for instruction interlock : data from the hardware
457          * must have the valid bit checked before it can be copied and acted
458          * upon. Speculative instructions were allowing a bcopy at the start
459          * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
460          * after our return, to copy data before the valid bit check above
461          * was done. As such, some of the copied data was stale. The barrier
462          * ensures the check is before any data is copied.
463          */
464         mb();
465         return eqe;
466 }
467
468 /**
469  * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
470  * @q: The Event Queue to disable interrupts
471  *
472  **/
473 void
474 lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
475 {
476         struct lpfc_register doorbell;
477
478         doorbell.word0 = 0;
479         bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
480         bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
481         bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
482                 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
483         bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
484         writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
485 }
486
487 /**
488  * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
489  * @q: The Event Queue to disable interrupts
490  *
491  **/
492 void
493 lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
494 {
495         struct lpfc_register doorbell;
496
497         doorbell.word0 = 0;
498         bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
499         writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
500 }
501
502 /**
503  * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
504  * @phba: adapter with EQ
505  * @q: The Event Queue that the host has completed processing for.
506  * @count: Number of elements that have been consumed
507  * @arm: Indicates whether the host wants to arm this EQ.
508  *
509  * This routine will notify the HBA, by ringing the doorbell, that count
510  * number of EQEs have been processed. The @arm parameter indicates whether
511  * the queue should be rearmed when ringing the doorbell.
512  **/
513 void
514 lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
515                      uint32_t count, bool arm)
516 {
517         struct lpfc_register doorbell;
518
519         /* sanity check on queue memory */
520         if (unlikely(!q || (count == 0 && !arm)))
521                 return;
522
523         /* ring doorbell for number popped */
524         doorbell.word0 = 0;
525         if (arm) {
526                 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
527                 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
528         }
529         bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
530         bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
531         bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
532                         (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
533         bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
534         writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
535         /* PCI read to flush PCI pipeline on re-arming for INTx mode */
536         if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
537                 readl(q->phba->sli4_hba.EQDBregaddr);
538 }
539
540 /**
541  * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
542  * @phba: adapter with EQ
543  * @q: The Event Queue that the host has completed processing for.
544  * @count: Number of elements that have been consumed
545  * @arm: Indicates whether the host wants to arm this EQ.
546  *
547  * This routine will notify the HBA, by ringing the doorbell, that count
548  * number of EQEs have been processed. The @arm parameter indicates whether
549  * the queue should be rearmed when ringing the doorbell.
550  **/
551 void
552 lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
553                           uint32_t count, bool arm)
554 {
555         struct lpfc_register doorbell;
556
557         /* sanity check on queue memory */
558         if (unlikely(!q || (count == 0 && !arm)))
559                 return;
560
561         /* ring doorbell for number popped */
562         doorbell.word0 = 0;
563         if (arm)
564                 bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
565         bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
566         bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
567         writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
568         /* PCI read to flush PCI pipeline on re-arming for INTx mode */
569         if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
570                 readl(q->phba->sli4_hba.EQDBregaddr);
571 }
572
573 static void
574 __lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
575                         struct lpfc_eqe *eqe)
576 {
577         if (!phba->sli4_hba.pc_sli4_params.eqav)
578                 bf_set_le32(lpfc_eqe_valid, eqe, 0);
579
580         eq->host_index = ((eq->host_index + 1) % eq->entry_count);
581
582         /* if the index wrapped around, toggle the valid bit */
583         if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
584                 eq->qe_valid = (eq->qe_valid) ? 0 : 1;
585 }
586
587 static void
588 lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
589 {
590         struct lpfc_eqe *eqe = NULL;
591         u32 eq_count = 0, cq_count = 0;
592         struct lpfc_cqe *cqe = NULL;
593         struct lpfc_queue *cq = NULL, *childq = NULL;
594         int cqid = 0;
595
596         /* walk all the EQ entries and drop on the floor */
597         eqe = lpfc_sli4_eq_get(eq);
598         while (eqe) {
599                 /* Get the reference to the corresponding CQ */
600                 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
601                 cq = NULL;
602
603                 list_for_each_entry(childq, &eq->child_list, list) {
604                         if (childq->queue_id == cqid) {
605                                 cq = childq;
606                                 break;
607                         }
608                 }
609                 /* If CQ is valid, iterate through it and drop all the CQEs */
610                 if (cq) {
611                         cqe = lpfc_sli4_cq_get(cq);
612                         while (cqe) {
613                                 __lpfc_sli4_consume_cqe(phba, cq, cqe);
614                                 cq_count++;
615                                 cqe = lpfc_sli4_cq_get(cq);
616                         }
617                         /* Clear and re-arm the CQ */
618                         phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
619                             LPFC_QUEUE_REARM);
620                         cq_count = 0;
621                 }
622                 __lpfc_sli4_consume_eqe(phba, eq, eqe);
623                 eq_count++;
624                 eqe = lpfc_sli4_eq_get(eq);
625         }
626
627         /* Clear and re-arm the EQ */
628         phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
629 }
630
631 static int
632 lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
633                      uint8_t rearm)
634 {
635         struct lpfc_eqe *eqe;
636         int count = 0, consumed = 0;
637
638         if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
639                 goto rearm_and_exit;
640
641         eqe = lpfc_sli4_eq_get(eq);
642         while (eqe) {
643                 lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
644                 __lpfc_sli4_consume_eqe(phba, eq, eqe);
645
646                 consumed++;
647                 if (!(++count % eq->max_proc_limit))
648                         break;
649
650                 if (!(count % eq->notify_interval)) {
651                         phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
652                                                         LPFC_QUEUE_NOARM);
653                         consumed = 0;
654                 }
655
656                 eqe = lpfc_sli4_eq_get(eq);
657         }
658         eq->EQ_processed += count;
659
660         /* Track the max number of EQEs processed in 1 intr */
661         if (count > eq->EQ_max_eqe)
662                 eq->EQ_max_eqe = count;
663
664         xchg(&eq->queue_claimed, 0);
665
666 rearm_and_exit:
667         /* Always clear the EQ. */
668         phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);
669
670         return count;
671 }
672
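/*
 * Editor's note with a sketch: the processing loop above consumes EQEs in
 * batches, writing interim NOARM doorbells every eq->notify_interval
 * entries and capping one pass at eq->max_proc_limit entries; the final
 * doorbell write applies the caller's rearm choice. An interrupt handler
 * would typically finish a pass with something like:
 */
#if 0
	/* Process this EQ's entries, then re-arm it for new interrupts */
	count = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_REARM);
#endif
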
673 /**
674  * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
675  * @q: The Completion Queue to get the first valid CQE from
676  *
677  * This routine will get the first valid Completion Queue Entry from @q, update
678  * the queue's internal hba index, and return the CQE. If no valid CQEs are in
679  * the Queue (no more work to do), or the Queue is full of CQEs that have been
680  * processed, but not popped back to the HBA then this routine will return NULL.
681  **/
682 static struct lpfc_cqe *
683 lpfc_sli4_cq_get(struct lpfc_queue *q)
684 {
685         struct lpfc_cqe *cqe;
686
687         /* sanity check on queue memory */
688         if (unlikely(!q))
689                 return NULL;
690         cqe = lpfc_sli4_qe(q, q->host_index);
691
692         /* If the next CQE is not valid then we are done */
693         if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
694                 return NULL;
695
696         /*
697          * insert barrier for instruction interlock : data from the hardware
698          * must have the valid bit checked before it can be copied and acted
699          * upon. Given previously observed speculative instructions
700          * allowing action on content before the valid bit was checked
701          * (see lpfc_sli4_eq_get() above), add the barrier here as well.
702          * A CQE is a multi-word structure, so stale data is a real risk.
703          */
704         mb();
705         return cqe;
706 }
707
708 static void
709 __lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
710                         struct lpfc_cqe *cqe)
711 {
712         if (!phba->sli4_hba.pc_sli4_params.cqav)
713                 bf_set_le32(lpfc_cqe_valid, cqe, 0);
714
715         cq->host_index = ((cq->host_index + 1) % cq->entry_count);
716
717         /* if the index wrapped around, toggle the valid bit */
718         if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
719                 cq->qe_valid = (cq->qe_valid) ? 0 : 1;
720 }
721
722 /**
723  * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
724  * @phba: the adapter with the CQ
725  * @q: The Completion Queue that the host has completed processing for.
726  * @count: the number of elements that were consumed
727  * @arm: Indicates whether the host wants to arm this CQ.
728  *
729  * This routine will notify the HBA, by ringing the doorbell, that the
730  * CQEs have been processed. The @arm parameter specifies whether the
731  * queue should be rearmed when ringing the doorbell.
732  **/
733 void
734 lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
735                      uint32_t count, bool arm)
736 {
737         struct lpfc_register doorbell;
738
739         /* sanity check on queue memory */
740         if (unlikely(!q || (count == 0 && !arm)))
741                 return;
742
743         /* ring doorbell for number popped */
744         doorbell.word0 = 0;
745         if (arm)
746                 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
747         bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
748         bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
749         bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
750                         (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
751         bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
752         writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
753 }
754
755 /**
756  * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
757  * @phba: the adapter with the CQ
758  * @q: The Completion Queue that the host has completed processing for.
759  * @count: the number of elements that were consumed
760  * @arm: Indicates whether the host wants to arm this CQ.
761  *
762  * This routine will notify the HBA, by ringing the doorbell, that the
763  * CQEs have been processed. The @arm parameter specifies whether the
764  * queue should be rearmed when ringing the doorbell.
765  **/
766 void
767 lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
768                          uint32_t count, bool arm)
769 {
770         struct lpfc_register doorbell;
771
772         /* sanity check on queue memory */
773         if (unlikely(!q || (count == 0 && !arm)))
774                 return;
775
776         /* ring doorbell for number popped */
777         doorbell.word0 = 0;
778         if (arm)
779                 bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
780         bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
781         bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
782         writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
783 }
784
785 /*
786  * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
787  *
788  * This routine will copy the contents of @hrqe and @drqe to the next
789  * available entries on the header queue @hq and data queue @dq, then ring
790  * the Receive Queue Doorbell to signal the HBA. It returns the put index
791  * on success, -EBUSY if the queues are full, -EINVAL for mismatched
792  * queues, and -ENOMEM for a missing queue.
793  * The caller is expected to hold the hbalock when calling this routine.
794  **/
795 int
796 lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
797                  struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
798 {
799         struct lpfc_rqe *temp_hrqe;
800         struct lpfc_rqe *temp_drqe;
801         struct lpfc_register doorbell;
802         int hq_put_index;
803         int dq_put_index;
804
805         /* sanity check on queue memory */
806         if (unlikely(!hq) || unlikely(!dq))
807                 return -ENOMEM;
808         hq_put_index = hq->host_index;
809         dq_put_index = dq->host_index;
810         temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
811         temp_drqe = lpfc_sli4_qe(dq, dq_put_index);
812
813         if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
814                 return -EINVAL;
815         if (hq_put_index != dq_put_index)
816                 return -EINVAL;
817         /* If the host has not yet processed the next entry then we are done */
818         if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
819                 return -EBUSY;
820         lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
821         lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
822
823         /* Update the host index to point to the next slot */
824         hq->host_index = ((hq_put_index + 1) % hq->entry_count);
825         dq->host_index = ((dq_put_index + 1) % dq->entry_count);
826         hq->RQ_buf_posted++;
827
828         /* Ring The Header Receive Queue Doorbell */
829         if (!(hq->host_index % hq->notify_interval)) {
830                 doorbell.word0 = 0;
831                 if (hq->db_format == LPFC_DB_RING_FORMAT) {
832                         bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
833                                hq->notify_interval);
834                         bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
835                 } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
836                         bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
837                                hq->notify_interval);
838                         bf_set(lpfc_rq_db_list_fm_index, &doorbell,
839                                hq->host_index);
840                         bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
841                 } else {
842                         return -EINVAL;
843                 }
844                 writel(doorbell.word0, hq->db_regaddr);
845         }
846         return hq_put_index;
847 }
848
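/*
 * Editor's usage sketch (hypothetical): header and data RQEs advance in
 * lock-step, so a caller reposting a receive buffer pairs the physical
 * addresses of its header and data DMA buffers and submits them together.
 * Field and macro names here follow their use elsewhere in the driver.
 */
#if 0
static int example_post_rq_buffer(struct lpfc_queue *hrq,
				  struct lpfc_queue *drq,
				  struct rqb_dmabuf *rqb_entry)
{
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;

	hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys);
	drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys);
	drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
	/* Returns the put index on success, negative errno on failure */
	return lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
}
#endif
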
849 /*
850  * lpfc_sli4_rq_release - Updates internal hba index for RQ
851  *
852  * This routine will update the HBA index of a queue to reflect consumption of
853  * one Receive Queue Entry by the HBA. When the HBA indicates that it has
854  * consumed an entry the host calls this function to update the queue's
855  * internal pointers. This routine returns the number of entries that were
856  * consumed by the HBA.
857  **/
858 static uint32_t
859 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
860 {
861         /* sanity check on queue memory */
862         if (unlikely(!hq) || unlikely(!dq))
863                 return 0;
864
865         if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
866                 return 0;
867         hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
868         dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
869         return 1;
870 }
871
872 /**
873  * lpfc_cmd_iocb - Get next command iocb entry in the ring
874  * @phba: Pointer to HBA context object.
875  * @pring: Pointer to driver SLI ring object.
876  *
877  * This function returns pointer to next command iocb entry
878  * in the command ring. The caller must hold hbalock to prevent
879  * other threads from consuming the next command iocb.
880  * SLI-2/SLI-3 provide different sized iocbs.
881  **/
882 static inline IOCB_t *
883 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
884 {
885         return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
886                            pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
887 }
888
889 /**
890  * lpfc_resp_iocb - Get next response iocb entry in the ring
891  * @phba: Pointer to HBA context object.
892  * @pring: Pointer to driver SLI ring object.
893  *
894  * This function returns pointer to next response iocb entry
895  * in the response ring. The caller must hold hbalock to make sure
896  * that no other thread consumes the next response iocb.
897  * SLI-2/SLI-3 provide different sized iocbs.
898  **/
899 static inline IOCB_t *
900 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
901 {
902         return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
903                            pring->sli.sli3.rspidx * phba->iocb_rsp_size);
904 }
905
906 /**
907  * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
908  * @phba: Pointer to HBA context object.
909  *
910  * This function is called with hbalock held. This function
911  * allocates a new driver iocb object from the iocb pool. If the
912  * allocation is successful, it returns a pointer to the newly
913  * allocated iocb object; otherwise it returns NULL.
914  **/
915 struct lpfc_iocbq *
916 __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
917 {
918         struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
919         struct lpfc_iocbq *iocbq = NULL;
920
921         lockdep_assert_held(&phba->hbalock);
922
923         list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
924         if (iocbq)
925                 phba->iocb_cnt++;
926         if (phba->iocb_cnt > phba->iocb_max)
927                 phba->iocb_max = phba->iocb_cnt;
928         return iocbq;
929 }
930
931 /**
932  * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
933  * @phba: Pointer to HBA context object.
934  * @xritag: XRI value.
935  *
936  * This function clears the sglq pointer from the array of active
937  * sglq's. The xritag that is passed in is used to index into the
938  * array. Before the xritag can be used it needs to be adjusted
939  * by subtracting the xribase.
940  *
941  * Returns sglq pointer = success, NULL = Failure.
942  **/
943 struct lpfc_sglq *
944 __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
945 {
946         struct lpfc_sglq *sglq;
947
948         sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
949         phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
950         return sglq;
951 }
952
953 /**
954  * __lpfc_get_active_sglq - Get the active sglq for this XRI.
955  * @phba: Pointer to HBA context object.
956  * @xritag: XRI value.
957  *
958  * This function returns the sglq pointer from the array of active
959  * sglq's. The xritag that is passed in is used to index into the
960  * array. Before the xritag can be used it needs to be adjusted
961  * by subtracting the xribase.
962  *
963  * Returns sglq pointer = success, NULL = Failure.
964  **/
965 struct lpfc_sglq *
966 __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
967 {
968         struct lpfc_sglq *sglq;
969
970         sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
971         return sglq;
972 }
973
974 /**
975  * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
976  * @phba: Pointer to HBA context object.
977  * @xritag: xri used in this exchange.
978  * @rrq: The RRQ to be cleared.
979  *
980  **/
981 void
982 lpfc_clr_rrq_active(struct lpfc_hba *phba,
983                     uint16_t xritag,
984                     struct lpfc_node_rrq *rrq)
985 {
986         struct lpfc_nodelist *ndlp = NULL;
987
988         /* Lookup did to verify if did is still active on this vport */
989         if (rrq->vport)
990                 ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
991
992         if (!ndlp)
993                 goto out;
994
995         if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
996                 rrq->send_rrq = 0;
997                 rrq->xritag = 0;
998                 rrq->rrq_stop_time = 0;
999         }
1000 out:
1001         mempool_free(rrq, phba->rrq_pool);
1002 }
1003
1004 /**
1005  * lpfc_handle_rrq_active - Checks if an RRQ has waited RATOV.
1006  * @phba: Pointer to HBA context object.
1007  *
1008  * This function acquires the hbalock internally. It checks whether
1009  * stop_time (RATOV from when the rrq was set active) has been
1010  * reached; if it has and the send_rrq flag is set, it calls
1011  * lpfc_send_rrq. If the send_rrq flag is not set, it simply calls
1012  * the routine to clear the rrq and free the rrq resource.
1013  * Before leaving the routine, the timer is re-armed to fire when
1014  * the next rrq is due to expire, provided rrqs remain on the
1015  * active list.
1016  *
1017  **/
1018 void
1019 lpfc_handle_rrq_active(struct lpfc_hba *phba)
1020 {
1021         struct lpfc_node_rrq *rrq;
1022         struct lpfc_node_rrq *nextrrq;
1023         unsigned long next_time;
1024         unsigned long iflags;
1025         LIST_HEAD(send_rrq);
1026
1027         spin_lock_irqsave(&phba->hbalock, iflags);
1028         phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1029         next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
1030         list_for_each_entry_safe(rrq, nextrrq,
1031                                  &phba->active_rrq_list, list) {
1032                 if (time_after(jiffies, rrq->rrq_stop_time))
1033                         list_move(&rrq->list, &send_rrq);
1034                 else if (time_before(rrq->rrq_stop_time, next_time))
1035                         next_time = rrq->rrq_stop_time;
1036         }
1037         spin_unlock_irqrestore(&phba->hbalock, iflags);
1038         if ((!list_empty(&phba->active_rrq_list)) &&
1039             (!(phba->pport->load_flag & FC_UNLOADING)))
1040                 mod_timer(&phba->rrq_tmr, next_time);
1041         list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
1042                 list_del(&rrq->list);
1043                 if (!rrq->send_rrq) {
1044                         /* this call will free the rrq */
1045                         lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1046                 } else if (lpfc_send_rrq(phba, rrq)) {
1047                         /* The send failed, so clear the rrq here; had
1048                          * it been sent, the completion handler would
1049                          * clear the bit in the xribitmap.
1050                          */
1050                         lpfc_clr_rrq_active(phba, rrq->xritag,
1051                                             rrq);
1052                 }
1053         }
1054 }
1055
1056 /**
1057  * lpfc_get_active_rrq - Get the active RRQ for this exchange.
1058  * @vport: Pointer to vport context object.
1059  * @xri: The xri used in the exchange.
1060  * @did: The target's DID for this exchange.
1061  *
1062  * returns NULL = rrq not found in the phba->active_rrq_list.
1063  *         rrq = rrq for this xri and target.
1064  **/
1065 struct lpfc_node_rrq *
1066 lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
1067 {
1068         struct lpfc_hba *phba = vport->phba;
1069         struct lpfc_node_rrq *rrq;
1070         struct lpfc_node_rrq *nextrrq;
1071         unsigned long iflags;
1072
1073         if (phba->sli_rev != LPFC_SLI_REV4)
1074                 return NULL;
1075         spin_lock_irqsave(&phba->hbalock, iflags);
1076         list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
1077                 if (rrq->vport == vport && rrq->xritag == xri &&
1078                                 rrq->nlp_DID == did){
1079                         list_del(&rrq->list);
1080                         spin_unlock_irqrestore(&phba->hbalock, iflags);
1081                         return rrq;
1082                 }
1083         }
1084         spin_unlock_irqrestore(&phba->hbalock, iflags);
1085         return NULL;
1086 }
1087
1088 /**
1089  * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
1090  * @vport: Pointer to vport context object.
1091  * @ndlp: Pointer to the lpfc_node_list structure.
1092  * If @ndlp is NULL, remove all active RRQs for this vport from the
1093  * phba->active_rrq_list and clear the rrq.
1094  * If @ndlp is not NULL, only remove rrqs for this vport and this ndlp.
1095  **/
1096 void
1097 lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1098
1099 {
1100         struct lpfc_hba *phba = vport->phba;
1101         struct lpfc_node_rrq *rrq;
1102         struct lpfc_node_rrq *nextrrq;
1103         unsigned long iflags;
1104         LIST_HEAD(rrq_list);
1105
1106         if (phba->sli_rev != LPFC_SLI_REV4)
1107                 return;
1108         if (!ndlp) {
1109                 lpfc_sli4_vport_delete_els_xri_aborted(vport);
1110                 lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
1111         }
1112         spin_lock_irqsave(&phba->hbalock, iflags);
1113         list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
1114                 if (rrq->vport != vport)
1115                         continue;
1116
1117                 if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
1118                         list_move(&rrq->list, &rrq_list);
1119
1120         }
1121         spin_unlock_irqrestore(&phba->hbalock, iflags);
1122
1123         list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
1124                 list_del(&rrq->list);
1125                 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1126         }
1127 }
1128
1129 /**
1130  * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
1131  * @phba: Pointer to HBA context object.
1132  * @ndlp: Target's nodelist pointer for this exchange.
1133  * @xritag: the xri in the bitmap to test.
1134  *
1135  * This function returns:
1136  * 0 = rrq not active for this xri
1137  * 1 = rrq is valid for this xri.
1138  **/
1139 int
1140 lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1141                         uint16_t  xritag)
1142 {
1143         if (!ndlp)
1144                 return 0;
1145         if (!ndlp->active_rrqs_xri_bitmap)
1146                 return 0;
1147         if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
1148                 return 1;
1149         else
1150                 return 0;
1151 }
1152
1153 /**
1154  * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
1155  * @phba: Pointer to HBA context object.
1156  * @ndlp: nodelist pointer for this target.
1157  * @xritag: xri used in this exchange.
1158  * @rxid: Remote Exchange ID.
1159  * @send_rrq: Flag used to determine if we should send rrq els cmd.
1160  *
1161  * This function takes the hbalock.
1162  * The active bit is always set in the active rrq xri_bitmap even
1163  * if there is no slot available for the other rrq information.
1164  *
1165  * Returns 0 if the rrq was activated for this xri,
1166  *         < 0 if there is no memory or the ndlp is invalid.
1167  **/
1168 int
1169 lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1170                     uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
1171 {
1172         unsigned long iflags;
1173         struct lpfc_node_rrq *rrq;
1174         int empty;
1175
1176         if (!ndlp)
1177                 return -EINVAL;
1178
1179         if (!phba->cfg_enable_rrq)
1180                 return -EINVAL;
1181
1182         spin_lock_irqsave(&phba->hbalock, iflags);
1183         if (phba->pport->load_flag & FC_UNLOADING) {
1184                 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1185                 goto out;
1186         }
1187
1188         if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
1189                 goto out;
1190
1191         if (!ndlp->active_rrqs_xri_bitmap)
1192                 goto out;
1193
1194         if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
1195                 goto out;
1196
1197         spin_unlock_irqrestore(&phba->hbalock, iflags);
1198         rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
1199         if (!rrq) {
1200                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1201                                 "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
1202                                 " DID:0x%x Send:%d\n",
1203                                 xritag, rxid, ndlp->nlp_DID, send_rrq);
1204                 return -EINVAL;
1205         }
1206         if (phba->cfg_enable_rrq == 1)
1207                 rrq->send_rrq = send_rrq;
1208         else
1209                 rrq->send_rrq = 0;
1210         rrq->xritag = xritag;
1211         rrq->rrq_stop_time = jiffies +
1212                                 msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
1213         rrq->nlp_DID = ndlp->nlp_DID;
1214         rrq->vport = ndlp->vport;
1215         rrq->rxid = rxid;
1216         spin_lock_irqsave(&phba->hbalock, iflags);
1217         empty = list_empty(&phba->active_rrq_list);
1218         list_add_tail(&rrq->list, &phba->active_rrq_list);
1219         phba->hba_flag |= HBA_RRQ_ACTIVE;
1220         if (empty)
1221                 lpfc_worker_wake_up(phba);
1222         spin_unlock_irqrestore(&phba->hbalock, iflags);
1223         return 0;
1224 out:
1225         spin_unlock_irqrestore(&phba->hbalock, iflags);
1226         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1227                         "2921 Can't set rrq active xri:0x%x rxid:0x%x"
1228                         " DID:0x%x Send:%d\n",
1229                         xritag, rxid, ndlp->nlp_DID, send_rrq);
1230         return -EINVAL;
1231 }
1232
1233 /**
1234  * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
1235  * @phba: Pointer to HBA context object.
1236  * @piocbq: Pointer to the iocbq.
1237  *
1238  * The driver calls this function with either the nvme ls ring lock
1239  * or the fc els ring lock held depending on the iocb usage.  This function
1240  * gets a new driver sglq object from the sglq list. If the list is not
1241  * empty, it succeeds and returns a pointer to the newly allocated
1242  * sglq object; otherwise it returns NULL.
1243  **/
1244 static struct lpfc_sglq *
1245 __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
1246 {
1247         struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
1248         struct lpfc_sglq *sglq = NULL;
1249         struct lpfc_sglq *start_sglq = NULL;
1250         struct lpfc_io_buf *lpfc_cmd;
1251         struct lpfc_nodelist *ndlp;
1252         int found = 0;
1253         u8 cmnd;
1254
1255         cmnd = get_job_cmnd(phba, piocbq);
1256
1257         if (piocbq->cmd_flag & LPFC_IO_FCP) {
1258                 lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
1259                 ndlp = lpfc_cmd->rdata->pnode;
1260         } else if ((cmnd == CMD_GEN_REQUEST64_CR) &&
1261                         !(piocbq->cmd_flag & LPFC_IO_LIBDFC)) {
1262                 ndlp = piocbq->context_un.ndlp;
1263         } else if (piocbq->cmd_flag & LPFC_IO_LIBDFC) {
1264                 if (piocbq->cmd_flag & LPFC_IO_LOOPBACK)
1265                         ndlp = NULL;
1266                 else
1267                         ndlp = piocbq->context_un.ndlp;
1268         } else {
1269                 ndlp = piocbq->context1;
1270         }
1271
1272         spin_lock(&phba->sli4_hba.sgl_list_lock);
1273         list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
1274         start_sglq = sglq;
1275         while (!found) {
1276                 if (!sglq)
1277                         break;
1278                 if (ndlp && ndlp->active_rrqs_xri_bitmap &&
1279                     test_bit(sglq->sli4_lxritag,
1280                     ndlp->active_rrqs_xri_bitmap)) {
1281                         /* This xri has an rrq outstanding for this DID.
1282                          * put it back in the list and get another xri.
1283                          */
1284                         list_add_tail(&sglq->list, lpfc_els_sgl_list);
1285                         sglq = NULL;
1286                         list_remove_head(lpfc_els_sgl_list, sglq,
1287                                                 struct lpfc_sglq, list);
1288                         if (sglq == start_sglq) {
1289                                 list_add_tail(&sglq->list, lpfc_els_sgl_list);
1290                                 sglq = NULL;
1291                                 break;
1292                         } else
1293                                 continue;
1294                 }
1295                 sglq->ndlp = ndlp;
1296                 found = 1;
1297                 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1298                 sglq->state = SGL_ALLOCATED;
1299         }
1300         spin_unlock(&phba->sli4_hba.sgl_list_lock);
1301         return sglq;
1302 }
1303
1304 /**
1305  * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
1306  * @phba: Pointer to HBA context object.
1307  * @piocbq: Pointer to the iocbq.
1308  *
1309  * This function is called with the sgl_list lock held. This function
1310  * gets a new driver sglq object from the sglq list. If the
1311  * list is not empty, it succeeds and returns a pointer to the newly
1312  * allocated sglq object; otherwise it returns NULL.
1313  **/
1314 struct lpfc_sglq *
1315 __lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
1316 {
1317         struct list_head *lpfc_nvmet_sgl_list;
1318         struct lpfc_sglq *sglq = NULL;
1319
1320         lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
1321
1322         lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
1323
1324         list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
1325         if (!sglq)
1326                 return NULL;
1327         phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1328         sglq->state = SGL_ALLOCATED;
1329         return sglq;
1330 }
1331
1332 /**
1333  * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
1334  * @phba: Pointer to HBA context object.
1335  *
1336  * This function is called with no lock held. This function
1337  * allocates a new driver iocb object from the iocb pool. If the
1338  * allocation is successful, it returns a pointer to the newly
1339  * allocated iocb object; otherwise it returns NULL.
1340  **/
1341 struct lpfc_iocbq *
1342 lpfc_sli_get_iocbq(struct lpfc_hba *phba)
1343 {
1344         struct lpfc_iocbq *iocbq = NULL;
1345         unsigned long iflags;
1346
1347         spin_lock_irqsave(&phba->hbalock, iflags);
1348         iocbq = __lpfc_sli_get_iocbq(phba);
1349         spin_unlock_irqrestore(&phba->hbalock, iflags);
1350         return iocbq;
1351 }
1352
1353 /**
1354  * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
1355  * @phba: Pointer to HBA context object.
1356  * @iocbq: Pointer to driver iocb object.
1357  *
1358  * This function is called to release the driver iocb object
1359  * to the iocb pool. The iotag in the iocb object
1360  * does not change for each use of the iocb object. This function
1361  * clears all other fields of the iocb object when it is freed.
1362  * The sqlq structure that holds the xritag and phys and virtual
1363  * mappings for the scatter gather list is retrieved from the
1364  * active array of sglq. The get of the sglq pointer also clears
1365  * the entry in the array. If the status of the IO indiactes that
1366  * the entry in the array. If the status of the IO indicates that
1367  * this IO was aborted then the sglq entry is put on the
1368  * IO has good status or fails for any other reason then the sglq
1369  * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
1370  *  asserted held in the code path calling this routine.
1371  * asserted held in the code path calling this routine.
1372 static void
1373 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1374 {
1375         struct lpfc_sglq *sglq;
1376         size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1377         unsigned long iflag = 0;
1378         struct lpfc_sli_ring *pring;
1379
1380         if (iocbq->sli4_xritag == NO_XRI)
1381                 sglq = NULL;
1382         else
1383                 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
1384
1385
1386         if (sglq) {
1387                 if (iocbq->cmd_flag & LPFC_IO_NVMET) {
1388                         spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1389                                           iflag);
1390                         sglq->state = SGL_FREED;
1391                         sglq->ndlp = NULL;
1392                         list_add_tail(&sglq->list,
1393                                       &phba->sli4_hba.lpfc_nvmet_sgl_list);
1394                         spin_unlock_irqrestore(
1395                                 &phba->sli4_hba.sgl_list_lock, iflag);
1396                         goto out;
1397                 }
1398
1399                 if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) &&
1400                     (!(unlikely(pci_channel_offline(phba->pcidev)))) &&
1401                     sglq->state != SGL_XRI_ABORTED) {
1402                         spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1403                                           iflag);
1404
1405                         /* Check if we can get a reference on ndlp */
1406                         if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
1407                                 sglq->ndlp = NULL;
1408
1409                         list_add(&sglq->list,
1410                                  &phba->sli4_hba.lpfc_abts_els_sgl_list);
1411                         spin_unlock_irqrestore(
1412                                 &phba->sli4_hba.sgl_list_lock, iflag);
1413                 } else {
1414                         spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1415                                           iflag);
1416                         sglq->state = SGL_FREED;
1417                         sglq->ndlp = NULL;
1418                         list_add_tail(&sglq->list,
1419                                       &phba->sli4_hba.lpfc_els_sgl_list);
1420                         spin_unlock_irqrestore(
1421                                 &phba->sli4_hba.sgl_list_lock, iflag);
1422                         pring = lpfc_phba_elsring(phba);
1423                         /* Check if TXQ queue needs to be serviced */
1424                         if (pring && (!list_empty(&pring->txq)))
1425                                 lpfc_worker_wake_up(phba);
1426                 }
1427         }
1428
1429 out:
1430         /*
1431          * Clean all volatile data fields, preserve iotag and node struct.
1432          */
1433         memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1434         iocbq->sli4_lxritag = NO_XRI;
1435         iocbq->sli4_xritag = NO_XRI;
1436         iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
1437                               LPFC_IO_NVME_LS);
1438         list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1439 }
1440
1441
1442 /**
1443  * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
1444  * @phba: Pointer to HBA context object.
1445  * @iocbq: Pointer to driver iocb object.
1446  *
1447  * This function is called to release the driver iocb object to the
1448  * iocb pool. The iotag in the iocb object does not change for each
1449  * use of the iocb object. This function clears all other fields of
1450  * the iocb object when it is freed. The hbalock is asserted held in
1451  * the code path calling this routine.
1452  **/
1453 static void
1454 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1455 {
1456         size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1457
1458         /*
1459          * Clean all volatile data fields, preserve iotag and node struct.
1460          */
1461         memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1462         iocbq->sli4_xritag = NO_XRI;
1463         list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1464 }
1465
1466 /**
1467  * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
1468  * @phba: Pointer to HBA context object.
1469  * @iocbq: Pointer to driver iocb object.
1470  *
1471  * This function is called with hbalock held to release driver
1472  * iocb object to the iocb pool. The iotag in the iocb object
1473  * does not change for each use of the iocb object. This function
1474  * clears all other fields of the iocb object when it is freed.
1475  **/
1476 static void
1477 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1478 {
1479         lockdep_assert_held(&phba->hbalock);
1480
1481         phba->__lpfc_sli_release_iocbq(phba, iocbq);
1482         phba->iocb_cnt--;
1483 }
1484
1485 /**
1486  * lpfc_sli_release_iocbq - Release iocb to the iocb pool
1487  * @phba: Pointer to HBA context object.
1488  * @iocbq: Pointer to driver iocb object.
1489  *
1490  * This function is called with no lock held to release the iocb to
1491  * iocb pool.
1492  **/
1493 void
1494 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1495 {
1496         unsigned long iflags;
1497
1498         /*
1499          * Clean all volatile data fields, preserve iotag and node struct.
1500          */
1501         spin_lock_irqsave(&phba->hbalock, iflags);
1502         __lpfc_sli_release_iocbq(phba, iocbq);
1503         spin_unlock_irqrestore(&phba->hbalock, iflags);
1504 }
1505
1506 /**
1507  * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1508  * @phba: Pointer to HBA context object.
1509  * @iocblist: List of IOCBs.
1510  * @ulpstatus: ULP status in IOCB command field.
1511  * @ulpWord4: ULP word-4 in IOCB command field.
1512  *
1513  * This function is called with a list of IOCBs to cancel. It cancels each IOCB
1514  * on the list by invoking the completion callback function associated with the
1515  * IOCB, with the provided @ulpstatus and @ulpWord4 set in the IOCB command
1516  * fields.
1517  **/
1518 void
1519 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1520                       uint32_t ulpstatus, uint32_t ulpWord4)
1521 {
1522         struct lpfc_iocbq *piocb;
1523
1524         while (!list_empty(iocblist)) {
1525                 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1526                 if (piocb->cmd_cmpl) {
1527                         if (piocb->cmd_flag & LPFC_IO_NVME) {
1528                                 lpfc_nvme_cancel_iocb(phba, piocb,
1529                                                       ulpstatus, ulpWord4);
1530                         } else {
1531                                 if (phba->sli_rev == LPFC_SLI_REV4) {
1532                                         bf_set(lpfc_wcqe_c_status,
1533                                                &piocb->wcqe_cmpl, ulpstatus);
1534                                         piocb->wcqe_cmpl.parameter = ulpWord4;
1535                                 } else {
1536                                         piocb->iocb.ulpStatus = ulpstatus;
1537                                         piocb->iocb.un.ulpWord[4] = ulpWord4;
1538                                 }
1539                                 (piocb->cmd_cmpl) (phba, piocb, piocb);
1540                         }
1541                 } else {
1542                         lpfc_sli_release_iocbq(phba, piocb);
1543                 }
1544         }
1545         return;
1546 }
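/*
 * Illustrative caller sketch (compiled out): this mirrors the pattern the
 * driver uses when flushing a ring's pending txq. The helper name
 * example_flush_txq is hypothetical; IOSTAT_LOCAL_REJECT and
 * IOERR_SLI_ABORTED are existing driver status codes.
 */
#if 0
static void
example_flush_txq(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	LIST_HEAD(completions);
	unsigned long iflag;

	/* Move all pending iocbs to a private list under the lock */
	spin_lock_irqsave(&phba->hbalock, iflag);
	list_splice_init(&pring->txq, &completions);
	pring->txq_cnt = 0;
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Complete every iocb with a local-reject/aborted status */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}
#endif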
1547
1548 /**
1549  * lpfc_sli_iocb_cmd_type - Get the iocb type
1550  * @iocb_cmnd: iocb command code.
1551  *
1552  * This function is called by ring event handler function to get the iocb type.
1553  * This function translates the iocb command to an iocb command type used to
1554  * decide the final disposition of each completed IOCB.
1555  * The function returns
1556  * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1557  * LPFC_SOL_IOCB     if it is a solicited iocb completion
1558  * LPFC_ABORT_IOCB   if it is an abort iocb
1559  * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
1560  *
1561  * The caller is not required to hold any lock.
1562  **/
1563 static lpfc_iocb_type
1564 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1565 {
1566         lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1567
1568         if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1569                 return LPFC_UNKNOWN_IOCB;
1570
1571         switch (iocb_cmnd) {
1572         case CMD_XMIT_SEQUENCE_CR:
1573         case CMD_XMIT_SEQUENCE_CX:
1574         case CMD_XMIT_BCAST_CN:
1575         case CMD_XMIT_BCAST_CX:
1576         case CMD_ELS_REQUEST_CR:
1577         case CMD_ELS_REQUEST_CX:
1578         case CMD_CREATE_XRI_CR:
1579         case CMD_CREATE_XRI_CX:
1580         case CMD_GET_RPI_CN:
1581         case CMD_XMIT_ELS_RSP_CX:
1582         case CMD_GET_RPI_CR:
1583         case CMD_FCP_IWRITE_CR:
1584         case CMD_FCP_IWRITE_CX:
1585         case CMD_FCP_IREAD_CR:
1586         case CMD_FCP_IREAD_CX:
1587         case CMD_FCP_ICMND_CR:
1588         case CMD_FCP_ICMND_CX:
1589         case CMD_FCP_TSEND_CX:
1590         case CMD_FCP_TRSP_CX:
1591         case CMD_FCP_TRECEIVE_CX:
1592         case CMD_FCP_AUTO_TRSP_CX:
1593         case CMD_ADAPTER_MSG:
1594         case CMD_ADAPTER_DUMP:
1595         case CMD_XMIT_SEQUENCE64_CR:
1596         case CMD_XMIT_SEQUENCE64_CX:
1597         case CMD_XMIT_BCAST64_CN:
1598         case CMD_XMIT_BCAST64_CX:
1599         case CMD_ELS_REQUEST64_CR:
1600         case CMD_ELS_REQUEST64_CX:
1601         case CMD_FCP_IWRITE64_CR:
1602         case CMD_FCP_IWRITE64_CX:
1603         case CMD_FCP_IREAD64_CR:
1604         case CMD_FCP_IREAD64_CX:
1605         case CMD_FCP_ICMND64_CR:
1606         case CMD_FCP_ICMND64_CX:
1607         case CMD_FCP_TSEND64_CX:
1608         case CMD_FCP_TRSP64_CX:
1609         case CMD_FCP_TRECEIVE64_CX:
1610         case CMD_GEN_REQUEST64_CR:
1611         case CMD_GEN_REQUEST64_CX:
1612         case CMD_XMIT_ELS_RSP64_CX:
1613         case DSSCMD_IWRITE64_CR:
1614         case DSSCMD_IWRITE64_CX:
1615         case DSSCMD_IREAD64_CR:
1616         case DSSCMD_IREAD64_CX:
1617         case CMD_SEND_FRAME:
1618                 type = LPFC_SOL_IOCB;
1619                 break;
1620         case CMD_ABORT_XRI_CN:
1621         case CMD_ABORT_XRI_CX:
1622         case CMD_CLOSE_XRI_CN:
1623         case CMD_CLOSE_XRI_CX:
1624         case CMD_XRI_ABORTED_CX:
1625         case CMD_ABORT_MXRI64_CN:
1626         case CMD_XMIT_BLS_RSP64_CX:
1627                 type = LPFC_ABORT_IOCB;
1628                 break;
1629         case CMD_RCV_SEQUENCE_CX:
1630         case CMD_RCV_ELS_REQ_CX:
1631         case CMD_RCV_SEQUENCE64_CX:
1632         case CMD_RCV_ELS_REQ64_CX:
1633         case CMD_ASYNC_STATUS:
1634         case CMD_IOCB_RCV_SEQ64_CX:
1635         case CMD_IOCB_RCV_ELS64_CX:
1636         case CMD_IOCB_RCV_CONT64_CX:
1637         case CMD_IOCB_RET_XRI64_CX:
1638                 type = LPFC_UNSOL_IOCB;
1639                 break;
1640         case CMD_IOCB_XMIT_MSEQ64_CR:
1641         case CMD_IOCB_XMIT_MSEQ64_CX:
1642         case CMD_IOCB_RCV_SEQ_LIST64_CX:
1643         case CMD_IOCB_RCV_ELS_LIST64_CX:
1644         case CMD_IOCB_CLOSE_EXTENDED_CN:
1645         case CMD_IOCB_ABORT_EXTENDED_CN:
1646         case CMD_IOCB_RET_HBQE64_CN:
1647         case CMD_IOCB_FCP_IBIDIR64_CR:
1648         case CMD_IOCB_FCP_IBIDIR64_CX:
1649         case CMD_IOCB_FCP_ITASKMGT64_CX:
1650         case CMD_IOCB_LOGENTRY_CN:
1651         case CMD_IOCB_LOGENTRY_ASYNC_CN:
1652                 printk(KERN_WARNING "%s - Unhandled SLI-3 Command x%x\n",
1653                                 __func__, iocb_cmnd);
1654                 type = LPFC_UNKNOWN_IOCB;
1655                 break;
1656         default:
1657                 type = LPFC_UNKNOWN_IOCB;
1658                 break;
1659         }
1660
1661         return type;
1662 }
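/*
 * Illustrative dispatch sketch (compiled out) showing how a response-ring
 * handler might consume the returned type; 'irsp' stands in for a completed
 * IOCB_t taken from the response ring.
 */
#if 0
	switch (lpfc_sli_iocb_cmd_type(irsp->ulpCommand)) {
	case LPFC_SOL_IOCB:
		/* match against txcmplq by iotag, then call cmd_cmpl */
		break;
	case LPFC_UNSOL_IOCB:
		/* hand the sequence to the registered unsolicited handler */
		break;
	case LPFC_ABORT_IOCB:
		/* completion of an earlier abort/close request */
		break;
	default:	/* LPFC_UNKNOWN_IOCB */
		/* log and drop the entry */
		break;
	}
#endif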
1663
1664 /**
1665  * lpfc_sli_ring_map - Issue config_ring mbox for all rings
1666  * @phba: Pointer to HBA context object.
1667  *
1668  * This function is called from SLI initialization code
1669  * to configure every ring of the HBA's SLI interface. The
1670  * caller is not required to hold any lock. This function issues
1671  * a config_ring mailbox command for each ring.
1672  * This function returns zero if successful else returns a negative
1673  * error code.
1674  **/
1675 static int
1676 lpfc_sli_ring_map(struct lpfc_hba *phba)
1677 {
1678         struct lpfc_sli *psli = &phba->sli;
1679         LPFC_MBOXQ_t *pmb;
1680         MAILBOX_t *pmbox;
1681         int i, rc, ret = 0;
1682
1683         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1684         if (!pmb)
1685                 return -ENOMEM;
1686         pmbox = &pmb->u.mb;
1687         phba->link_state = LPFC_INIT_MBX_CMDS;
1688         for (i = 0; i < psli->num_rings; i++) {
1689                 lpfc_config_ring(phba, i, pmb);
1690                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1691                 if (rc != MBX_SUCCESS) {
1692                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1693                                         "0446 Adapter failed to init (%d), "
1694                                         "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1695                                         "ring %d\n",
1696                                         rc, pmbox->mbxCommand,
1697                                         pmbox->mbxStatus, i);
1698                         phba->link_state = LPFC_HBA_ERROR;
1699                         ret = -ENXIO;
1700                         break;
1701                 }
1702         }
1703         mempool_free(pmb, phba->mbox_mem_pool);
1704         return ret;
1705 }
1706
1707 /**
1708  * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1709  * @phba: Pointer to HBA context object.
1710  * @pring: Pointer to driver SLI ring object.
1711  * @piocb: Pointer to the driver iocb object.
1712  *
1713  * The driver calls this function with the hbalock held for SLI3 ports or
1714  * the ring lock held for SLI4 ports. The function adds the
1715  * new iocb to txcmplq of the given ring. This function always returns
1716  * 0. If this function is called for ELS ring, this function checks if
1717  * there is a vport associated with the ELS command. This function also
1718  * starts els_tmofunc timer if this is an ELS command.
1719  **/
1720 static int
1721 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1722                         struct lpfc_iocbq *piocb)
1723 {
1724         u32 ulp_command = 0;
1725
1726         BUG_ON(!piocb);
1727         ulp_command = get_job_cmnd(phba, piocb);
1728
1729         list_add_tail(&piocb->list, &pring->txcmplq);
1730         piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ;
1731         pring->txcmplq_cnt++;
1732         if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1733            (ulp_command != CMD_ABORT_XRI_WQE) &&
1734            (ulp_command != CMD_ABORT_XRI_CN) &&
1735            (ulp_command != CMD_CLOSE_XRI_CN)) {
1736                 BUG_ON(!piocb->vport);
1737                 if (!(piocb->vport->load_flag & FC_UNLOADING))
1738                         mod_timer(&piocb->vport->els_tmofunc,
1739                                   jiffies +
1740                                   msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1741         }
1742
1743         return 0;
1744 }
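/*
 * Worked timeout arithmetic for the mod_timer() call above, assuming
 * fc_ratov holds the FC-spec default R_A_TOV of 10 seconds: the ELS timer
 * is armed for jiffies + msecs_to_jiffies(1000 * (10 << 1)), i.e. 20
 * seconds (twice R_A_TOV).
 */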
1745
1746 /**
1747  * lpfc_sli_ringtx_get - Get first element of the txq
1748  * @phba: Pointer to HBA context object.
1749  * @pring: Pointer to driver SLI ring object.
1750  *
1751  * This function is called with hbalock held to get next
1752  * iocb in txq of the given ring. If there is any iocb in
1753  * the txq, the function returns the first iocb in the list after
1754  * removing it from the list, else it returns NULL.
1755  **/
1756 struct lpfc_iocbq *
1757 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1758 {
1759         struct lpfc_iocbq *cmd_iocb;
1760
1761         lockdep_assert_held(&phba->hbalock);
1762
1763         list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1764         return cmd_iocb;
1765 }
1766
1767 /**
1768  * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl
1769  * @phba: Pointer to HBA context object.
1770  * @cmdiocb: Pointer to driver command iocb object.
1771  * @rspiocb: Pointer to driver response iocb object.
1772  *
1773  * This routine will inform the driver of any BW adjustments we need
1774  * to make. These changes will be picked up during the next CMF
1775  * timer interrupt. In addition, any BW changes will be logged
1776  * with LOG_CGN_MGMT.
1777  **/
1778 static void
1779 lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1780                    struct lpfc_iocbq *rspiocb)
1781 {
1782         union lpfc_wqe128 *wqe;
1783         uint32_t status, info;
1784         struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl;
1785         uint64_t bw, bwdif, slop;
1786         uint64_t pcent, bwpcent;
1787         int asig, afpin, sigcnt, fpincnt;
1788         int wsigmax, wfpinmax, cg, tdp;
1789         char *s;
1790
1791         /* First check for error */
1792         status = bf_get(lpfc_wcqe_c_status, wcqe);
1793         if (status) {
1794                 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1795                                 "6211 CMF_SYNC_WQE Error "
1796                                 "req_tag x%x status x%x hwstatus x%x "
1797                                 "tdatap x%x parm x%x\n",
1798                                 bf_get(lpfc_wcqe_c_request_tag, wcqe),
1799                                 bf_get(lpfc_wcqe_c_status, wcqe),
1800                                 bf_get(lpfc_wcqe_c_hw_status, wcqe),
1801                                 wcqe->total_data_placed,
1802                                 wcqe->parameter);
1803                 goto out;
1804         }
1805
1806         /* Gather congestion information on a successful cmpl */
1807         info = wcqe->parameter;
1808         phba->cmf_active_info = info;
1809
1810         /* See if firmware info count is valid or has changed */
1811         if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info)
1812                 info = 0;
1813         else
1814                 phba->cmf_info_per_interval = info;
1815
1816         tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe);
1817         cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe);
1818
1819         /* Get BW requirement from firmware */
1820         bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE;
1821         if (!bw) {
1822                 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1823                                 "6212 CMF_SYNC_WQE x%x: NULL bw\n",
1824                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
1825                 goto out;
1826         }
1827
1828         /* Gather information needed for logging if a BW change is required */
1829         wqe = &cmdiocb->wqe;
1830         asig = bf_get(cmf_sync_asig, &wqe->cmf_sync);
1831         afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync);
1832         fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync);
1833         sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync);
1834         if (phba->cmf_max_bytes_per_interval != bw ||
1835             (asig || afpin || sigcnt || fpincnt)) {
1836                 /* Are we increasing or decreasing BW */
1837                 if (phba->cmf_max_bytes_per_interval <  bw) {
1838                         bwdif = bw - phba->cmf_max_bytes_per_interval;
1839                         s = "Increase";
1840                 } else {
1841                         bwdif = phba->cmf_max_bytes_per_interval - bw;
1842                         s = "Decrease";
1843                 }
1844
1845                 /* What is the change percentage */
1846                 slop = div_u64(phba->cmf_link_byte_count, 200); /* For rounding */
1847                 pcent = div64_u64(bwdif * 100 + slop,
1848                                   phba->cmf_link_byte_count);
1849                 bwpcent = div64_u64(bw * 100 + slop,
1850                                     phba->cmf_link_byte_count);
1851                 if (asig) {
1852                         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1853                                         "6237 BW Threshold %lld%% (%lld): "
1854                                         "%lld%% %s: Signal Alarm: cg:%d "
1855                                         "Info:%u\n",
1856                                         bwpcent, bw, pcent, s, cg,
1857                                         phba->cmf_active_info);
1858                 } else if (afpin) {
1859                         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1860                                         "6238 BW Threshold %lld%% (%lld): "
1861                                         "%lld%% %s: FPIN Alarm: cg:%d "
1862                                         "Info:%u\n",
1863                                         bwpcent, bw, pcent, s, cg,
1864                                         phba->cmf_active_info);
1865                 } else if (sigcnt) {
1866                         wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync);
1867                         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1868                                         "6239 BW Threshold %lld%% (%lld): "
1869                                         "%lld%% %s: Signal Warning: "
1870                                         "Cnt %d Max %d: cg:%d Info:%u\n",
1871                                         bwpcent, bw, pcent, s, sigcnt,
1872                                         wsigmax, cg, phba->cmf_active_info);
1873                 } else if (fpincnt) {
1874                         wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync);
1875                         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1876                                         "6240 BW Threshold %lld%% (%lld): "
1877                                         "%lld%% %s: FPIN Warning: "
1878                                         "Cnt %d Max %d: cg:%d Info:%u\n",
1879                                         bwpcent, bw, pcent, s, fpincnt,
1880                                         wfpinmax, cg, phba->cmf_active_info);
1881                 } else {
1882                         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1883                                         "6241 BW Threshold %lld%% (%lld): "
1884                                         "CMF %lld%% %s: cg:%d Info:%u\n",
1885                                         bwpcent, bw, pcent, s, cg,
1886                                         phba->cmf_active_info);
1887                 }
1888         } else if (info) {
1889                 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1890                                 "6246 Info Threshold %u\n", info);
1891         }
1892
1893         /* Save BW change to be picked up during next timer interrupt */
1894         phba->cmf_last_sync_bw = bw;
1895 out:
1896         lpfc_sli_release_iocbq(phba, cmdiocb);
1897 }
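/*
 * Worked example of the percentage math above, with illustrative numbers:
 * if cmf_link_byte_count = 1,000,000 the rounding slop is
 * 1,000,000 / 200 = 5,000, so a bwdif of 300,000 bytes gives
 * pcent = (300,000 * 100 + 5,000) / 1,000,000 = 30, i.e. a 30% change.
 */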
1898
1899 /**
1900  * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE
1901  * @phba: Pointer to HBA context object.
1902  * @ms:   ms to set in WQE interval, 0 means use init op
1903  * @total: Total rcv bytes for this interval
1904  *
1905  * This routine is called every CMF timer interrupt. Its purpose is
1906  * to issue a CMF_SYNC_WQE to the firmware to inform it of any events
1907  * that may indicate we have congestion (FPINs or Signals). Upon
1908  * completion, the firmware will indicate any BW restrictions the
1909  * driver may need to take.
1910  **/
1911 int
1912 lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
1913 {
1914         union lpfc_wqe128 *wqe;
1915         struct lpfc_iocbq *sync_buf;
1916         unsigned long iflags;
1917         u32 ret_val;
1918         u32 atot, wtot, max;
1919
1920         /* First address any alarm / warning activity */
1921         atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
1922         wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);
1923
1924         /* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
1925         if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
1926             phba->link_state == LPFC_LINK_DOWN)
1927                 return 0;
1928
1929         spin_lock_irqsave(&phba->hbalock, iflags);
1930         sync_buf = __lpfc_sli_get_iocbq(phba);
1931         if (!sync_buf) {
1932                 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
1933                                 "6213 No available WQEs for CMF_SYNC_WQE\n");
1934                 ret_val = ENOMEM;
1935                 goto out_unlock;
1936         }
1937
1938         wqe = &sync_buf->wqe;
1939
1940         /* WQEs are reused.  Clear stale data and set key fields to zero */
1941         memset(wqe, 0, sizeof(*wqe));
1942
1943         /* If this is the very first CMF_SYNC_WQE, issue an init operation */
1944         if (!ms) {
1945                 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1946                                 "6441 CMF Init %d - CMF_SYNC_WQE\n",
1947                                 phba->fc_eventTag);
1948                 bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */
1949                 bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL);
1950                 goto initpath;
1951         }
1952
1953         bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */
1954         bf_set(cmf_sync_interval, &wqe->cmf_sync, ms);
1955
1956         /* Check for alarms / warnings */
1957         if (atot) {
1958                 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1959                         /* We hit a Signal alarm condition */
1960                         bf_set(cmf_sync_asig, &wqe->cmf_sync, 1);
1961                 } else {
1962                         /* We hit an FPIN alarm condition */
1963                         bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1);
1964                 }
1965         } else if (wtot) {
1966                 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
1967                     phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1968                         /* We hit a Signal warning condition */
1969                         max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency *
1970                                 lpfc_acqe_cgn_frequency;
1971                         bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
1972                         bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
1973                 } else {
1974                         /* We hit an FPIN warning condition */
1975                         bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
1976                         bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
1977                 }
1978         }
1979
1980         /* Update total read blocks during previous timer interval */
1981         wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE);
1982
1983 initpath:
1984         bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER);
1985         wqe->cmf_sync.event_tag = phba->fc_eventTag;
1986         bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE);
1987
1988         /* Setup reqtag to match the wqe completion. */
1989         bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);
1990
1991         bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);
1992
1993         bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
1994         bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
1995         bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT);
1996
1997         sync_buf->vport = phba->pport;
1998         sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl;
1999         sync_buf->context1 = NULL;
2000         sync_buf->context2 = NULL;
2001         sync_buf->context3 = NULL;
2002         sync_buf->sli4_xritag = NO_XRI;
2003
2004         sync_buf->cmd_flag |= LPFC_IO_CMF;
2005         ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
2006         if (ret_val)
2007                 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
2008                                 "6214 Cannot issue CMF_SYNC_WQE: x%x\n",
2009                                 ret_val);
2010 out_unlock:
2011         spin_unlock_irqrestore(&phba->hbalock, iflags);
2012         return ret_val;
2013 }
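/*
 * Worked example for the warning-signal maximum computed above, under
 * assumed values: with LPFC_SEC_TO_MSEC = 1000, a fabric congestion signal
 * frequency of 100 ms, and 10 signals per ACQE,
 * max = 1000 / 100 * 10 = 100 signals per second.
 */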
2014
2015 /**
2016  * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
2017  * @phba: Pointer to HBA context object.
2018  * @pring: Pointer to driver SLI ring object.
2019  *
2020  * This function is called with the hbalock held, and the caller must post the
2021  * iocb without releasing the lock. If the caller releases the lock, the
2022  * iocb slot returned by the function is not guaranteed to be available.
2023  * The function returns a pointer to the next available iocb slot if there
2024  * is an available slot in the ring, else it returns NULL.
2025  * If the get index of the ring is ahead of the put index, the function
2026  * will post an error attention event to the worker thread to take the
2027  * HBA to offline state.
2028  **/
2029 static IOCB_t *
2030 lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2031 {
2032         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2033         uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;
2034
2035         lockdep_assert_held(&phba->hbalock);
2036
2037         if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
2038            (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
2039                 pring->sli.sli3.next_cmdidx = 0;
2040
2041         if (unlikely(pring->sli.sli3.local_getidx ==
2042                 pring->sli.sli3.next_cmdidx)) {
2043
2044                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
2045
2046                 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
2047                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2048                                         "0315 Ring %d issue: portCmdGet %d "
2049                                         "is bigger than cmd ring %d\n",
2050                                         pring->ringno,
2051                                         pring->sli.sli3.local_getidx,
2052                                         max_cmd_idx);
2053
2054                         phba->link_state = LPFC_HBA_ERROR;
2055                         /*
2056                          * All error attention handlers are posted to
2057                          * worker thread
2058                          */
2059                         phba->work_ha |= HA_ERATT;
2060                         phba->work_hs = HS_FFER3;
2061
2062                         lpfc_worker_wake_up(phba);
2063
2064                         return NULL;
2065                 }
2066
2067                 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
2068                         return NULL;
2069         }
2070
2071         return lpfc_cmd_iocb(phba, pring);
2072 }
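/*
 * Worked example of the circular-ring bookkeeping above, with illustrative
 * numbers: with numCiocb = 4, cmdidx = 3 and next_cmdidx = 3, the increment
 * wraps next_cmdidx to 0; if cmdGetInx also reads 0, the ring is full and
 * NULL is returned. One slot is always left unused so that a full ring can
 * be distinguished from an empty one.
 */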
2073
2074 /**
2075  * lpfc_sli_next_iotag - Get an iotag for the iocb
2076  * @phba: Pointer to HBA context object.
2077  * @iocbq: Pointer to driver iocb object.
2078  *
2079  * This function gets an iotag for the iocb. If there is no unused iotag and
2080  * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
2081  * array and assigns a new iotag.
2082  * The function returns the allocated iotag if successful, else returns zero.
2083  * Zero is not a valid iotag.
2084  * The caller is not required to hold any lock.
2085  **/
2086 uint16_t
2087 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
2088 {
2089         struct lpfc_iocbq **new_arr;
2090         struct lpfc_iocbq **old_arr;
2091         size_t new_len;
2092         struct lpfc_sli *psli = &phba->sli;
2093         uint16_t iotag;
2094
2095         spin_lock_irq(&phba->hbalock);
2096         iotag = psli->last_iotag;
2097         if (++iotag < psli->iocbq_lookup_len) {
2098                 psli->last_iotag = iotag;
2099                 psli->iocbq_lookup[iotag] = iocbq;
2100                 spin_unlock_irq(&phba->hbalock);
2101                 iocbq->iotag = iotag;
2102                 return iotag;
2103         } else if (psli->iocbq_lookup_len < (0xffff
2104                                            - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
2105                 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
2106                 spin_unlock_irq(&phba->hbalock);
2107                 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
2108                                   GFP_KERNEL);
2109                 if (new_arr) {
2110                         spin_lock_irq(&phba->hbalock);
2111                         old_arr = psli->iocbq_lookup;
2112                         if (new_len <= psli->iocbq_lookup_len) {
2113                                 /* highly improbable case */
2114                                 kfree(new_arr);
2115                                 iotag = psli->last_iotag;
2116                                 if (++iotag < psli->iocbq_lookup_len) {
2117                                         psli->last_iotag = iotag;
2118                                         psli->iocbq_lookup[iotag] = iocbq;
2119                                         spin_unlock_irq(&phba->hbalock);
2120                                         iocbq->iotag = iotag;
2121                                         return iotag;
2122                                 }
2123                                 spin_unlock_irq(&phba->hbalock);
2124                                 return 0;
2125                         }
2126                         if (psli->iocbq_lookup)
2127                                 memcpy(new_arr, old_arr,
2128                                        ((psli->last_iotag  + 1) *
2129                                         sizeof(struct lpfc_iocbq *)));
2130                         psli->iocbq_lookup = new_arr;
2131                         psli->iocbq_lookup_len = new_len;
2132                         psli->last_iotag = iotag;
2133                         psli->iocbq_lookup[iotag] = iocbq;
2134                         spin_unlock_irq(&phba->hbalock);
2135                         iocbq->iotag = iotag;
2136                         kfree(old_arr);
2137                         return iotag;
2138                 }
2139         } else
2140                 spin_unlock_irq(&phba->hbalock);
2141
2142         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2143                         "0318 Failed to allocate IOTAG. Last IOTAG is %d\n",
2144                         psli->last_iotag);
2145
2146         return 0;
2147 }
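/*
 * Worked example of the grow-on-demand lookup table above, with
 * illustrative numbers: if iocbq_lookup_len were 1024 with every iotag in
 * use, the function drops the lock, kcallocs a table of
 * 1024 + LPFC_IOCBQ_LOOKUP_INCREMENT pointers, re-checks the length under
 * the lock, memcpys last_iotag + 1 old entries across, and hands out iotag
 * 1024. Iotag 0 is never handed out, so zero can signal failure.
 */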
2148
2149 /**
2150  * lpfc_sli_submit_iocb - Submit an iocb to the firmware
2151  * @phba: Pointer to HBA context object.
2152  * @pring: Pointer to driver SLI ring object.
2153  * @iocb: Pointer to iocb slot in the ring.
2154  * @nextiocb: Pointer to driver iocb object which needs to be
2155  *            posted to firmware.
2156  *
2157  * This function is called to post a new iocb to the firmware. This
2158  * function copies the new iocb to ring iocb slot and updates the
2159  * ring pointers. It adds the new iocb to txcmplq if there is
2160  * a completion callback for this iocb; otherwise the function will free the
2161  * iocb object.  The hbalock is asserted held in the code path calling
2162  * this routine.
2163  **/
2164 static void
2165 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2166                 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
2167 {
2168         /*
2169          * Set up an iotag
2170          */
2171         nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0;
2172
2173
2174         if (pring->ringno == LPFC_ELS_RING) {
2175                 lpfc_debugfs_slow_ring_trc(phba,
2176                         "IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
2177                         *(((uint32_t *) &nextiocb->iocb) + 4),
2178                         *(((uint32_t *) &nextiocb->iocb) + 6),
2179                         *(((uint32_t *) &nextiocb->iocb) + 7));
2180         }
2181
2182         /*
2183          * Issue iocb command to adapter
2184          */
2185         lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
2186         wmb();
2187         pring->stats.iocb_cmd++;
2188
2189         /*
2190          * If there is no completion routine to call, we can release the
2191          * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
2192          * that have no rsp ring completion, cmd_cmpl MUST be NULL.
2193          */
2194         if (nextiocb->cmd_cmpl)
2195                 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
2196         else
2197                 __lpfc_sli_release_iocbq(phba, nextiocb);
2198
2199         /*
2200          * Let the HBA know what IOCB slot will be the next one the
2201          * driver will put a command into.
2202          */
2203         pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
2204         writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
2205 }
2206
2207 /**
2208  * lpfc_sli_update_full_ring - Update the chip attention register
2209  * @phba: Pointer to HBA context object.
2210  * @pring: Pointer to driver SLI ring object.
2211  *
2212  * The caller is not required to hold any lock for calling this function.
2213  * This function updates the chip attention bits for the ring to inform firmware
2214  * that there is pending work to be done for this ring and requests an
2215  * interrupt when there is space available in the ring. This function is
2216  * called when the driver is unable to post more iocbs to the ring due
2217  * to unavailability of space in the ring.
2218  **/
2219 static void
2220 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2221 {
2222         int ringno = pring->ringno;
2223
2224         pring->flag |= LPFC_CALL_RING_AVAILABLE;
2225
2226         wmb();
2227
2228         /*
2229          * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
2230          * The HBA will tell us when an IOCB entry is available.
2231          */
2232         writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
2233         readl(phba->CAregaddr); /* flush */
2234
2235         pring->stats.iocb_cmd_full++;
2236 }
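/*
 * Each ring owns a 4-bit nibble of the Chip Attention register, so for
 * ring 2 the writel() above becomes (CA_R0ATT | CA_R0CE_REQ) << 8.
 */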
2237
2238 /**
2239  * lpfc_sli_update_ring - Update chip attention register
2240  * @phba: Pointer to HBA context object.
2241  * @pring: Pointer to driver SLI ring object.
2242  *
2243  * This function updates the chip attention register bit for the
2244  * given ring to inform HBA that there is more work to be done
2245  * in this ring. The caller is not required to hold any lock.
2246  **/
2247 static void
2248 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2249 {
2250         int ringno = pring->ringno;
2251
2252         /*
2253          * Tell the HBA that there is work to do in this ring.
2254          */
2255         if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
2256                 wmb();
2257                 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
2258                 readl(phba->CAregaddr); /* flush */
2259         }
2260 }
2261
2262 /**
2263  * lpfc_sli_resume_iocb - Process iocbs in the txq
2264  * @phba: Pointer to HBA context object.
2265  * @pring: Pointer to driver SLI ring object.
2266  *
2267  * This function is called with hbalock held to post pending iocbs
2268  * in the txq to the firmware. This function is called when driver
2269  * detects space available in the ring.
2270  **/
2271 static void
2272 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2273 {
2274         IOCB_t *iocb;
2275         struct lpfc_iocbq *nextiocb;
2276
2277         lockdep_assert_held(&phba->hbalock);
2278
2279         /*
2280          * Check to see if:
2281          *  (a) there is anything on the txq to send
2282          *  (b) link is up
2283          *  (c) link attention events can be processed (fcp ring only)
2284          *  (d) IOCB processing is not blocked by the outstanding mbox command.
2285          */
2286
2287         if (lpfc_is_link_up(phba) &&
2288             (!list_empty(&pring->txq)) &&
2289             (pring->ringno != LPFC_FCP_RING ||
2290              phba->sli.sli_flag & LPFC_PROCESS_LA)) {
2291
2292                 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2293                        (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
2294                         lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2295
2296                 if (iocb)
2297                         lpfc_sli_update_ring(phba, pring);
2298                 else
2299                         lpfc_sli_update_full_ring(phba, pring);
2300         }
2301
2302         return;
2303 }
2304
2305 /**
2306  * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
2307  * @phba: Pointer to HBA context object.
2308  * @hbqno: HBQ number.
2309  *
2310  * This function is called with hbalock held to get the next
2311  * available slot for the given HBQ. If there is free slot
2312  * available for the HBQ it will return pointer to the next available
2313  * HBQ entry else it will return NULL.
2314  **/
2315 static struct lpfc_hbq_entry *
2316 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
2317 {
2318         struct hbq_s *hbqp = &phba->hbqs[hbqno];
2319
2320         lockdep_assert_held(&phba->hbalock);
2321
2322         if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
2323             ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
2324                 hbqp->next_hbqPutIdx = 0;
2325
2326         if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
2327                 uint32_t raw_index = phba->hbq_get[hbqno];
2328                 uint32_t getidx = le32_to_cpu(raw_index);
2329
2330                 hbqp->local_hbqGetIdx = getidx;
2331
2332                 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
2333                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2334                                         "1802 HBQ %d: local_hbqGetIdx "
2335                                         "%u is > hbqp->entry_count %u\n",
2336                                         hbqno, hbqp->local_hbqGetIdx,
2337                                         hbqp->entry_count);
2338
2339                         phba->link_state = LPFC_HBA_ERROR;
2340                         return NULL;
2341                 }
2342
2343                 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
2344                         return NULL;
2345         }
2346
2347         return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
2348                         hbqp->hbqPutIdx;
2349 }
2350
2351 /**
2352  * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
2353  * @phba: Pointer to HBA context object.
2354  *
2355  * This function is called with no lock held to free all the
2356  * hbq buffers while uninitializing the SLI interface. It also
2357  * frees the HBQ buffers returned by the firmware but not yet
2358  * processed by the upper layers.
2359  **/
2360 void
2361 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
2362 {
2363         struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2364         struct hbq_dmabuf *hbq_buf;
2365         unsigned long flags;
2366         int i, hbq_count;
2367
2368         hbq_count = lpfc_sli_hbq_count();
2369         /* Return all memory used by all HBQs */
2370         spin_lock_irqsave(&phba->hbalock, flags);
2371         for (i = 0; i < hbq_count; ++i) {
2372                 list_for_each_entry_safe(dmabuf, next_dmabuf,
2373                                 &phba->hbqs[i].hbq_buffer_list, list) {
2374                         hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2375                         list_del(&hbq_buf->dbuf.list);
2376                         (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2377                 }
2378                 phba->hbqs[i].buffer_count = 0;
2379         }
2380
2381         /* Mark the HBQs not in use */
2382         phba->hbq_in_use = 0;
2383         spin_unlock_irqrestore(&phba->hbalock, flags);
2384 }
2385
2386 /**
2387  * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
2388  * @phba: Pointer to HBA context object.
2389  * @hbqno: HBQ number.
2390  * @hbq_buf: Pointer to HBQ buffer.
2391  *
2392  * This function is called with the hbalock held to post a
2393  * hbq buffer to the firmware. If the function finds an empty
2394  * slot in the HBQ, it will post the buffer. The function returns
2395  * zero if it successfully posts the buffer, else it returns
2396  * an error code.
2397  **/
2398 static int
2399 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2400                          struct hbq_dmabuf *hbq_buf)
2401 {
2402         lockdep_assert_held(&phba->hbalock);
2403         return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2404 }
2405
2406 /**
2407  * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2408  * @phba: Pointer to HBA context object.
2409  * @hbqno: HBQ number.
2410  * @hbq_buf: Pointer to HBQ buffer.
2411  *
2412  * This function is called with the hbalock held to post a hbq buffer to the
2413  * firmware. If the function finds an empty slot in the HBQ, it will post the
2414  * buffer and place it on the hbq_buffer_list. The function will return zero if
2415  * it successfully posts the buffer, else it will return an error.
2416  **/
2417 static int
2418 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2419                             struct hbq_dmabuf *hbq_buf)
2420 {
2421         struct lpfc_hbq_entry *hbqe;
2422         dma_addr_t physaddr = hbq_buf->dbuf.phys;
2423
2424         lockdep_assert_held(&phba->hbalock);
2425         /* Get next HBQ entry slot to use */
2426         hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2427         if (hbqe) {
2428                 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2429
2430                 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2431                 hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
2432                 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2433                 hbqe->bde.tus.f.bdeFlags = 0;
2434                 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2435                 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2436                                 /* Sync SLIM */
2437                 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2438                 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2439                                 /* flush */
2440                 readl(phba->hbq_put + hbqno);
2441                 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2442                 return 0;
2443         } else
2444                 return -ENOMEM;
2445 }
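/*
 * Worked example of the BDE address split above: for a DMA address of
 * 0x0000001234567000, putPaddrHigh() yields 0x12 for addrHigh and
 * putPaddrLow() yields 0x34567000 for addrLow.
 */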
2446
2447 /**
2448  * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2449  * @phba: Pointer to HBA context object.
2450  * @hbqno: HBQ number.
2451  * @hbq_buf: Pointer to HBQ buffer.
2452  *
2453  * This function is called with the hbalock held to post an RQE to the SLI4
2454  * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2455  * the hbq_buffer_list and return zero, otherwise it will return an error.
2456  **/
2457 static int
2458 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2459                             struct hbq_dmabuf *hbq_buf)
2460 {
2461         int rc;
2462         struct lpfc_rqe hrqe;
2463         struct lpfc_rqe drqe;
2464         struct lpfc_queue *hrq;
2465         struct lpfc_queue *drq;
2466
2467         if (hbqno != LPFC_ELS_HBQ)
2468                 return 1;
2469         hrq = phba->sli4_hba.hdr_rq;
2470         drq = phba->sli4_hba.dat_rq;
2471
2472         lockdep_assert_held(&phba->hbalock);
2473         hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2474         hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2475         drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2476         drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2477         rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2478         if (rc < 0)
2479                 return rc;
2480         hbq_buf->tag = (rc | (hbqno << 16));
2481         list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2482         return 0;
2483 }
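/*
 * Illustration of the tag encoding above: if lpfc_sli4_rq_put() returns
 * RQE index 5 for LPFC_ELS_HBQ, the buffer tag becomes (hbqno << 16) | 5,
 * and lpfc_sli_hbqbuf_find() later recovers the HBQ number as tag >> 16
 * before matching the full tag on that HBQ's buffer list.
 */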
2484
2485 /* HBQ for ELS and CT traffic. */
2486 static struct lpfc_hbq_init lpfc_els_hbq = {
2487         .rn = 1,
2488         .entry_count = 256,
2489         .mask_count = 0,
2490         .profile = 0,
2491         .ring_mask = (1 << LPFC_ELS_RING),
2492         .buffer_count = 0,
2493         .init_count = 40,
2494         .add_count = 40,
2495 };
2496
2497 /* Array of HBQs */
2498 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2499         &lpfc_els_hbq,
2500 };
2501
2502 /**
2503  * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2504  * @phba: Pointer to HBA context object.
2505  * @hbqno: HBQ number.
2506  * @count: Number of HBQ buffers to be posted.
2507  *
2508  * This function is called with no lock held to post more hbq buffers to the
2509  * given HBQ. The function returns the number of HBQ buffers successfully
2510  * posted.
2511  **/
2512 static int
2513 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2514 {
2515         uint32_t i, posted = 0;
2516         unsigned long flags;
2517         struct hbq_dmabuf *hbq_buffer;
2518         LIST_HEAD(hbq_buf_list);
2519         if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2520                 return 0;
2521
2522         if ((phba->hbqs[hbqno].buffer_count + count) >
2523             lpfc_hbq_defs[hbqno]->entry_count)
2524                 count = lpfc_hbq_defs[hbqno]->entry_count -
2525                                         phba->hbqs[hbqno].buffer_count;
2526         if (!count)
2527                 return 0;
2528         /* Allocate HBQ entries */
2529         for (i = 0; i < count; i++) {
2530                 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2531                 if (!hbq_buffer)
2532                         break;
2533                 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2534         }
2535         /* Check whether HBQ is still in use */
2536         spin_lock_irqsave(&phba->hbalock, flags);
2537         if (!phba->hbq_in_use)
2538                 goto err;
2539         while (!list_empty(&hbq_buf_list)) {
2540                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2541                                  dbuf.list);
2542                 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2543                                       (hbqno << 16));
2544                 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2545                         phba->hbqs[hbqno].buffer_count++;
2546                         posted++;
2547                 } else
2548                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2549         }
2550         spin_unlock_irqrestore(&phba->hbalock, flags);
2551         return posted;
2552 err:
2553         spin_unlock_irqrestore(&phba->hbalock, flags);
2554         while (!list_empty(&hbq_buf_list)) {
2555                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2556                                  dbuf.list);
2557                 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2558         }
2559         return 0;
2560 }
2561
2562 /**
2563  * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2564  * @phba: Pointer to HBA context object.
2565  * @qno: HBQ number.
2566  *
2567  * This function posts more buffers to the HBQ. This function
2568  * is called with no lock held. The function returns the number of HBQ entries
2569  * successfully allocated.
2570  **/
2571 int
2572 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2573 {
2574         if (phba->sli_rev == LPFC_SLI_REV4)
2575                 return 0;
2576         else
2577                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2578                                          lpfc_hbq_defs[qno]->add_count);
2579 }
2580
2581 /**
2582  * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2583  * @phba: Pointer to HBA context object.
2584  * @qno:  HBQ queue number.
2585  *
2586  * This function is called from SLI initialization code path with
2587  * no lock held to post initial HBQ buffers to firmware. The
2588  * function returns the number of HBQ entries successfully allocated.
2589  **/
2590 static int
2591 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2592 {
2593         if (phba->sli_rev == LPFC_SLI_REV4)
2594                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2595                                         lpfc_hbq_defs[qno]->entry_count);
2596         else
2597                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2598                                          lpfc_hbq_defs[qno]->init_count);
2599 }
2600
2601 /**
2602  * lpfc_sli_hbqbuf_get - Remove the first hbq buffer off of an hbq list
2603  * @rb_list: Pointer to the hbq buffer list head.
2604  * This function removes the first hbq buffer on an hbq list and returns a
2605  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2606  **/
2607 static struct hbq_dmabuf *
2608 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2609 {
2610         struct lpfc_dmabuf *d_buf;
2611
2612         list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2613         if (!d_buf)
2614                 return NULL;
2615         return container_of(d_buf, struct hbq_dmabuf, dbuf);
2616 }
2617
2618 /**
2619  * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2620  * @phba: Pointer to HBA context object.
2621  * @hrq: Pointer to the header receive queue.
2622  *
2623  * This function removes the first RQ buffer on an RQ buffer list and returns a
2624  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2625  **/
2626 static struct rqb_dmabuf *
2627 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2628 {
2629         struct lpfc_dmabuf *h_buf;
2630         struct lpfc_rqb *rqbp;
2631
2632         rqbp = hrq->rqbp;
2633         list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2634                          struct lpfc_dmabuf, list);
2635         if (!h_buf)
2636                 return NULL;
2637         rqbp->buffer_count--;
2638         return container_of(h_buf, struct rqb_dmabuf, hbuf);
2639 }
2640
2641 /**
2642  * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2643  * @phba: Pointer to HBA context object.
2644  * @tag: Tag of the hbq buffer.
2645  *
2646  * This function searches for the hbq buffer associated with the given tag in
2647  * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2648  * otherwise it returns NULL.
2649  **/
2650 static struct hbq_dmabuf *
2651 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2652 {
2653         struct lpfc_dmabuf *d_buf;
2654         struct hbq_dmabuf *hbq_buf;
2655         uint32_t hbqno;
2656
2657         hbqno = tag >> 16;
2658         if (hbqno >= LPFC_MAX_HBQS)
2659                 return NULL;
2660
2661         spin_lock_irq(&phba->hbalock);
2662         list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2663                 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2664                 if (hbq_buf->tag == tag) {
2665                         spin_unlock_irq(&phba->hbalock);
2666                         return hbq_buf;
2667                 }
2668         }
2669         spin_unlock_irq(&phba->hbalock);
2670         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2671                         "1803 Bad hbq tag. Data: x%x x%x\n",
2672                         tag, phba->hbqs[tag >> 16].buffer_count);
2673         return NULL;
2674 }
2675
2676 /**
2677  * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2678  * @phba: Pointer to HBA context object.
2679  * @hbq_buffer: Pointer to HBQ buffer.
2680  *
2681  * This function is called with the hbalock held. This function gives back
2682  * the hbq buffer to firmware. If the HBQ does not have space to
2683  * post the buffer, it will free the buffer.
2684  **/
2685 void
2686 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2687 {
2688         uint32_t hbqno;
2689
2690         if (hbq_buffer) {
2691                 hbqno = hbq_buffer->tag >> 16;
2692                 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2693                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2694         }
2695 }
2696
2697 /**
2698  * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2699  * @mbxCommand: mailbox command code.
2700  *
2701  * This function is called by the mailbox event handler function to verify
2702  * that the completed mailbox command is a legitimate mailbox command. If the
2703  * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2704  * and the mailbox event handler will take the HBA offline.
2705  **/
2706 static int
2707 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2708 {
2709         uint8_t ret;
2710
2711         switch (mbxCommand) {
2712         case MBX_LOAD_SM:
2713         case MBX_READ_NV:
2714         case MBX_WRITE_NV:
2715         case MBX_WRITE_VPARMS:
2716         case MBX_RUN_BIU_DIAG:
2717         case MBX_INIT_LINK:
2718         case MBX_DOWN_LINK:
2719         case MBX_CONFIG_LINK:
2720         case MBX_CONFIG_RING:
2721         case MBX_RESET_RING:
2722         case MBX_READ_CONFIG:
2723         case MBX_READ_RCONFIG:
2724         case MBX_READ_SPARM:
2725         case MBX_READ_STATUS:
2726         case MBX_READ_RPI:
2727         case MBX_READ_XRI:
2728         case MBX_READ_REV:
2729         case MBX_READ_LNK_STAT:
2730         case MBX_REG_LOGIN:
2731         case MBX_UNREG_LOGIN:
2732         case MBX_CLEAR_LA:
2733         case MBX_DUMP_MEMORY:
2734         case MBX_DUMP_CONTEXT:
2735         case MBX_RUN_DIAGS:
2736         case MBX_RESTART:
2737         case MBX_UPDATE_CFG:
2738         case MBX_DOWN_LOAD:
2739         case MBX_DEL_LD_ENTRY:
2740         case MBX_RUN_PROGRAM:
2741         case MBX_SET_MASK:
2742         case MBX_SET_VARIABLE:
2743         case MBX_UNREG_D_ID:
2744         case MBX_KILL_BOARD:
2745         case MBX_CONFIG_FARP:
2746         case MBX_BEACON:
2747         case MBX_LOAD_AREA:
2748         case MBX_RUN_BIU_DIAG64:
2749         case MBX_CONFIG_PORT:
2750         case MBX_READ_SPARM64:
2751         case MBX_READ_RPI64:
2752         case MBX_REG_LOGIN64:
2753         case MBX_READ_TOPOLOGY:
2754         case MBX_WRITE_WWN:
2755         case MBX_SET_DEBUG:
2756         case MBX_LOAD_EXP_ROM:
2757         case MBX_ASYNCEVT_ENABLE:
2758         case MBX_REG_VPI:
2759         case MBX_UNREG_VPI:
2760         case MBX_HEARTBEAT:
2761         case MBX_PORT_CAPABILITIES:
2762         case MBX_PORT_IOV_CONTROL:
2763         case MBX_SLI4_CONFIG:
2764         case MBX_SLI4_REQ_FTRS:
2765         case MBX_REG_FCFI:
2766         case MBX_UNREG_FCFI:
2767         case MBX_REG_VFI:
2768         case MBX_UNREG_VFI:
2769         case MBX_INIT_VPI:
2770         case MBX_INIT_VFI:
2771         case MBX_RESUME_RPI:
2772         case MBX_READ_EVENT_LOG_STATUS:
2773         case MBX_READ_EVENT_LOG:
2774         case MBX_SECURITY_MGMT:
2775         case MBX_AUTH_PORT:
2776         case MBX_ACCESS_VDATA:
2777                 ret = mbxCommand;
2778                 break;
2779         default:
2780                 ret = MBX_SHUTDOWN;
2781                 break;
2782         }
2783         return ret;
2784 }
2785
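/*
 * Illustrative sketch (editorial): callers compare the return value against
 * MBX_SHUTDOWN and treat that as fatal, as lpfc_sli_handle_mb_event() below
 * does:
 *
 *      if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == MBX_SHUTDOWN) {
 *              phba->link_state = LPFC_HBA_ERROR;
 *              phba->work_hs = HS_FFER3;
 *              lpfc_handle_eratt(phba);
 *      }
 */
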
2786 /**
2787  * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2788  * @phba: Pointer to HBA context object.
2789  * @pmboxq: Pointer to mailbox command.
2790  *
2791  * This is the completion handler function for mailbox commands issued
2792  * from the lpfc_sli_issue_mbox_wait function. This function is called by
2793  * the mailbox event handler function with no lock held. This function
2794  * will wake up the thread waiting on the wait queue pointed to by the
2795  * context3 field of the mailbox.
2796  **/
2797 void
2798 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2799 {
2800         unsigned long drvr_flag;
2801         struct completion *pmbox_done;
2802
2803         /*
2804          * If pmbox_done is NULL, the driver thread gave up waiting and
2805          * continued running.
2806          */
2807         pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2808         spin_lock_irqsave(&phba->hbalock, drvr_flag);
2809         pmbox_done = (struct completion *)pmboxq->context3;
2810         if (pmbox_done)
2811                 complete(pmbox_done);
2812         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2813         return;
2814 }
2815
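/*
 * Illustrative sketch (editorial): the waiting side of this handshake,
 * loosely modeled on lpfc_sli_issue_mbox_wait(), parks a completion in the
 * context3 field and blocks until this handler fires ("timeout", in
 * seconds, is a placeholder):
 *
 *      DECLARE_COMPLETION_ONSTACK(mbox_done);
 *
 *      pmboxq->context3 = &mbox_done;
 *      retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *      if (retval == MBX_BUSY || retval == MBX_SUCCESS)
 *              wait_for_completion_timeout(&mbox_done,
 *                                          msecs_to_jiffies(timeout * 1000));
 *      pmboxq->context3 = NULL;
 */
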
2816 static void
2817 __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2818 {
2819         unsigned long iflags;
2820
2821         if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2822                 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2823                 spin_lock_irqsave(&ndlp->lock, iflags);
2824                 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2825                 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2826                 spin_unlock_irqrestore(&ndlp->lock, iflags);
2827         }
2828         ndlp->nlp_flag &= ~NLP_UNREG_INP;
2829 }
2830
2831 /**
2832  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2833  * @phba: Pointer to HBA context object.
2834  * @pmb: Pointer to mailbox object.
2835  *
2836  * This function is the default mailbox completion handler. It
2837  * frees the memory resources associated with the completed mailbox
2838  * command. If the completed command is a REG_LOGIN mailbox command,
2839  * this function will issue an UNREG_LOGIN to reclaim the RPI.
2840  **/
2841 void
2842 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2843 {
2844         struct lpfc_vport  *vport = pmb->vport;
2845         struct lpfc_dmabuf *mp;
2846         struct lpfc_nodelist *ndlp;
2847         struct Scsi_Host *shost;
2848         uint16_t rpi, vpi;
2849         int rc;
2850
2851         mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
2852
2853         if (mp) {
2854                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2855                 kfree(mp);
2856         }
2857
2858         /*
2859          * If a REG_LOGIN succeeded after the node was destroyed or the
2860          * node is in re-discovery, the driver needs to clean up the RPI.
2861          */
2862         if (!(phba->pport->load_flag & FC_UNLOADING) &&
2863             pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2864             !pmb->u.mb.mbxStatus) {
2865                 rpi = pmb->u.mb.un.varWords[0];
2866                 vpi = pmb->u.mb.un.varRegLogin.vpi;
2867                 if (phba->sli_rev == LPFC_SLI_REV4)
2868                         vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2869                 lpfc_unreg_login(phba, vpi, rpi, pmb);
2870                 pmb->vport = vport;
2871                 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2872                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2873                 if (rc != MBX_NOT_FINISHED)
2874                         return;
2875         }
2876
2877         if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2878                 !(phba->pport->load_flag & FC_UNLOADING) &&
2879                 !pmb->u.mb.mbxStatus) {
2880                 shost = lpfc_shost_from_vport(vport);
2881                 spin_lock_irq(shost->host_lock);
2882                 vport->vpi_state |= LPFC_VPI_REGISTERED;
2883                 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2884                 spin_unlock_irq(shost->host_lock);
2885         }
2886
2887         if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2888                 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2889                 lpfc_nlp_put(ndlp);
2890                 pmb->ctx_buf = NULL;
2891                 pmb->ctx_ndlp = NULL;
2892         }
2893
2894         if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2895                 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2896
2897                 /* Check to see if there are any deferred events to process */
2898                 if (ndlp) {
2899                         lpfc_printf_vlog(
2900                                 vport,
2901                                 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2902                                 "1438 UNREG cmpl deferred mbox x%x "
2903                                 "on NPort x%x Data: x%x x%x x%px x%x x%x\n",
2904                                 ndlp->nlp_rpi, ndlp->nlp_DID,
2905                                 ndlp->nlp_flag, ndlp->nlp_defer_did,
2906                                 ndlp, vport->load_flag, kref_read(&ndlp->kref));
2907
2908                         if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2909                             (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2910                                 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2911                                 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2912                                 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2913                         } else {
2914                                 __lpfc_sli_rpi_release(vport, ndlp);
2915                         }
2916
2917                         /* The unreg_login mailbox is complete and had a
2918                          * reference that has to be released.  The PLOGI
2919                          * got its own ref.
2920                          */
2921                         lpfc_nlp_put(ndlp);
2922                         pmb->ctx_ndlp = NULL;
2923                 }
2924         }
2925
2926         /* This nlp_put pairs with lpfc_sli4_resume_rpi */
2927         if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
2928                 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2929                 lpfc_nlp_put(ndlp);
2930         }
2931
2932         /* Check security permission status on INIT_LINK mailbox command */
2933         if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2934             (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2935                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2936                                 "2860 SLI authentication is required "
2937                                 "for INIT_LINK but has not been done yet\n");
2938
2939         if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2940                 lpfc_sli4_mbox_cmd_free(phba, pmb);
2941         else
2942                 mempool_free(pmb, phba->mbox_mem_pool);
2943 }
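
/*
 * Illustrative sketch (editorial): a typical fire-and-forget mailbox wires
 * up this default completion handler so resources are reclaimed on
 * completion; if the issue fails outright, the caller frees the mailbox
 * itself:
 *
 *      pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *      rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 *      if (rc == MBX_NOT_FINISHED)
 *              mempool_free(pmb, phba->mbox_mem_pool);
 */
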
2944 /**
2945  * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2946  * @phba: Pointer to HBA context object.
2947  * @pmb: Pointer to mailbox object.
2948  *
2949  * This function is the unreg rpi mailbox completion handler. It
2950  * frees the memory resources associated with the completed mailbox
2951  * command. An additional reference is held on the ndlp to prevent
2952  * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2953  * the unreg mailbox command completes; this routine drops that
2954  * reference.
2955  *
2956  **/
2957 void
2958 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2959 {
2960         struct lpfc_vport  *vport = pmb->vport;
2961         struct lpfc_nodelist *ndlp;
2962
2963         ndlp = pmb->ctx_ndlp;
2964         if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2965                 if (phba->sli_rev == LPFC_SLI_REV4 &&
2966                     (bf_get(lpfc_sli_intf_if_type,
2967                      &phba->sli4_hba.sli_intf) >=
2968                      LPFC_SLI_INTF_IF_TYPE_2)) {
2969                         if (ndlp) {
2970                                 lpfc_printf_vlog(
2971                                          vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2972                                          "0010 UNREG_LOGIN vpi:%x "
2973                                          "rpi:%x DID:%x defer x%x flg x%x "
2974                                          "x%px\n",
2975                                          vport->vpi, ndlp->nlp_rpi,
2976                                          ndlp->nlp_DID, ndlp->nlp_defer_did,
2977                                          ndlp->nlp_flag,
2978                                          ndlp);
2979                                 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2980
2981                                 /* Check to see if there are any deferred
2982                                  * events to process
2983                                  */
2984                                 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2985                                     (ndlp->nlp_defer_did !=
2986                                     NLP_EVT_NOTHING_PENDING)) {
2987                                         lpfc_printf_vlog(
2988                                                 vport, KERN_INFO, LOG_DISCOVERY,
2989                                                 "4111 UNREG cmpl deferred "
2990                                                 "clr x%x on "
2991                                                 "NPort x%x Data: x%x x%px\n",
2992                                                 ndlp->nlp_rpi, ndlp->nlp_DID,
2993                                                 ndlp->nlp_defer_did, ndlp);
2994                                         ndlp->nlp_flag &= ~NLP_UNREG_INP;
2995                                         ndlp->nlp_defer_did =
2996                                                 NLP_EVT_NOTHING_PENDING;
2997                                         lpfc_issue_els_plogi(
2998                                                 vport, ndlp->nlp_DID, 0);
2999                                 } else {
3000                                         __lpfc_sli_rpi_release(vport, ndlp);
3001                                 }
3002                                 lpfc_nlp_put(ndlp);
3003                         }
3004                 }
3005         }
3006
3007         mempool_free(pmb, phba->mbox_mem_pool);
3008 }
3009
3010 /**
3011  * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
3012  * @phba: Pointer to HBA context object.
3013  *
3014  * This function is called with no lock held. This function processes all
3015  * the completed mailbox commands and gives them to the upper layers. The
3016  * interrupt service routine processes the mailbox completion interrupt,
3017  * adds completed mailbox commands to the mboxq_cmpl queue and signals the
3018  * worker thread. The worker thread calls lpfc_sli_handle_mb_event, which
3019  * returns the completed mailbox commands in the mboxq_cmpl queue to the
3020  * upper layers. This function returns the mailbox commands to the upper
3021  * layer by calling the completion handler function of each mailbox.
3022  **/
3023 int
3024 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
3025 {
3026         MAILBOX_t *pmbox;
3027         LPFC_MBOXQ_t *pmb;
3028         int rc;
3029         LIST_HEAD(cmplq);
3030
3031         phba->sli.slistat.mbox_event++;
3032
3033         /* Get all completed mailbox buffers into the cmplq */
3034         spin_lock_irq(&phba->hbalock);
3035         list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
3036         spin_unlock_irq(&phba->hbalock);
3037
3038         /* Get a Mailbox buffer to set up mailbox commands for callback */
3039         do {
3040                 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
3041                 if (pmb == NULL)
3042                         break;
3043
3044                 pmbox = &pmb->u.mb;
3045
3046                 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
3047                         if (pmb->vport) {
3048                                 lpfc_debugfs_disc_trc(pmb->vport,
3049                                         LPFC_DISC_TRC_MBOX_VPORT,
3050                                         "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
3051                                         (uint32_t)pmbox->mbxCommand,
3052                                         pmbox->un.varWords[0],
3053                                         pmbox->un.varWords[1]);
3054                         }
3055                         else {
3056                                 lpfc_debugfs_disc_trc(phba->pport,
3057                                         LPFC_DISC_TRC_MBOX,
3058                                         "MBOX cmpl:       cmd:x%x mb:x%x x%x",
3059                                         (uint32_t)pmbox->mbxCommand,
3060                                         pmbox->un.varWords[0],
3061                                         pmbox->un.varWords[1]);
3062                         }
3063                 }
3064
3065                 /*
3066                  * An unknown mailbox command completion is a fatal error.
3067                  */
3068                 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
3069                     MBX_SHUTDOWN) {
3070                         /* Unknown mailbox command compl */
3071                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3072                                         "(%d):0323 Unknown Mailbox command "
3073                                         "x%x (x%x/x%x) Cmpl\n",
3074                                         pmb->vport ? pmb->vport->vpi :
3075                                         LPFC_VPORT_UNKNOWN,
3076                                         pmbox->mbxCommand,
3077                                         lpfc_sli_config_mbox_subsys_get(phba,
3078                                                                         pmb),
3079                                         lpfc_sli_config_mbox_opcode_get(phba,
3080                                                                         pmb));
3081                         phba->link_state = LPFC_HBA_ERROR;
3082                         phba->work_hs = HS_FFER3;
3083                         lpfc_handle_eratt(phba);
3084                         continue;
3085                 }
3086
3087                 if (pmbox->mbxStatus) {
3088                         phba->sli.slistat.mbox_stat_err++;
3089                         if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
3090                                 /* Mbox cmd cmpl error - RETRYing */
3091                                 lpfc_printf_log(phba, KERN_INFO,
3092                                         LOG_MBOX | LOG_SLI,
3093                                         "(%d):0305 Mbox cmd cmpl "
3094                                         "error - RETRYing Data: x%x "
3095                                         "(x%x/x%x) x%x x%x x%x\n",
3096                                         pmb->vport ? pmb->vport->vpi :
3097                                         LPFC_VPORT_UNKNOWN,
3098                                         pmbox->mbxCommand,
3099                                         lpfc_sli_config_mbox_subsys_get(phba,
3100                                                                         pmb),
3101                                         lpfc_sli_config_mbox_opcode_get(phba,
3102                                                                         pmb),
3103                                         pmbox->mbxStatus,
3104                                         pmbox->un.varWords[0],
3105                                         pmb->vport ? pmb->vport->port_state :
3106                                         LPFC_VPORT_UNKNOWN);
3107                                 pmbox->mbxStatus = 0;
3108                                 pmbox->mbxOwner = OWN_HOST;
3109                                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3110                                 if (rc != MBX_NOT_FINISHED)
3111                                         continue;
3112                         }
3113                 }
3114
3115                 /* Mailbox cmd <cmd> Cmpl <cmpl> */
3116                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
3117                                 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
3118                                 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
3119                                 "x%x x%x x%x\n",
3120                                 pmb->vport ? pmb->vport->vpi : 0,
3121                                 pmbox->mbxCommand,
3122                                 lpfc_sli_config_mbox_subsys_get(phba, pmb),
3123                                 lpfc_sli_config_mbox_opcode_get(phba, pmb),
3124                                 pmb->mbox_cmpl,
3125                                 *((uint32_t *) pmbox),
3126                                 pmbox->un.varWords[0],
3127                                 pmbox->un.varWords[1],
3128                                 pmbox->un.varWords[2],
3129                                 pmbox->un.varWords[3],
3130                                 pmbox->un.varWords[4],
3131                                 pmbox->un.varWords[5],
3132                                 pmbox->un.varWords[6],
3133                                 pmbox->un.varWords[7],
3134                                 pmbox->un.varWords[8],
3135                                 pmbox->un.varWords[9],
3136                                 pmbox->un.varWords[10]);
3137
3138                 if (pmb->mbox_cmpl)
3139                         pmb->mbox_cmpl(phba, pmb);
3140         } while (1);
3141         return 0;
3142 }
3143
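/*
 * Illustrative sketch (editorial): this routine runs in the worker thread;
 * a dispatcher along the lines of lpfc_work_done() invokes it when the
 * mailbox attention bit is seen ("ha_copy" is an assumed snapshot of the
 * Host Attention register):
 *
 *      if (ha_copy & HA_MBATT)
 *              lpfc_sli_handle_mb_event(phba);
 */
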
3144 /**
3145  * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
3146  * @phba: Pointer to HBA context object.
3147  * @pring: Pointer to driver SLI ring object.
3148  * @tag: buffer tag.
3149  *
3150  * This function is called with no lock held. When the QUE_BUFTAG_BIT
3151  * is set in the tag, the buffer was posted for a particular exchange and
3152  * the function will return the buffer without replacing it.
3153  * If the buffer is for unsolicited ELS or CT traffic, this function
3154  * returns the buffer and also posts another buffer to the firmware.
3155  **/
3156 static struct lpfc_dmabuf *
3157 lpfc_sli_get_buff(struct lpfc_hba *phba,
3158                   struct lpfc_sli_ring *pring,
3159                   uint32_t tag)
3160 {
3161         struct hbq_dmabuf *hbq_entry;
3162
3163         if (tag & QUE_BUFTAG_BIT)
3164                 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
3165         hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
3166         if (!hbq_entry)
3167                 return NULL;
3168         return &hbq_entry->dbuf;
3169 }
3170
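/*
 * Illustrative sketch (editorial): unsolicited-event handlers retrieve and
 * then recycle such buffers; a minimal consumer might look like ("tag" is
 * a placeholder taken from the received iocb):
 *
 *      struct lpfc_dmabuf *d_buf;
 *
 *      d_buf = lpfc_sli_get_buff(phba, pring, tag);
 *      if (d_buf)
 *              lpfc_in_buf_free(phba, d_buf);
 */
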
3171 /**
3172  * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
3173  *                              containing a NVME LS request.
3174  * @phba: pointer to lpfc hba data structure.
3175  * @piocb: pointer to the iocbq struct representing the sequence starting
3176  *        frame.
3177  *
3178  * This routine initially validates the NVME LS, validates there is a login
3179  * with the port that sent the LS, and then calls the appropriate nvme host
3180  * or target LS request handler.
3181  **/
3182 static void
3183 lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
3184 {
3185         struct lpfc_nodelist *ndlp;
3186         struct lpfc_dmabuf *d_buf;
3187         struct hbq_dmabuf *nvmebuf;
3188         struct fc_frame_header *fc_hdr;
3189         struct lpfc_async_xchg_ctx *axchg = NULL;
3190         char *failwhy = NULL;
3191         uint32_t oxid, sid, did, fctl, size;
3192         int ret = 1;
3193
3194         d_buf = piocb->context2;
3195
3196         nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
3197         fc_hdr = nvmebuf->hbuf.virt;
3198         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
3199         sid = sli4_sid_from_fc_hdr(fc_hdr);
3200         did = sli4_did_from_fc_hdr(fc_hdr);
3201         fctl = (fc_hdr->fh_f_ctl[0] << 16 |
3202                 fc_hdr->fh_f_ctl[1] << 8 |
3203                 fc_hdr->fh_f_ctl[2]);
3204         size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
3205
3206         lpfc_nvmeio_data(phba, "NVME LS    RCV: xri x%x sz %d from %06x\n",
3207                          oxid, size, sid);
3208
3209         if (phba->pport->load_flag & FC_UNLOADING) {
3210                 failwhy = "Driver Unloading";
3211         } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
3212                 failwhy = "NVME FC4 Disabled";
3213         } else if (!phba->nvmet_support && !phba->pport->localport) {
3214                 failwhy = "No Localport";
3215         } else if (phba->nvmet_support && !phba->targetport) {
3216                 failwhy = "No Targetport";
3217         } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
3218                 failwhy = "Bad NVME LS R_CTL";
3219         } else if (unlikely((fctl & 0x00FF0000) !=
3220                         (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
3221                 failwhy = "Bad NVME LS F_CTL";
3222         } else {
3223                 axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
3224                 if (!axchg)
3225                         failwhy = "No CTX memory";
3226         }
3227
3228         if (unlikely(failwhy)) {
3229                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3230                                 "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
3231                                 sid, oxid, failwhy);
3232                 goto out_fail;
3233         }
3234
3235         /* validate the source of the LS is logged in */
3236         ndlp = lpfc_findnode_did(phba->pport, sid);
3237         if (!ndlp ||
3238             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3239              (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3240                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
3241                                 "6216 NVME Unsol rcv: No ndlp: "
3242                                 "NPort_ID x%x oxid x%x\n",
3243                                 sid, oxid);
3244                 goto out_fail;
3245         }
3246
3247         axchg->phba = phba;
3248         axchg->ndlp = ndlp;
3249         axchg->size = size;
3250         axchg->oxid = oxid;
3251         axchg->sid = sid;
3252         axchg->wqeq = NULL;
3253         axchg->state = LPFC_NVME_STE_LS_RCV;
3254         axchg->entry_cnt = 1;
3255         axchg->rqb_buffer = (void *)nvmebuf;
3256         axchg->hdwq = &phba->sli4_hba.hdwq[0];
3257         axchg->payload = nvmebuf->dbuf.virt;
3258         INIT_LIST_HEAD(&axchg->list);
3259
3260         if (phba->nvmet_support) {
3261                 ret = lpfc_nvmet_handle_lsreq(phba, axchg);
3262                 spin_lock_irq(&ndlp->lock);
3263                 if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
3264                         ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
3265                         spin_unlock_irq(&ndlp->lock);
3266
3267                         /* This reference is a single occurrence to hold the
3268                          * node valid until the nvmet transport calls
3269                          * host_release.
3270                          */
3271                         if (!lpfc_nlp_get(ndlp))
3272                                 goto out_fail;
3273
3274                         lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
3275                                         "6206 NVMET unsol ls_req ndlp x%px "
3276                                         "DID x%x xflags x%x refcnt %d\n",
3277                                         ndlp, ndlp->nlp_DID,
3278                                         ndlp->fc4_xpt_flags,
3279                                         kref_read(&ndlp->kref));
3280                 } else {
3281                         spin_unlock_irq(&ndlp->lock);
3282                 }
3283         } else {
3284                 ret = lpfc_nvme_handle_lsreq(phba, axchg);
3285         }
3286
3287         /* if zero, LS was successfully handled. If non-zero, LS not handled */
3288         if (!ret)
3289                 return;
3290
3291 out_fail:
3292         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3293                         "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
3294                         "NVMe%s handler failed %d\n",
3295                         did, sid, oxid,
3296                         (phba->nvmet_support) ? "T" : "I", ret);
3297
3298         /* recycle receive buffer */
3299         lpfc_in_buf_free(phba, &nvmebuf->dbuf);
3300
3301         /* If start of new exchange, abort it */
3302         if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
3303                 ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
3304
3305         if (ret)
3306                 kfree(axchg);
3307 }
3308
3309 /**
3310  * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
3311  * @phba: Pointer to HBA context object.
3312  * @pring: Pointer to driver SLI ring object.
3313  * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
3314  * @fch_r_ctl: the r_ctl for the first frame of the sequence.
3315  * @fch_type: the type for the first frame of the sequence.
3316  *
3317  * This function is called with no lock held. This function uses the r_ctl and
3318  * type of the received sequence to find the correct callback function to call
3319  * to process the sequence.
3320  **/
3321 static int
3322 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3323                          struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
3324                          uint32_t fch_type)
3325 {
3326         int i;
3327
3328         switch (fch_type) {
3329         case FC_TYPE_NVME:
3330                 lpfc_nvme_unsol_ls_handler(phba, saveq);
3331                 return 1;
3332         default:
3333                 break;
3334         }
3335
3336         /* unSolicited Responses */
3337         if (pring->prt[0].profile) {
3338                 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
3339                         (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
3340                                                                         saveq);
3341                 return 1;
3342         }
3343         /* We must search, based on rctl / type,
3344          * for the right routine */
3345         for (i = 0; i < pring->num_mask; i++) {
3346                 if ((pring->prt[i].rctl == fch_r_ctl) &&
3347                     (pring->prt[i].type == fch_type)) {
3348                         if (pring->prt[i].lpfc_sli_rcv_unsol_event)
3349                                 (pring->prt[i].lpfc_sli_rcv_unsol_event)
3350                                                 (phba, pring, saveq);
3351                         return 1;
3352                 }
3353         }
3354         return 0;
3355 }
3356
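/*
 * Illustrative sketch (editorial): the prt[] masks consulted above are
 * registered at ring setup time; an ELS entry, for example, is filled in
 * roughly as follows (handler as wired up elsewhere in the driver):
 *
 *      pring->prt[0].rctl = FC_RCTL_ELS_REQ;
 *      pring->prt[0].type = FC_TYPE_ELS;
 *      pring->prt[0].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event;
 */
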
3357 static void
3358 lpfc_sli_prep_unsol_wqe(struct lpfc_hba *phba,
3359                         struct lpfc_iocbq *saveq)
3360 {
3361         IOCB_t *irsp;
3362         union lpfc_wqe128 *wqe;
3363         u16 i = 0;
3364
3365         irsp = &saveq->iocb;
3366         wqe = &saveq->wqe;
3367
3368         /* Fill wcqe with the IOCB status fields */
3369         bf_set(lpfc_wcqe_c_status, &saveq->wcqe_cmpl, irsp->ulpStatus);
3370         saveq->wcqe_cmpl.word3 = irsp->ulpBdeCount;
3371         saveq->wcqe_cmpl.parameter = irsp->un.ulpWord[4];
3372         saveq->wcqe_cmpl.total_data_placed = irsp->unsli3.rcvsli3.acc_len;
3373
3374         /* Source ID */
3375         bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, irsp->un.rcvels.parmRo);
3376
3377         /* rx-id of the response frame */
3378         bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, irsp->ulpContext);
3379
3380         /* ox-id of the frame */
3381         bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
3382                irsp->unsli3.rcvsli3.ox_id);
3383
3384         /* DID */
3385         bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
3386                irsp->un.rcvels.remoteID);
3387
3388         /* unsol data len */
3389         for (i = 0; i < irsp->ulpBdeCount; i++) {
3390                 struct lpfc_hbq_entry *hbqe = NULL;
3391
3392                 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3393                         if (i == 0) {
3394                                 hbqe = (struct lpfc_hbq_entry *)
3395                                         &irsp->un.ulpWord[0];
3396                                 saveq->wqe.gen_req.bde.tus.f.bdeSize =
3397                                         hbqe->bde.tus.f.bdeSize;
3398                         } else if (i == 1) {
3399                                 hbqe = (struct lpfc_hbq_entry *)
3400                                         &irsp->unsli3.sli3Words[4];
3401                                 saveq->unsol_rcv_len = hbqe->bde.tus.f.bdeSize;
3402                         }
3403                 }
3404         }
3405 }
3406
3407 /**
3408  * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
3409  * @phba: Pointer to HBA context object.
3410  * @pring: Pointer to driver SLI ring object.
3411  * @saveq: Pointer to the unsolicited iocb.
3412  *
3413  * This function is called with no lock held by the ring event handler
3414  * when there is an unsolicited iocb posted to the response ring by the
3415  * firmware. This function gets the buffer associated with the iocbs
3416  * and calls the event handler for the ring. This function handles both
3417  * qring buffers and hbq buffers.
3418  * When the function returns 1, the caller can free the iocb object;
3419  * otherwise, upper layer functions will free the iocb objects.
3420  **/
3421 static int
3422 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3423                             struct lpfc_iocbq *saveq)
3424 {
3425         IOCB_t *irsp;
3426         WORD5 *w5p;
3427         dma_addr_t paddr;
3428         uint32_t Rctl, Type;
3429         struct lpfc_iocbq *iocbq;
3430         struct lpfc_dmabuf *dmzbuf;
3431
3432         irsp = &saveq->iocb;
3433         saveq->vport = phba->pport;
3434
3435         if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
3436                 if (pring->lpfc_sli_rcv_async_status)
3437                         pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
3438                 else
3439                         lpfc_printf_log(phba,
3440                                         KERN_WARNING,
3441                                         LOG_SLI,
3442                                         "0316 Ring %d handler: unexpected "
3443                                         "ASYNC_STATUS iocb received evt_code "
3444                                         "0x%x\n",
3445                                         pring->ringno,
3446                                         irsp->un.asyncstat.evt_code);
3447                 return 1;
3448         }
3449
3450         if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
3451             (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
3452                 if (irsp->ulpBdeCount > 0) {
3453                         dmzbuf = lpfc_sli_get_buff(phba, pring,
3454                                                    irsp->un.ulpWord[3]);
3455                         lpfc_in_buf_free(phba, dmzbuf);
3456                 }
3457
3458                 if (irsp->ulpBdeCount > 1) {
3459                         dmzbuf = lpfc_sli_get_buff(phba, pring,
3460                                                    irsp->unsli3.sli3Words[3]);
3461                         lpfc_in_buf_free(phba, dmzbuf);
3462                 }
3463
3464                 if (irsp->ulpBdeCount > 2) {
3465                         dmzbuf = lpfc_sli_get_buff(phba, pring,
3466                                                    irsp->unsli3.sli3Words[7]);
3467                         lpfc_in_buf_free(phba, dmzbuf);
3468                 }
3469
3470                 return 1;
3471         }
3472
3473         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3474                 if (irsp->ulpBdeCount != 0) {
3475                         saveq->context2 = lpfc_sli_get_buff(phba, pring,
3476                                                 irsp->un.ulpWord[3]);
3477                         if (!saveq->context2)
3478                                 lpfc_printf_log(phba,
3479                                         KERN_ERR,
3480                                         LOG_SLI,
3481                                         "0341 Ring %d Cannot find buffer for "
3482                                         "an unsolicited iocb. tag 0x%x\n",
3483                                         pring->ringno,
3484                                         irsp->un.ulpWord[3]);
3485                 }
3486                 if (irsp->ulpBdeCount == 2) {
3487                         saveq->context3 = lpfc_sli_get_buff(phba, pring,
3488                                                 irsp->unsli3.sli3Words[7]);
3489                         if (!saveq->context3)
3490                                 lpfc_printf_log(phba,
3491                                         KERN_ERR,
3492                                         LOG_SLI,
3493                                         "0342 Ring %d Cannot find buffer for an"
3494                                         " unsolicited iocb. tag 0x%x\n",
3495                                         pring->ringno,
3496                                         irsp->unsli3.sli3Words[7]);
3497                 }
3498                 list_for_each_entry(iocbq, &saveq->list, list) {
3499                         irsp = &iocbq->iocb;
3500                         if (irsp->ulpBdeCount != 0) {
3501                                 iocbq->context2 = lpfc_sli_get_buff(phba,
3502                                                         pring,
3503                                                         irsp->un.ulpWord[3]);
3504                                 if (!iocbq->context2)
3505                                         lpfc_printf_log(phba,
3506                                                 KERN_ERR,
3507                                                 LOG_SLI,
3508                                                 "0343 Ring %d Cannot find "
3509                                                 "buffer for an unsolicited iocb"
3510                                                 ". tag 0x%x\n", pring->ringno,
3511                                                 irsp->un.ulpWord[3]);
3512                         }
3513                         if (irsp->ulpBdeCount == 2) {
3514                                 iocbq->context3 = lpfc_sli_get_buff(phba,
3515                                                 pring,
3516                                                 irsp->unsli3.sli3Words[7]);
3517                                 if (!iocbq->context3)
3518                                         lpfc_printf_log(phba,
3519                                                 KERN_ERR,
3520                                                 LOG_SLI,
3521                                                 "0344 Ring %d Cannot find "
3522                                                 "buffer for an unsolicited "
3523                                                 "iocb. tag 0x%x\n",
3524                                                 pring->ringno,
3525                                                 irsp->unsli3.sli3Words[7]);
3526                         }
3527                 }
3528         } else {
3529                 paddr = getPaddr(irsp->un.cont64[0].addrHigh,
3530                                  irsp->un.cont64[0].addrLow);
3531                 saveq->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
3532                                                              paddr);
3533                 if (irsp->ulpBdeCount == 2) {
3534                         paddr = getPaddr(irsp->un.cont64[1].addrHigh,
3535                                          irsp->un.cont64[1].addrLow);
3536                         saveq->context3 = lpfc_sli_ringpostbuf_get(phba,
3537                                                                    pring,
3538                                                                    paddr);
3539                 }
3540         }
3541
3542         if (irsp->ulpBdeCount != 0 &&
3543             (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3544              irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3545                 int found = 0;
3546
3547                 /* search continue save q for same XRI */
3548                 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3549                         if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3550                                 saveq->iocb.unsli3.rcvsli3.ox_id) {
3551                                 list_add_tail(&saveq->list, &iocbq->list);
3552                                 found = 1;
3553                                 break;
3554                         }
3555                 }
3556                 if (!found)
3557                         list_add_tail(&saveq->clist,
3558                                       &pring->iocb_continue_saveq);
3559
3560                 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3561                         list_del_init(&iocbq->clist);
3562                         saveq = iocbq;
3563                         irsp = &saveq->iocb;
3564                 } else {
3565                         return 0;
3566                 }
3567         }
3568         if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3569             (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3570             (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3571                 Rctl = FC_RCTL_ELS_REQ;
3572                 Type = FC_TYPE_ELS;
3573         } else {
3574                 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3575                 Rctl = w5p->hcsw.Rctl;
3576                 Type = w5p->hcsw.Type;
3577
3578                 /* Firmware Workaround */
3579                 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3580                         (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3581                          irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3582                         Rctl = FC_RCTL_ELS_REQ;
3583                         Type = FC_TYPE_ELS;
3584                         w5p->hcsw.Rctl = Rctl;
3585                         w5p->hcsw.Type = Type;
3586                 }
3587         }
3588
3589         if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3590             (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
3591             irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3592                 if (irsp->unsli3.rcvsli3.vpi == 0xffff)
3593                         saveq->vport = phba->pport;
3594                 else
3595                         saveq->vport = lpfc_find_vport_by_vpid(phba,
3596                                                irsp->unsli3.rcvsli3.vpi);
3597         }
3598
3599         /* Prepare WQE with Unsol frame */
3600         lpfc_sli_prep_unsol_wqe(phba, saveq);
3601
3602         if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3603                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3604                                 "0313 Ring %d handler: unexpected Rctl x%x "
3605                                 "Type x%x received\n",
3606                                 pring->ringno, Rctl, Type);
3607
3608         return 1;
3609 }
3610
3611 /**
3612  * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
3613  * @phba: Pointer to HBA context object.
3614  * @pring: Pointer to driver SLI ring object.
3615  * @prspiocb: Pointer to response iocb object.
3616  *
3617  * This function looks up the iocb_lookup table to get the command iocb
3618  * corresponding to the given response iocb using the iotag of the
3619  * response iocb. The driver calls this function with the hbalock held
3620  * for SLI3 ports or the ring lock held for SLI4 ports.
3621  * This function returns the command iocb object if it finds the command
3622  * iocb else returns NULL.
3623  **/
3624 static struct lpfc_iocbq *
3625 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3626                       struct lpfc_sli_ring *pring,
3627                       struct lpfc_iocbq *prspiocb)
3628 {
3629         struct lpfc_iocbq *cmd_iocb = NULL;
3630         u16 iotag;
3631
3632         if (phba->sli_rev == LPFC_SLI_REV4)
3633                 iotag = get_wqe_reqtag(prspiocb);
3634         else
3635                 iotag = prspiocb->iocb.ulpIoTag;
3636
3637         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3638                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3639                 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3640                         /* remove from txcmpl queue list */
3641                         list_del_init(&cmd_iocb->list);
3642                         cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3643                         pring->txcmplq_cnt--;
3644                         return cmd_iocb;
3645                 }
3646         }
3647
3648         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3649                         "0317 iotag x%x is out of "
3650                         "range: max iotag x%x\n",
3651                         iotag, phba->sli.last_iotag);
3652         return NULL;
3653 }
3654
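/*
 * Illustrative sketch (editorial): a response-path caller takes the
 * appropriate lock, looks up the originating command, and then runs its
 * completion routine ("cmdiocbq" and "iflag" are placeholders):
 *
 *      spin_lock_irqsave(&phba->hbalock, iflag);
 *      cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, rspiocbq);
 *      spin_unlock_irqrestore(&phba->hbalock, iflag);
 *      if (cmdiocbq && cmdiocbq->cmd_cmpl)
 *              cmdiocbq->cmd_cmpl(phba, cmdiocbq, rspiocbq);
 */
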
3655 /**
3656  * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3657  * @phba: Pointer to HBA context object.
3658  * @pring: Pointer to driver SLI ring object.
3659  * @iotag: IOCB tag.
3660  *
3661  * This function looks up the iocb_lookup table to get the command iocb
3662  * corresponding to the given iotag. The driver calls this function with
3663  * the ring lock held because this function is an SLI4 port only helper.
3664  * This function returns the command iocb object if it finds the command
3665  * iocb else returns NULL.
3666  **/
3667 static struct lpfc_iocbq *
3668 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3669                              struct lpfc_sli_ring *pring, uint16_t iotag)
3670 {
3671         struct lpfc_iocbq *cmd_iocb = NULL;
3672
3673         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3674                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3675                 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3676                         /* remove from txcmpl queue list */
3677                         list_del_init(&cmd_iocb->list);
3678                         cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3679                         pring->txcmplq_cnt--;
3680                         return cmd_iocb;
3681                 }
3682         }
3683
3684         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3685                         "0372 iotag x%x lookup error: max iotag (x%x) "
3686                         "cmd_flag x%x\n",
3687                         iotag, phba->sli.last_iotag,
3688                         cmd_iocb ? cmd_iocb->cmd_flag : 0xffff);
3689         return NULL;
3690 }
3691
3692 /**
3693  * lpfc_sli_process_sol_iocb - process solicited iocb completion
3694  * @phba: Pointer to HBA context object.
3695  * @pring: Pointer to driver SLI ring object.
3696  * @saveq: Pointer to the response iocb to be processed.
3697  *
3698  * This function is called by the ring event handler for non-fcp
3699  * rings when there is a new response iocb in the response ring.
3700  * The caller is not required to hold any locks. This function
3701  * gets the command iocb associated with the response iocb and
3702  * calls the completion handler for the command iocb. If there
3703  * is no completion handler, the function will free the resources
3704  * associated with command iocb. If the response iocb is for
3705  * an already aborted command iocb, the status of the completion
3706  * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3707  * This function always returns 1.
3708  **/
3709 static int
3710 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3711                           struct lpfc_iocbq *saveq)
3712 {
3713         struct lpfc_iocbq *cmdiocbp;
3714         int rc = 1;
3715         unsigned long iflag;
3716         u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
3717
3718         cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3719
3720         ulp_command = get_job_cmnd(phba, saveq);
3721         ulp_status = get_job_ulpstatus(phba, saveq);
3722         ulp_word4 = get_job_word4(phba, saveq);
3723         ulp_context = get_job_ulpcontext(phba, saveq);
3724         if (phba->sli_rev == LPFC_SLI_REV4)
3725                 iotag = get_wqe_reqtag(saveq);
3726         else
3727                 iotag = saveq->iocb.ulpIoTag;
3728
3729         if (cmdiocbp) {
3730                 ulp_command = get_job_cmnd(phba, cmdiocbp);
3731                 if (cmdiocbp->cmd_cmpl) {
3732                         /*
3733                          * If an ELS command failed send an event to mgmt
3734                          * application.
3735                          */
3736                         if (ulp_status &&
3737                              (pring->ringno == LPFC_ELS_RING) &&
3738                              (ulp_command == CMD_ELS_REQUEST64_CR))
3739                                 lpfc_send_els_failure_event(phba,
3740                                         cmdiocbp, saveq);
3741
3742                         /*
3743                          * Post all ELS completions to the worker thread.
3744                          * All other are passed to the completion callback.
3745                          */
3746                         if (pring->ringno == LPFC_ELS_RING) {
3747                                 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3748                                     (cmdiocbp->cmd_flag &
3749                                                         LPFC_DRIVER_ABORTED)) {
3750                                         spin_lock_irqsave(&phba->hbalock,
3751                                                           iflag);
3752                                         cmdiocbp->cmd_flag &=
3753                                                 ~LPFC_DRIVER_ABORTED;
3754                                         spin_unlock_irqrestore(&phba->hbalock,
3755                                                                iflag);
3756                                         saveq->iocb.ulpStatus =
3757                                                 IOSTAT_LOCAL_REJECT;
3758                                         saveq->iocb.un.ulpWord[4] =
3759                                                 IOERR_SLI_ABORTED;
3760
3761                                         /* Firmware could still be in progress
3762                                          * of DMAing payload, so don't free data
3763                                          * buffer till after a hbeat.
3764                                          */
3765                                         spin_lock_irqsave(&phba->hbalock,
3766                                                           iflag);
3767                                         saveq->cmd_flag |= LPFC_DELAY_MEM_FREE;
3768                                         spin_unlock_irqrestore(&phba->hbalock,
3769                                                                iflag);
3770                                 }
3771                                 if (phba->sli_rev == LPFC_SLI_REV4) {
3772                                         if (saveq->cmd_flag &
3773                                             LPFC_EXCHANGE_BUSY) {
3774                                                 /* Set cmdiocb flag for the
3775                                                  * exchange busy so sgl (xri)
3776                                                  * will not be released until
3777                                                  * the abort xri is received
3778                                                  * from hba.
3779                                                  */
3780                                                 spin_lock_irqsave(
3781                                                         &phba->hbalock, iflag);
3782                                                 cmdiocbp->cmd_flag |=
3783                                                         LPFC_EXCHANGE_BUSY;
3784                                                 spin_unlock_irqrestore(
3785                                                         &phba->hbalock, iflag);
3786                                         }
3787                                         if (cmdiocbp->cmd_flag &
3788                                             LPFC_DRIVER_ABORTED) {
3789                                                 /*
3790                                                  * Clear LPFC_DRIVER_ABORTED
3791                                                  * bit in case it was driver
3792                                                  * initiated abort.
3793                                                  */
3794                                                 spin_lock_irqsave(
3795                                                         &phba->hbalock, iflag);
3796                                                 cmdiocbp->cmd_flag &=
3797                                                         ~LPFC_DRIVER_ABORTED;
3798                                                 spin_unlock_irqrestore(
3799                                                         &phba->hbalock, iflag);
3800                                                 set_job_ulpstatus(cmdiocbp,
3801                                                                   IOSTAT_LOCAL_REJECT);
3802                                                 set_job_ulpword4(cmdiocbp,
3803                                                                  IOERR_ABORT_REQUESTED);
3804                                                 /*
3805                                                  * For SLI4, irsiocb contains
3806                                                  * NO_XRI in sli_xritag, it
3807                                                  * shall not affect releasing
3808                                                  * sgl (xri) process.
3809                                                  */
3810                                                 set_job_ulpstatus(saveq,
3811                                                                   IOSTAT_LOCAL_REJECT);
3812                                                 set_job_ulpword4(saveq,
3813                                                                  IOERR_SLI_ABORTED);
3814                                                 spin_lock_irqsave(
3815                                                         &phba->hbalock, iflag);
3816                                                 saveq->cmd_flag |=
3817                                                         LPFC_DELAY_MEM_FREE;
3818                                                 spin_unlock_irqrestore(
3819                                                         &phba->hbalock, iflag);
3820                                         }
3821                                 }
3822                         }
3823                         cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq);
3824                 } else
3825                         lpfc_sli_release_iocbq(phba, cmdiocbp);
3826         } else {
3827                 /*
3828                  * Unknown initiating command based on the response iotag.
3829                  * This could be the case on the ELS ring because of
3830                  * lpfc_els_abort().
3831                  */
3832                 if (pring->ringno != LPFC_ELS_RING) {
3833                         /*
3834                          * Ring <ringno> handler: unexpected completion IoTag
3835                          * <IoTag>
3836                          */
3837                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3838                                          "0322 Ring %d handler: "
3839                                          "unexpected completion IoTag x%x "
3840                                          "Data: x%x x%x x%x x%x\n",
3841                                          pring->ringno, iotag, ulp_status,
3842                                          ulp_word4, ulp_command, ulp_context);
3843                 }
3844         }
3845
3846         return rc;
3847 }
3848
3849 /**
3850  * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3851  * @phba: Pointer to HBA context object.
3852  * @pring: Pointer to driver SLI ring object.
3853  *
3854  * This function is called from the iocb ring event handlers when the
3855  * put pointer is ahead of the get pointer for a ring. This function signals
3856  * an error attention condition to the worker thread, and the worker
3857  * thread will transition the HBA to offline state.
3858  **/
3859 static void
3860 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3861 {
3862         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3863         /*
3864          * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3865          * rsp ring <portRspMax>
3866          */
3867         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3868                         "0312 Ring %d handler: portRspPut %d "
3869                         "is bigger than rsp ring %d\n",
3870                         pring->ringno, le32_to_cpu(pgp->rspPutInx),
3871                         pring->sli.sli3.numRiocb);
3872
3873         phba->link_state = LPFC_HBA_ERROR;
3874
3875         /*
3876          * All error attention handlers are posted to
3877          * worker thread
3878          */
3879         phba->work_ha |= HA_ERATT;
3880         phba->work_hs = HS_FFER3;
3881
3882         lpfc_worker_wake_up(phba);
3883
3884         return;
3885 }
3886
3887 /**
3888  * lpfc_poll_eratt - Error attention polling timer timeout handler
3889  * @t: Pointer to the timer list from which the HBA context object is fetched.
3890  *
3891  * This function is invoked by the Error Attention polling timer when the
3892  * timer times out. It will check the SLI Error Attention register for
3893  * possible attention events. If any are found, it will post an Error Attention
3894  * event and wake up the worker thread to process it. Otherwise, it will set up the
3895  * Error Attention polling timer for the next poll.
3896  **/
3897 void lpfc_poll_eratt(struct timer_list *t)
3898 {
3899         struct lpfc_hba *phba;
3900         uint32_t eratt = 0;
3901         uint64_t sli_intr, cnt;
3902
3903         phba = from_timer(phba, t, eratt_poll);
3904
3905         /* Also track the HBA's interrupt rate (interrupts per second) */
3906         sli_intr = phba->sli.slistat.sli_intr;
3907
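        /* Account for a possible wrap of the 64-bit interrupt counter */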
3908         if (phba->sli.slistat.sli_prev_intr > sli_intr)
3909                 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3910                         sli_intr);
3911         else
3912                 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3913
3914         /* 64-bit integer division not supported on 32-bit x86 - use do_div */
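        /* do_div() divides the u64 dividend in place and returns the
         * remainder; only the quotient (the average interrupt rate per
         * second over the poll interval) is kept here.
         */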
3915         do_div(cnt, phba->eratt_poll_interval);
3916         phba->sli.slistat.sli_ips = cnt;
3917
3918         phba->sli.slistat.sli_prev_intr = sli_intr;
3919
3920         /* Check chip HA register for error event */
3921         eratt = lpfc_sli_check_eratt(phba);
3922
3923         if (eratt)
3924                 /* Tell the worker thread there is work to do */
3925                 lpfc_worker_wake_up(phba);
3926         else
3927                 /* Restart the timer for next eratt poll */
3928                 mod_timer(&phba->eratt_poll,
3929                           jiffies +
3930                           msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3931         return;
3932 }
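
/*
 * Illustrative only: elsewhere in the driver the poll timer above is
 * expected to be set up and armed roughly as below (a sketch, assuming
 * the usual timer_setup()/mod_timer() pairing):
 *
 *	timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
 *	mod_timer(&phba->eratt_poll,
 *		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
 */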
3933
3934
3935 /**
3936  * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3937  * @phba: Pointer to HBA context object.
3938  * @pring: Pointer to driver SLI ring object.
3939  * @mask: Host attention register mask for this ring.
3940  *
3941  * This function is called from the interrupt context when there is a ring
3942  * event for the fcp ring. The caller does not hold any lock.
3943  * The function processes response iocbs in the response ring, chaining
3944  * them until it finds an iocb with the LE bit set. The function calls
3945  * the completion handler of the command iocb if the response iocb
3946  * indicates a completion for a command iocb or an abort completion.
3947  * The function calls the lpfc_sli_process_unsol_iocb function if this
3948  * is an unsolicited iocb.
3949  * This routine presumes LPFC_FCP_RING handling and doesn't bother
3950  * to check it explicitly.
3951  **/
3952 int
3953 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3954                                 struct lpfc_sli_ring *pring, uint32_t mask)
3955 {
3956         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3957         IOCB_t *irsp = NULL;
3958         IOCB_t *entry = NULL;
3959         struct lpfc_iocbq *cmdiocbq = NULL;
3960         struct lpfc_iocbq rspiocbq;
3961         uint32_t status;
3962         uint32_t portRspPut, portRspMax;
3963         int rc = 1;
3964         lpfc_iocb_type type;
3965         unsigned long iflag;
3966         uint32_t rsp_cmpl = 0;
3967
3968         spin_lock_irqsave(&phba->hbalock, iflag);
3969         pring->stats.iocb_event++;
3970
3971         /*
3972          * The next available response entry should never exceed the maximum
3973          * entries.  If it does, treat it as an adapter hardware error.
3974          */
3975         portRspMax = pring->sli.sli3.numRiocb;
3976         portRspPut = le32_to_cpu(pgp->rspPutInx);
3977         if (unlikely(portRspPut >= portRspMax)) {
3978                 lpfc_sli_rsp_pointers_error(phba, pring);
3979                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3980                 return 1;
3981         }
3982         if (phba->fcp_ring_in_use) {
3983                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3984                 return 1;
3985         } else
3986                 phba->fcp_ring_in_use = 1;
3987
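        /* Order the rspPutInx read above before any reads of ring entries */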
3988         rmb();
3989         while (pring->sli.sli3.rspidx != portRspPut) {
3990                 /*
3991                  * Fetch an entry off the ring and copy it into a local data
3992                  * structure.  The copy involves a byte-swap since the
3993                  * network and PCI byte orders differ.
3994                  */
3995                 entry = lpfc_resp_iocb(phba, pring);
3996                 phba->last_completion_time = jiffies;
3997
3998                 if (++pring->sli.sli3.rspidx >= portRspMax)
3999                         pring->sli.sli3.rspidx = 0;
4000
4001                 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
4002                                       (uint32_t *) &rspiocbq.iocb,
4003                                       phba->iocb_rsp_size);
4004                 INIT_LIST_HEAD(&(rspiocbq.list));
4005                 irsp = &rspiocbq.iocb;
4006
4007                 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
4008                 pring->stats.iocb_rsp++;
4009                 rsp_cmpl++;
4010
4011                 if (unlikely(irsp->ulpStatus)) {
4012                         /*
4013                          * If resource errors are reported from the HBA,
4014                          * reduce the queue depths of the SCSI devices.
4015                          */
4016                         if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
4017                             ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
4018                              IOERR_NO_RESOURCES)) {
4019                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4020                                 phba->lpfc_rampdown_queue_depth(phba);
4021                                 spin_lock_irqsave(&phba->hbalock, iflag);
4022                         }
4023
4024                         /* Rsp ring <ringno> error: IOCB */
4025                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4026                                         "0336 Rsp Ring %d error: IOCB Data: "
4027                                         "x%x x%x x%x x%x x%x x%x x%x x%x\n",
4028                                         pring->ringno,
4029                                         irsp->un.ulpWord[0],
4030                                         irsp->un.ulpWord[1],
4031                                         irsp->un.ulpWord[2],
4032                                         irsp->un.ulpWord[3],
4033                                         irsp->un.ulpWord[4],
4034                                         irsp->un.ulpWord[5],
4035                                         *(uint32_t *)&irsp->un1,
4036                                         *((uint32_t *)&irsp->un1 + 1));
4037                 }
4038
4039                 switch (type) {
4040                 case LPFC_ABORT_IOCB:
4041                 case LPFC_SOL_IOCB:
4042                         /*
4043                          * Idle exchange closed via ABTS from port.  No iocb
4044                          * resources need to be recovered.
4045                          */
4046                         if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
4047                                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4048                                                 "0333 IOCB cmd 0x%x"
4049                                                 " processed. Skipping"
4050                                                 " completion\n",
4051                                                 irsp->ulpCommand);
4052                                 break;
4053                         }
4054
4055                         spin_unlock_irqrestore(&phba->hbalock, iflag);
4056                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
4057                                                          &rspiocbq);
4058                         spin_lock_irqsave(&phba->hbalock, iflag);
4059                         if (unlikely(!cmdiocbq))
4060                                 break;
4061                         if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
4062                                 cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
4063                         if (cmdiocbq->cmd_cmpl) {
4064                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4065                                 (cmdiocbq->cmd_cmpl)(phba, cmdiocbq,
4066                                                       &rspiocbq);
4067                                 spin_lock_irqsave(&phba->hbalock, iflag);
4068                         }
4069                         break;
4070                 case LPFC_UNSOL_IOCB:
4071                         spin_unlock_irqrestore(&phba->hbalock, iflag);
4072                         lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
4073                         spin_lock_irqsave(&phba->hbalock, iflag);
4074                         break;
4075                 default:
4076                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
4077                                 char adaptermsg[LPFC_MAX_ADPTMSG];
4078                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4079                                 memcpy(&adaptermsg[0], (uint8_t *) irsp,
4080                                        MAX_MSG_DATA);
4081                                 dev_warn(&((phba->pcidev)->dev),
4082                                          "lpfc%d: %s\n",
4083                                          phba->brd_no, adaptermsg);
4084                         } else {
4085                                 /* Unknown IOCB command */
4086                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4087                                                 "0334 Unknown IOCB command "
4088                                                 "Data: x%x, x%x x%x x%x x%x\n",
4089                                                 type, irsp->ulpCommand,
4090                                                 irsp->ulpStatus,
4091                                                 irsp->ulpIoTag,
4092                                                 irsp->ulpContext);
4093                         }
4094                         break;
4095                 }
4096
4097                 /*
4098                  * The response IOCB has been processed.  Update the ring
4099                  * pointer in SLIM.  If the port response put pointer has not
4100                  * been updated, sync the pgp->rspPutInx and fetch the new port
4101                  * response put pointer.
4102                  */
4103                 writel(pring->sli.sli3.rspidx,
4104                         &phba->host_gp[pring->ringno].rspGetInx);
4105
4106                 if (pring->sli.sli3.rspidx == portRspPut)
4107                         portRspPut = le32_to_cpu(pgp->rspPutInx);
4108         }
4109
4110         if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
4111                 pring->stats.iocb_rsp_full++;
4112                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4113                 writel(status, phba->CAregaddr);
4114                 readl(phba->CAregaddr);
4115         }
4116         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4117                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4118                 pring->stats.iocb_cmd_empty++;
4119
4120                 /* Force update of the local copy of cmdGetInx */
4121                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4122                 lpfc_sli_resume_iocb(phba, pring);
4123
4124                 if ((pring->lpfc_sli_cmd_available))
4125                         (pring->lpfc_sli_cmd_available) (phba, pring);
4126
4127         }
4128
4129         phba->fcp_ring_in_use = 0;
4130         spin_unlock_irqrestore(&phba->hbalock, iflag);
4131         return rc;
4132 }
4133
4134 /**
4135  * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
4136  * @phba: Pointer to HBA context object.
4137  * @pring: Pointer to driver SLI ring object.
4138  * @rspiocbp: Pointer to driver response IOCB object.
4139  *
4140  * This function is called from the worker thread when there is a slow-path
4141  * response IOCB to process. This function chains all the response iocbs until
4142  * response IOCB to process. This function chains all the response iocbs until
4143  * it sees the iocb with the LE bit set. The function will call
4144  * completion of a command iocb. The function will call the
4145  * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
4146  * The function frees the resources or calls the completion handler if this
4147  * iocb is an abort completion. The function returns NULL when the response
4148  * iocb has the LE bit set and all the chained iocbs are processed; otherwise
4149  * it chains the iocb onto the iocb_continueq and returns the response
4150  * iocb passed in.
4151  **/
4152 static struct lpfc_iocbq *
4153 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4154                         struct lpfc_iocbq *rspiocbp)
4155 {
4156         struct lpfc_iocbq *saveq;
4157         struct lpfc_iocbq *cmdiocb;
4158         struct lpfc_iocbq *next_iocb;
4159         IOCB_t *irsp;
4160         uint32_t free_saveq;
4161         u8 cmd_type;
4162         lpfc_iocb_type type;
4163         unsigned long iflag;
4164         u32 ulp_status = get_job_ulpstatus(phba, rspiocbp);
4165         u32 ulp_word4 = get_job_word4(phba, rspiocbp);
4166         u32 ulp_command = get_job_cmnd(phba, rspiocbp);
4167         int rc;
4168
4169         spin_lock_irqsave(&phba->hbalock, iflag);
4170         /* First add the response iocb to the continueq list */
4171         list_add_tail(&rspiocbp->list, &pring->iocb_continueq);
4172         pring->iocb_continueq_cnt++;
4173
4174         /*
4175          * By default, the driver expects to free all resources
4176          * associated with this iocb completion.
4177          */
4178         free_saveq = 1;
4179         saveq = list_get_first(&pring->iocb_continueq,
4180                                struct lpfc_iocbq, list);
4181         list_del_init(&pring->iocb_continueq);
4182         pring->iocb_continueq_cnt = 0;
4183
4184         pring->stats.iocb_rsp++;
4185
4186         /*
4187          * If resource errors are reported from the HBA, reduce
4188          * the queue depths of the SCSI devices.
4189          */
4190         if (ulp_status == IOSTAT_LOCAL_REJECT &&
4191             ((ulp_word4 & IOERR_PARAM_MASK) ==
4192              IOERR_NO_RESOURCES)) {
4193                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4194                 phba->lpfc_rampdown_queue_depth(phba);
4195                 spin_lock_irqsave(&phba->hbalock, iflag);
4196         }
4197
4198         if (ulp_status) {
4199                 /* Rsp ring <ringno> error: IOCB */
4200                 if (phba->sli_rev < LPFC_SLI_REV4) {
4201                         irsp = &rspiocbp->iocb;
4202                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4203                                         "0328 Rsp Ring %d error: ulp_status x%x "
4204                                         "IOCB Data: "
4205                                         "x%08x x%08x x%08x x%08x "
4206                                         "x%08x x%08x x%08x x%08x "
4207                                         "x%08x x%08x x%08x x%08x "
4208                                         "x%08x x%08x x%08x x%08x\n",
4209                                         pring->ringno, ulp_status,
4210                                         get_job_ulpword(rspiocbp, 0),
4211                                         get_job_ulpword(rspiocbp, 1),
4212                                         get_job_ulpword(rspiocbp, 2),
4213                                         get_job_ulpword(rspiocbp, 3),
4214                                         get_job_ulpword(rspiocbp, 4),
4215                                         get_job_ulpword(rspiocbp, 5),
4216                                         *(((uint32_t *)irsp) + 6),
4217                                         *(((uint32_t *)irsp) + 7),
4218                                         *(((uint32_t *)irsp) + 8),
4219                                         *(((uint32_t *)irsp) + 9),
4220                                         *(((uint32_t *)irsp) + 10),
4221                                         *(((uint32_t *)irsp) + 11),
4222                                         *(((uint32_t *)irsp) + 12),
4223                                         *(((uint32_t *)irsp) + 13),
4224                                         *(((uint32_t *)irsp) + 14),
4225                                         *(((uint32_t *)irsp) + 15));
4226                 } else {
4227                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4228                                         "0321 Rsp Ring %d error: "
4229                                         "IOCB Data: "
4230                                         "x%x x%x x%x x%x\n",
4231                                         pring->ringno,
4232                                         rspiocbp->wcqe_cmpl.word0,
4233                                         rspiocbp->wcqe_cmpl.total_data_placed,
4234                                         rspiocbp->wcqe_cmpl.parameter,
4235                                         rspiocbp->wcqe_cmpl.word3);
4236                 }
4237         }
4238
4239
4240         /*
4241          * Fetch the iocb command type and call the correct completion
4242          * routine. Solicited and Unsolicited IOCBs on the ELS ring
4243          * get freed back to the lpfc_iocb_list by the discovery
4244          * kernel thread.
4245          */
4246         cmd_type = ulp_command & CMD_IOCB_MASK;
4247         type = lpfc_sli_iocb_cmd_type(cmd_type);
4248         switch (type) {
4249         case LPFC_SOL_IOCB:
4250                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4251                 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
4252                 spin_lock_irqsave(&phba->hbalock, iflag);
4253                 break;
4254         case LPFC_UNSOL_IOCB:
4255                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4256                 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
4257                 spin_lock_irqsave(&phba->hbalock, iflag);
4258                 if (!rc)
4259                         free_saveq = 0;
4260                 break;
4261         case LPFC_ABORT_IOCB:
4262                 cmdiocb = NULL;
4263                 if (ulp_command != CMD_XRI_ABORTED_CX)
4264                         cmdiocb = lpfc_sli_iocbq_lookup(phba, pring,
4265                                                         saveq);
4266                 if (cmdiocb) {
4267                         /* Call the specified completion routine */
4268                         if (cmdiocb->cmd_cmpl) {
4269                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4270                                 cmdiocb->cmd_cmpl(phba, cmdiocb, saveq);
4271                                 spin_lock_irqsave(&phba->hbalock, iflag);
4272                         } else {
4273                                 __lpfc_sli_release_iocbq(phba, cmdiocb);
4274                         }
4275                 }
4276                 break;
4277         case LPFC_UNKNOWN_IOCB:
4278                 if (ulp_command == CMD_ADAPTER_MSG) {
4279                         char adaptermsg[LPFC_MAX_ADPTMSG];
4280
4281                         memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4282                         memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe,
4283                                MAX_MSG_DATA);
4284                         dev_warn(&((phba->pcidev)->dev),
4285                                  "lpfc%d: %s\n",
4286                                  phba->brd_no, adaptermsg);
4287                 } else {
4288                         /* Unknown command */
4289                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4290                                         "0335 Unknown IOCB "
4291                                         "command Data: x%x "
4292                                         "x%x x%x x%x\n",
4293                                         ulp_command,
4294                                         ulp_status,
4295                                         get_wqe_reqtag(rspiocbp),
4296                                         get_job_ulpcontext(phba, rspiocbp));
4297                 }
4298                 break;
4299         }
4300
4301         if (free_saveq) {
4302                 list_for_each_entry_safe(rspiocbp, next_iocb,
4303                                          &saveq->list, list) {
4304                         list_del_init(&rspiocbp->list);
4305                         __lpfc_sli_release_iocbq(phba, rspiocbp);
4306                 }
4307                 __lpfc_sli_release_iocbq(phba, saveq);
4308         }
4309         rspiocbp = NULL;
4310         spin_unlock_irqrestore(&phba->hbalock, iflag);
4311         return rspiocbp;
4312 }
4313
4314 /**
4315  * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
4316  * @phba: Pointer to HBA context object.
4317  * @pring: Pointer to driver SLI ring object.
4318  * @mask: Host attention register mask for this ring.
4319  *
4320  * This routine wraps the actual slow_ring event process routine from the
4321  * API jump table function pointer from the lpfc_hba struct.
4322  **/
4323 void
4324 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
4325                                 struct lpfc_sli_ring *pring, uint32_t mask)
4326 {
4327         phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
4328 }
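
/*
 * A sketch of how the jump table entry used above is expected to be bound
 * at init time, per SLI revision (illustrative, not a verbatim copy of the
 * driver's API table setup):
 *
 *	switch (phba->sli_rev) {
 *	case LPFC_SLI_REV2:
 *	case LPFC_SLI_REV3:
 *		phba->lpfc_sli_handle_slow_ring_event =
 *				lpfc_sli_handle_slow_ring_event_s3;
 *		break;
 *	case LPFC_SLI_REV4:
 *		phba->lpfc_sli_handle_slow_ring_event =
 *				lpfc_sli_handle_slow_ring_event_s4;
 *		break;
 *	}
 */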
4329
4330 /**
4331  * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
4332  * @phba: Pointer to HBA context object.
4333  * @pring: Pointer to driver SLI ring object.
4334  * @mask: Host attention register mask for this ring.
4335  *
4336  * This function is called from the worker thread when there is a ring event
4337  * for non-fcp rings. The caller does not hold any lock. The function
4338  * removes each response iocb in the response ring and calls the handle
4339  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4340  **/
4341 static void
4342 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
4343                                    struct lpfc_sli_ring *pring, uint32_t mask)
4344 {
4345         struct lpfc_pgp *pgp;
4346         IOCB_t *entry;
4347         IOCB_t *irsp = NULL;
4348         struct lpfc_iocbq *rspiocbp = NULL;
4349         uint32_t portRspPut, portRspMax;
4350         unsigned long iflag;
4351         uint32_t status;
4352
4353         pgp = &phba->port_gp[pring->ringno];
4354         spin_lock_irqsave(&phba->hbalock, iflag);
4355         pring->stats.iocb_event++;
4356
4357         /*
4358          * The next available response entry should never exceed the maximum
4359          * entries.  If it does, treat it as an adapter hardware error.
4360          */
4361         portRspMax = pring->sli.sli3.numRiocb;
4362         portRspPut = le32_to_cpu(pgp->rspPutInx);
4363         if (portRspPut >= portRspMax) {
4364                 /*
4365                  * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
4366                  * rsp ring <portRspMax>
4367                  */
4368                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4369                                 "0303 Ring %d handler: portRspPut %d "
4370                                 "is bigger than rsp ring %d\n",
4371                                 pring->ringno, portRspPut, portRspMax);
4372
4373                 phba->link_state = LPFC_HBA_ERROR;
4374                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4375
4376                 phba->work_hs = HS_FFER3;
4377                 lpfc_handle_eratt(phba);
4378
4379                 return;
4380         }
4381
4382         rmb();
4383         while (pring->sli.sli3.rspidx != portRspPut) {
4384                 /*
4385                  * Build a completion list and call the appropriate handler.
4386                  * The process is to get the next available response iocb, get
4387                  * a free iocb from the list, copy the response data into the
4388                  * free iocb, insert it into the continuation list, and update
4389                  * the next response index to slim.  This process makes response
4390                  * iocbs in the ring available to DMA as fast as possible but
4391                  * pays a penalty for a copy operation.  Since the iocb is
4392                  * only 32 bytes, this penalty is considered small relative to
4393                  * the PCI reads for register values and a slim write.  When
4394                  * the ulpLe field is set, the entire command has been
4395                  * received.
4396                  */
4397                 entry = lpfc_resp_iocb(phba, pring);
4398
4399                 phba->last_completion_time = jiffies;
4400                 rspiocbp = __lpfc_sli_get_iocbq(phba);
4401                 if (rspiocbp == NULL) {
4402                         printk(KERN_ERR "%s: out of buffers! Failing "
4403                                "completion.\n", __func__);
4404                         break;
4405                 }
4406
4407                 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
4408                                       phba->iocb_rsp_size);
4409                 irsp = &rspiocbp->iocb;
4410
4411                 if (++pring->sli.sli3.rspidx >= portRspMax)
4412                         pring->sli.sli3.rspidx = 0;
4413
4414                 if (pring->ringno == LPFC_ELS_RING) {
4415                         lpfc_debugfs_slow_ring_trc(phba,
4416                         "IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
4417                                 *(((uint32_t *) irsp) + 4),
4418                                 *(((uint32_t *) irsp) + 6),
4419                                 *(((uint32_t *) irsp) + 7));
4420                 }
4421
4422                 writel(pring->sli.sli3.rspidx,
4423                         &phba->host_gp[pring->ringno].rspGetInx);
4424
4425                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4426                 /* Handle the response IOCB */
4427                 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
4428                 spin_lock_irqsave(&phba->hbalock, iflag);
4429
4430                 /*
4431                  * If the port response put pointer has not been updated, sync
4432                  * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
4433                  * response put pointer.
4434                  */
4435                 if (pring->sli.sli3.rspidx == portRspPut) {
4436                         portRspPut = le32_to_cpu(pgp->rspPutInx);
4437                 }
4438         } /* while (pring->sli.sli3.rspidx != portRspPut) */
4439
4440         if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
4441                 /* At least one response entry has been freed */
4442                 pring->stats.iocb_rsp_full++;
4443                 /* SET RxRE_RSP in Chip Att register */
4444                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4445                 writel(status, phba->CAregaddr);
4446                 readl(phba->CAregaddr); /* flush */
4447         }
4448         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4449                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4450                 pring->stats.iocb_cmd_empty++;
4451
4452                 /* Force update of the local copy of cmdGetInx */
4453                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4454                 lpfc_sli_resume_iocb(phba, pring);
4455
4456                 if ((pring->lpfc_sli_cmd_available))
4457                         (pring->lpfc_sli_cmd_available) (phba, pring);
4458
4459         }
4460
4461         spin_unlock_irqrestore(&phba->hbalock, iflag);
4462         return;
4463 }
4464
4465 /**
4466  * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
4467  * @phba: Pointer to HBA context object.
4468  * @pring: Pointer to driver SLI ring object.
4469  * @mask: Host attention register mask for this ring.
4470  *
4471  * This function is called from the worker thread when there is a pending
4472  * ELS response iocb on the driver internal slow-path response iocb worker
4473  * queue. The caller does not hold any lock. The function removes each
4474  * response iocb from the response worker queue and calls the handle
4475  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4476  **/
4477 static void
4478 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4479                                    struct lpfc_sli_ring *pring, uint32_t mask)
4480 {
4481         struct lpfc_iocbq *irspiocbq;
4482         struct hbq_dmabuf *dmabuf;
4483         struct lpfc_cq_event *cq_event;
4484         unsigned long iflag;
4485         int count = 0;
4486
4487         spin_lock_irqsave(&phba->hbalock, iflag);
4488         phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
4489         spin_unlock_irqrestore(&phba->hbalock, iflag);
4490         while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4491                 /* Get the response iocb from the head of work queue */
4492                 spin_lock_irqsave(&phba->hbalock, iflag);
4493                 list_remove_head(&phba->sli4_hba.sp_queue_event,
4494                                  cq_event, struct lpfc_cq_event, list);
4495                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4496
4497                 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4498                 case CQE_CODE_COMPL_WQE:
4499                         irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4500                                                  cq_event);
4501                         /* Translate ELS WCQE to response IOCBQ */
4502                         irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba,
4503                                                                       irspiocbq);
4504                         if (irspiocbq)
4505                                 lpfc_sli_sp_handle_rspiocb(phba, pring,
4506                                                            irspiocbq);
4507                         count++;
4508                         break;
4509                 case CQE_CODE_RECEIVE:
4510                 case CQE_CODE_RECEIVE_V1:
4511                         dmabuf = container_of(cq_event, struct hbq_dmabuf,
4512                                               cq_event);
4513                         lpfc_sli4_handle_received_buffer(phba, dmabuf);
4514                         count++;
4515                         break;
4516                 default:
4517                         break;
4518                 }
4519
4520                 /* Limit the number of events to 64 to avoid soft lockups */
4521                 if (count == 64)
4522                         break;
4523         }
4524 }
4525
4526 /**
4527  * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
4528  * @phba: Pointer to HBA context object.
4529  * @pring: Pointer to driver SLI ring object.
4530  *
4531  * This function aborts all iocbs in the given ring and frees all the iocb
4532  * objects in txq. This function issues an abort iocb for all the iocb commands
4533  * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4534  * the return of this function. The caller is not required to hold any locks.
4535  **/
4536 void
4537 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4538 {
4539         LIST_HEAD(completions);
4540         struct lpfc_iocbq *iocb, *next_iocb;
4541
4542         if (pring->ringno == LPFC_ELS_RING) {
4543                 lpfc_fabric_abort_hba(phba);
4544         }
4545
4546         /* Error everything on txq and txcmplq
4547          * First do the txq.
4548          */
4549         if (phba->sli_rev >= LPFC_SLI_REV4) {
4550                 spin_lock_irq(&pring->ring_lock);
4551                 list_splice_init(&pring->txq, &completions);
4552                 pring->txq_cnt = 0;
4553                 spin_unlock_irq(&pring->ring_lock);
4554
4555                 spin_lock_irq(&phba->hbalock);
4556                 /* Next issue ABTS for everything on the txcmplq */
4557                 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4558                         lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
4559                 spin_unlock_irq(&phba->hbalock);
4560         } else {
4561                 spin_lock_irq(&phba->hbalock);
4562                 list_splice_init(&pring->txq, &completions);
4563                 pring->txq_cnt = 0;
4564
4565                 /* Next issue ABTS for everything on the txcmplq */
4566                 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4567                         lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
4568                 spin_unlock_irq(&phba->hbalock);
4569         }
4570         /* Make sure HBA is alive */
4571         lpfc_issue_hb_tmo(phba);
4572
4573         /* Cancel all the IOCBs from the completions list */
4574         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4575                               IOERR_SLI_ABORTED);
4576 }
4577
4578 /**
4579  * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4580  * @phba: Pointer to HBA context object.
4581  *
4582  * This function aborts all iocbs in FCP rings and frees all the iocb
4583  * objects in txq. This function issues an abort iocb for all the iocb commands
4584  * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4585  * the return of this function. The caller is not required to hold any locks.
4586  **/
4587 void
4588 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4589 {
4590         struct lpfc_sli *psli = &phba->sli;
4591         struct lpfc_sli_ring  *pring;
4592         uint32_t i;
4593
4594         /* Look on all the FCP Rings for the iotag */
4595         if (phba->sli_rev >= LPFC_SLI_REV4) {
4596                 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4597                         pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4598                         lpfc_sli_abort_iocb_ring(phba, pring);
4599                 }
4600         } else {
4601                 pring = &psli->sli3_ring[LPFC_FCP_RING];
4602                 lpfc_sli_abort_iocb_ring(phba, pring);
4603         }
4604 }
4605
4606 /**
4607  * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
4608  * @phba: Pointer to HBA context object.
4609  *
4610  * This function flushes all iocbs in the IO ring and frees all the iocb
4611  * objects in txq and txcmplq. This function will not issue abort iocbs
4612  * for all the iocb commands in txcmplq, they will just be returned with
4613  * for the iocb commands in txcmplq; they are simply returned with
4614  * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
4615  * slot has been permanently disabled.
4616 void
4617 lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4618 {
4619         LIST_HEAD(txq);
4620         LIST_HEAD(txcmplq);
4621         struct lpfc_sli *psli = &phba->sli;
4622         struct lpfc_sli_ring  *pring;
4623         uint32_t i;
4624         struct lpfc_iocbq *piocb, *next_iocb;
4625
4626         spin_lock_irq(&phba->hbalock);
4627         if (phba->hba_flag & HBA_IOQ_FLUSH ||
4628             !phba->sli4_hba.hdwq) {
4629                 spin_unlock_irq(&phba->hbalock);
4630                 return;
4631         }
4632         /* Indicate the I/O queues are flushed */
4633         phba->hba_flag |= HBA_IOQ_FLUSH;
4634         spin_unlock_irq(&phba->hbalock);
4635
4636         /* Look on all the FCP Rings for the iotag */
4637         if (phba->sli_rev >= LPFC_SLI_REV4) {
4638                 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4639                         pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4640
4641                         spin_lock_irq(&pring->ring_lock);
4642                         /* Retrieve everything on txq */
4643                         list_splice_init(&pring->txq, &txq);
4644                         list_for_each_entry_safe(piocb, next_iocb,
4645                                                  &pring->txcmplq, list)
4646                                 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4647                         /* Retrieve everything on the txcmplq */
4648                         list_splice_init(&pring->txcmplq, &txcmplq);
4649                         pring->txq_cnt = 0;
4650                         pring->txcmplq_cnt = 0;
4651                         spin_unlock_irq(&pring->ring_lock);
4652
4653                         /* Flush the txq */
4654                         lpfc_sli_cancel_iocbs(phba, &txq,
4655                                               IOSTAT_LOCAL_REJECT,
4656                                               IOERR_SLI_DOWN);
4657                         /* Flush the txcmplq */
4658                         lpfc_sli_cancel_iocbs(phba, &txcmplq,
4659                                               IOSTAT_LOCAL_REJECT,
4660                                               IOERR_SLI_DOWN);
4661                         if (unlikely(pci_channel_offline(phba->pcidev)))
4662                                 lpfc_sli4_io_xri_aborted(phba, NULL, 0);
4663                 }
4664         } else {
4665                 pring = &psli->sli3_ring[LPFC_FCP_RING];
4666
4667                 spin_lock_irq(&phba->hbalock);
4668                 /* Retrieve everything on txq */
4669                 list_splice_init(&pring->txq, &txq);
4670                 list_for_each_entry_safe(piocb, next_iocb,
4671                                          &pring->txcmplq, list)
4672                         piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4673                 /* Retrieve everything on the txcmplq */
4674                 list_splice_init(&pring->txcmplq, &txcmplq);
4675                 pring->txq_cnt = 0;
4676                 pring->txcmplq_cnt = 0;
4677                 spin_unlock_irq(&phba->hbalock);
4678
4679                 /* Flush the txq */
4680                 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4681                                       IOERR_SLI_DOWN);
4682                 /* Flush the txcmpq */
4683                 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4684                                       IOERR_SLI_DOWN);
4685         }
4686 }
4687
4688 /**
4689  * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4690  * @phba: Pointer to HBA context object.
4691  * @mask: Bit mask to be checked.
4692  *
4693  * This function reads the host status register and compares it
4694  * with the provided bit mask to check if the HBA completed
4695  * the restart. This function will wait in a loop for the
4696  * HBA to complete the restart. If the HBA does not restart within
4697  * 15 iterations, the function will reset the HBA again. The
4698  * function returns 1 when the HBA fails to restart; otherwise it
4699  * returns zero.
4700  **/
4701 static int
4702 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4703 {
4704         uint32_t status;
4705         int i = 0;
4706         int retval = 0;
4707
4708         /* Read the HBA Host Status Register */
4709         if (lpfc_readl(phba->HSregaddr, &status))
4710                 return 1;
4711
4712         phba->hba_flag |= HBA_NEEDS_CFG_PORT;
4713
4714         /*
4715          * Check the status register every 10ms for 5 retries, then every
4716          * 500ms for 5, then every 2.5 sec for 5, then reset the board and
4717          * check every 2.5 sec for 4 more.
4718          * Break out of the loop if errors occurred during init.
4719          */
4720         while (((status & mask) != mask) &&
4721                !(status & HS_FFERM) &&
4722                i++ < 20) {
4723
4724                 if (i <= 5)
4725                         msleep(10);
4726                 else if (i <= 10)
4727                         msleep(500);
4728                 else
4729                         msleep(2500);
4730
4731                 if (i == 15) {
4732                         /* Do post */
4733                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4734                         lpfc_sli_brdrestart(phba);
4735                 }
4736                 /* Read the HBA Host Status Register */
4737                 if (lpfc_readl(phba->HSregaddr, &status)) {
4738                         retval = 1;
4739                         break;
4740                 }
4741         }
4742
4743         /* Check to see if any errors occurred during init */
4744         if ((status & HS_FFERM) || (i >= 20)) {
4745                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4746                                 "2751 Adapter failed to restart, "
4747                                 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4748                                 status,
4749                                 readl(phba->MBslimaddr + 0xa8),
4750                                 readl(phba->MBslimaddr + 0xac));
4751                 phba->link_state = LPFC_HBA_ERROR;
4752                 retval = 1;
4753         }
4754
4755         return retval;
4756 }
4757
4758 /**
4759  * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4760  * @phba: Pointer to HBA context object.
4761  * @mask: Bit mask to be checked.
4762  *
4763  * This function checks the host status register to see if the HBA is
4764  * ready. This function will wait in a loop for the HBA to be ready.
4765  * If the HBA is not ready, the function will reset the HBA PCI
4766  * function again. The function returns 1 when the HBA fails to be
4767  * ready; otherwise it returns zero.
4768  **/
4769 static int
4770 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4771 {
4772         uint32_t status;
4773         int retval = 0;
4774
4775         /* Read the HBA Host Status Register */
4776         status = lpfc_sli4_post_status_check(phba);
4777
4778         if (status) {
4779                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4780                 lpfc_sli_brdrestart(phba);
4781                 status = lpfc_sli4_post_status_check(phba);
4782         }
4783
4784         /* Check to see if any errors occurred during init */
4785         if (status) {
4786                 phba->link_state = LPFC_HBA_ERROR;
4787                 retval = 1;
4788         } else
4789                 phba->sli4_hba.intr_enable = 0;
4790
4791         phba->hba_flag &= ~HBA_SETUP;
4792         return retval;
4793 }
4794
4795 /**
4796  * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4797  * @phba: Pointer to HBA context object.
4798  * @mask: Bit mask to be checked.
4799  *
4800  * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4801  * from the API jump table function pointer from the lpfc_hba struct.
4802  **/
4803 int
4804 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4805 {
4806         return phba->lpfc_sli_brdready(phba, mask);
4807 }
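
/*
 * Illustrative usage only (a sketch, not a verbatim caller from this
 * driver; HS_FFRDY and HS_MBRDY are SLI3 host status register bits a
 * caller might wait on):
 *
 *	if (lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY))
 *		phba->link_state = LPFC_HBA_ERROR;
 */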
4808
4809 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4810
4811 /**
4812  * lpfc_reset_barrier - Make HBA ready for HBA reset
4813  * @phba: Pointer to HBA context object.
4814  *
4815  * This function is called, with hbalock held, before resetting an HBA.
4816  * It requests the HBA to quiesce DMA activity before the reset.
4817  **/
4818 void lpfc_reset_barrier(struct lpfc_hba *phba)
4819 {
4820         uint32_t __iomem *resp_buf;
4821         uint32_t __iomem *mbox_buf;
4822         volatile struct MAILBOX_word0 mbox;
4823         uint32_t hc_copy, ha_copy, resp_data;
4824         int  i;
4825         uint8_t hdrtype;
4826
4827         lockdep_assert_held(&phba->hbalock);
4828
4829         pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4830         if (hdrtype != 0x80 ||
4831             (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4832              FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4833                 return;
4834
4835         /*
4836          * Tell the other part of the chip to temporarily suspend all
4837          * its DMA activity.
4838          */
4839         resp_buf = phba->MBslimaddr;
4840
4841         /* Disable the error attention */
4842         if (lpfc_readl(phba->HCregaddr, &hc_copy))
4843                 return;
4844         writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4845         readl(phba->HCregaddr); /* flush */
4846         phba->link_flag |= LS_IGNORE_ERATT;
4847
4848         if (lpfc_readl(phba->HAregaddr, &ha_copy))
4849                 return;
4850         if (ha_copy & HA_ERATT) {
4851                 /* Clear Chip error bit */
4852                 writel(HA_ERATT, phba->HAregaddr);
4853                 phba->pport->stopped = 1;
4854         }
4855
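        /* Build a chip-owned KILL_BOARD mailbox command in word0 format */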
4856         mbox.word0 = 0;
4857         mbox.mbxCommand = MBX_KILL_BOARD;
4858         mbox.mbxOwner = OWN_CHIP;
4859
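        /*
         * Post the test pattern; the chip acknowledges the suspend request
         * by overwriting the pattern with its one's complement.
         */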
4860         writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4861         mbox_buf = phba->MBslimaddr;
4862         writel(mbox.word0, mbox_buf);
4863
4864         for (i = 0; i < 50; i++) {
4865                 if (lpfc_readl((resp_buf + 1), &resp_data))
4866                         return;
4867                 if (resp_data != ~(BARRIER_TEST_PATTERN))
4868                         mdelay(1);
4869                 else
4870                         break;
4871         }
4872         resp_data = 0;
4873         if (lpfc_readl((resp_buf + 1), &resp_data))
4874                 return;
4875         if (resp_data  != ~(BARRIER_TEST_PATTERN)) {
4876                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4877                     phba->pport->stopped)
4878                         goto restore_hc;
4879                 else
4880                         goto clear_errat;
4881         }
4882
4883         mbox.mbxOwner = OWN_HOST;
4884         resp_data = 0;
4885         for (i = 0; i < 500; i++) {
4886                 if (lpfc_readl(resp_buf, &resp_data))
4887                         return;
4888                 if (resp_data != mbox.word0)
4889                         mdelay(1);
4890                 else
4891                         break;
4892         }
4893
4894 clear_errat:
4895
4896         while (++i < 500) {
4897                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4898                         return;
4899                 if (!(ha_copy & HA_ERATT))
4900                         mdelay(1);
4901                 else
4902                         break;
4903         }
4904
4905         if (readl(phba->HAregaddr) & HA_ERATT) {
4906                 writel(HA_ERATT, phba->HAregaddr);
4907                 phba->pport->stopped = 1;
4908         }
4909
4910 restore_hc:
4911         phba->link_flag &= ~LS_IGNORE_ERATT;
4912         writel(hc_copy, phba->HCregaddr);
4913         readl(phba->HCregaddr); /* flush */
4914 }
4915
4916 /**
4917  * lpfc_sli_brdkill - Issue a kill_board mailbox command
4918  * @phba: Pointer to HBA context object.
4919  *
4920  * This function issues a kill_board mailbox command and waits for
4921  * the error attention interrupt. This function is called for stopping
4922  * the firmware processing. The caller is not required to hold any
4923  * locks. This function calls lpfc_hba_down_post function to free
4924  * any pending commands after the kill. The function returns 1 when it
4925  * fails to kill the board; otherwise it returns 0.
4926  **/
4927 int
4928 lpfc_sli_brdkill(struct lpfc_hba *phba)
4929 {
4930         struct lpfc_sli *psli;
4931         LPFC_MBOXQ_t *pmb;
4932         uint32_t status;
4933         uint32_t ha_copy;
4934         int retval;
4935         int i = 0;
4936
4937         psli = &phba->sli;
4938
4939         /* Kill HBA */
4940         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4941                         "0329 Kill HBA Data: x%x x%x\n",
4942                         phba->pport->port_state, psli->sli_flag);
4943
4944         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4945         if (!pmb)
4946                 return 1;
4947
4948         /* Disable the error attention */
4949         spin_lock_irq(&phba->hbalock);
4950         if (lpfc_readl(phba->HCregaddr, &status)) {
4951                 spin_unlock_irq(&phba->hbalock);
4952                 mempool_free(pmb, phba->mbox_mem_pool);
4953                 return 1;
4954         }
4955         status &= ~HC_ERINT_ENA;
4956         writel(status, phba->HCregaddr);
4957         readl(phba->HCregaddr); /* flush */
4958         phba->link_flag |= LS_IGNORE_ERATT;
4959         spin_unlock_irq(&phba->hbalock);
4960
4961         lpfc_kill_board(phba, pmb);
4962         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4963         retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4964
4965         if (retval != MBX_SUCCESS) {
4966                 if (retval != MBX_BUSY)
4967                         mempool_free(pmb, phba->mbox_mem_pool);
4968                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4969                                 "2752 KILL_BOARD command failed retval %d\n",
4970                                 retval);
4971                 spin_lock_irq(&phba->hbalock);
4972                 phba->link_flag &= ~LS_IGNORE_ERATT;
4973                 spin_unlock_irq(&phba->hbalock);
4974                 return 1;
4975         }
4976
4977         spin_lock_irq(&phba->hbalock);
4978         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4979         spin_unlock_irq(&phba->hbalock);
4980
4981         mempool_free(pmb, phba->mbox_mem_pool);
4982
4983         /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4984          * attention every 100ms for 3 seconds. If we don't get ERATT after
4985          * 3 seconds we still set HBA_ERROR state because the status of the
4986          * board is now undefined.
4987          */
4988         if (lpfc_readl(phba->HAregaddr, &ha_copy))
4989                 return 1;
4990         while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4991                 mdelay(100);
4992                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4993                         return 1;
4994         }
4995
4996         del_timer_sync(&psli->mbox_tmo);
4997         if (ha_copy & HA_ERATT) {
4998                 writel(HA_ERATT, phba->HAregaddr);
4999                 phba->pport->stopped = 1;
5000         }
5001         spin_lock_irq(&phba->hbalock);
5002         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5003         psli->mbox_active = NULL;
5004         phba->link_flag &= ~LS_IGNORE_ERATT;
5005         spin_unlock_irq(&phba->hbalock);
5006
5007         lpfc_hba_down_post(phba);
5008         phba->link_state = LPFC_HBA_ERROR;
5009
5010         return ha_copy & HA_ERATT ? 0 : 1;
5011 }
5012
5013 /**
5014  * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
5015  * @phba: Pointer to HBA context object.
5016  *
5017  * This function resets the HBA by writing HC_INITFF to the control
5018  * register. After the HBA resets, this function resets all the iocb ring
5019  * indices. This function disables PCI layer parity checking during
5020  * the reset.
5021  * This function returns 0 on success, or -EIO on a PCI config access failure.
5022  * The caller is not required to hold any locks.
5023  **/
5024 int
5025 lpfc_sli_brdreset(struct lpfc_hba *phba)
5026 {
5027         struct lpfc_sli *psli;
5028         struct lpfc_sli_ring *pring;
5029         uint16_t cfg_value;
5030         int i;
5031
5032         psli = &phba->sli;
5033
5034         /* Reset HBA */
5035         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5036                         "0325 Reset HBA Data: x%x x%x\n",
5037                         (phba->pport) ? phba->pport->port_state : 0,
5038                         psli->sli_flag);
5039
5040         /* perform board reset */
5041         phba->fc_eventTag = 0;
5042         phba->link_events = 0;
5043         phba->hba_flag |= HBA_NEEDS_CFG_PORT;
5044         if (phba->pport) {
5045                 phba->pport->fc_myDID = 0;
5046                 phba->pport->fc_prevDID = 0;
5047         }
5048
5049         /* Turn off parity checking and serr during the physical reset */
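        /* This prevents transient bus activity during the INITFF toggle
         * from being latched as PCI parity or system errors.
         */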
5050         if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
5051                 return -EIO;
5052
5053         pci_write_config_word(phba->pcidev, PCI_COMMAND,
5054                               (cfg_value &
5055                                ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5056
5057         psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
5058
5059         /* Now toggle INITFF bit in the Host Control Register */
5060         writel(HC_INITFF, phba->HCregaddr);
5061         mdelay(1);
5062         readl(phba->HCregaddr); /* flush */
5063         writel(0, phba->HCregaddr);
5064         readl(phba->HCregaddr); /* flush */
5065
5066         /* Restore PCI cmd register */
5067         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5068
5069         /* Initialize relevant SLI info */
5070         for (i = 0; i < psli->num_rings; i++) {
5071                 pring = &psli->sli3_ring[i];
5072                 pring->flag = 0;
5073                 pring->sli.sli3.rspidx = 0;
5074                 pring->sli.sli3.next_cmdidx  = 0;
5075                 pring->sli.sli3.local_getidx = 0;
5076                 pring->sli.sli3.cmdidx = 0;
5077                 pring->missbufcnt = 0;
5078         }
5079
5080         phba->link_state = LPFC_WARM_START;
5081         return 0;
5082 }
5083
5084 /**
5085  * lpfc_sli4_brdreset - Reset a sli-4 HBA
5086  * @phba: Pointer to HBA context object.
5087  *
5088  * This function resets a SLI4 HBA. This function disables PCI layer parity
5089  * checking while resetting the device. The caller is not required to hold
5090  * any locks.
5091  *
5092  * This function returns 0 on success, else a negative error code.
5093  **/
5094 int
5095 lpfc_sli4_brdreset(struct lpfc_hba *phba)
5096 {
5097         struct lpfc_sli *psli = &phba->sli;
5098         uint16_t cfg_value;
5099         int rc = 0;
5100
5101         /* Reset HBA */
5102         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5103                         "0295 Reset HBA Data: x%x x%x x%x\n",
5104                         phba->pport->port_state, psli->sli_flag,
5105                         phba->hba_flag);
5106
5107         /* perform board reset */
5108         phba->fc_eventTag = 0;
5109         phba->link_events = 0;
5110         phba->pport->fc_myDID = 0;
5111         phba->pport->fc_prevDID = 0;
5112         phba->hba_flag &= ~HBA_SETUP;
5113
5114         spin_lock_irq(&phba->hbalock);
5115         psli->sli_flag &= ~(LPFC_PROCESS_LA);
5116         phba->fcf.fcf_flag = 0;
5117         spin_unlock_irq(&phba->hbalock);
5118
5119         /* Now physically reset the device */
5120         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5121                         "0389 Performing PCI function reset!\n");
5122
5123         /* Turn off parity checking and serr during the physical reset */
5124         if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
5125                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5126                                 "3205 PCI read Config failed\n");
5127                 return -EIO;
5128         }
5129
5130         pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
5131                               ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5132
5133         /* Perform FCoE PCI function reset before freeing queue memory */
5134         rc = lpfc_pci_function_reset(phba);
5135
5136         /* Restore PCI cmd register */
5137         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5138
5139         return rc;
5140 }
5141
5142 /**
5143  * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
5144  * @phba: Pointer to HBA context object.
5145  *
5146  * This function is called in the SLI initialization code path to
5147  * restart the HBA. The caller is not required to hold any lock.
5148  * This function writes MBX_RESTART mailbox command to the SLIM and
5149  * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
5150  * function to free any pending commands. The function enables
5151  * POST only during the first initialization. The function returns zero.
5152  * The function does not guarantee completion of MBX_RESTART mailbox
5153  * command before the return of this function.
5154  **/
5155 static int
5156 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
5157 {
5158         volatile struct MAILBOX_word0 mb;
5159         struct lpfc_sli *psli;
5160         void __iomem *to_slim;
5161         uint32_t hba_aer_enabled;
5162
5163         spin_lock_irq(&phba->hbalock);
5164
5165         /* Save the PCIe device Advanced Error Reporting (AER) state */
5166         hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
5167
5168         psli = &phba->sli;
5169
5170         /* Restart HBA */
5171         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5172                         "0337 Restart HBA Data: x%x x%x\n",
5173                         (phba->pport) ? phba->pport->port_state : 0,
5174                         psli->sli_flag);
5175
5176         mb.word0 = 0;
5177         mb.mbxCommand = MBX_RESTART;
5178         mb.mbxHc = 1;
5179
5180         lpfc_reset_barrier(phba);
5181
5182         to_slim = phba->MBslimaddr;
5183         writel(mb.word0, to_slim);
5184         readl(to_slim); /* flush */
5185
5186         /* Only skip post after fc_ffinit is completed */
5187         if (phba->pport && phba->pport->port_state)
5188                 mb.word0 = 1;   /* This is really setting up word1 */
5189         else
5190                 mb.word0 = 0;   /* This is really setting up word1 */
5191         to_slim = phba->MBslimaddr + sizeof (uint32_t);
5192         writel(mb.word0, to_slim);
5193         readl(to_slim); /* flush */
5194
5195         lpfc_sli_brdreset(phba);
5196         if (phba->pport)
5197                 phba->pport->stopped = 0;
5198         phba->link_state = LPFC_INIT_START;
5199         phba->hba_flag = 0;
5200         spin_unlock_irq(&phba->hbalock);
5201
5202         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5203         psli->stats_start = ktime_get_seconds();
5204
5205         /* Give the INITFF and Post time to settle. */
5206         mdelay(100);
5207
5208         /* Disable HBA AER if it was enabled; note hba_flag was cleared above */
5209         if (hba_aer_enabled)
5210                 pci_disable_pcie_error_reporting(phba->pcidev);
5211
5212         lpfc_hba_down_post(phba);
5213
5214         return 0;
5215 }
5216
5217 /**
5218  * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
5219  * @phba: Pointer to HBA context object.
5220  *
5221  * This function is called in the SLI initialization code path to restart
5222  * a SLI4 HBA. The caller is not required to hold any lock.
5223  * At the end of the function, it calls lpfc_hba_down_post function to
5224  * free any pending commands.
5225  **/
5226 static int
5227 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
5228 {
5229         struct lpfc_sli *psli = &phba->sli;
5230         uint32_t hba_aer_enabled;
5231         int rc;
5232
5233         /* Restart HBA */
5234         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5235                         "0296 Restart HBA Data: x%x x%x\n",
5236                         phba->pport->port_state, psli->sli_flag);
5237
5238         /* Save the PCIe device Advanced Error Reporting (AER) state */
5239         hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
5240
5241         rc = lpfc_sli4_brdreset(phba);
5242         if (rc) {
5243                 phba->link_state = LPFC_HBA_ERROR;
5244                 goto hba_down_queue;
5245         }
5246
5247         spin_lock_irq(&phba->hbalock);
5248         phba->pport->stopped = 0;
5249         phba->link_state = LPFC_INIT_START;
5250         phba->hba_flag = 0;
5251         spin_unlock_irq(&phba->hbalock);
5252
5253         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5254         psli->stats_start = ktime_get_seconds();
5255
5256         /* Disable HBA AER if it was enabled; note hba_flag was cleared above */
5257         if (hba_aer_enabled)
5258                 pci_disable_pcie_error_reporting(phba->pcidev);
5259
5260 hba_down_queue:
5261         lpfc_hba_down_post(phba);
5262         lpfc_sli4_queue_destroy(phba);
5263
5264         return rc;
5265 }
5266
5267 /**
5268  * lpfc_sli_brdrestart - Wrapper func for restarting hba
5269  * @phba: Pointer to HBA context object.
5270  *
5271  * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
5272  * API jump table function pointer from the lpfc_hba struct.
5273 **/
5274 int
5275 lpfc_sli_brdrestart(struct lpfc_hba *phba)
5276 {
5277         return phba->lpfc_sli_brdrestart(phba);
5278 }
5279
5280 /**
5281  * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
5282  * @phba: Pointer to HBA context object.
5283  *
5284  * This function is called after an HBA restart to wait for the HBA to
5285  * come ready; success is indicated by the HS_FFRDY and HS_MBRDY status
5286  * bits. If the HBA is still not ready after roughly 60 seconds of polling
5287  * (150 iterations), the function restarts the HBA once more. It returns
5288  * zero if the HBA restarted successfully, else a negative error code.
5289  **/
5290 int
5291 lpfc_sli_chipset_init(struct lpfc_hba *phba)
5292 {
5293         uint32_t status, i = 0;
5294
5295         /* Read the HBA Host Status Register */
5296         if (lpfc_readl(phba->HSregaddr, &status))
5297                 return -EIO;
5298
5299         /* Check status register to see what current state is */
5300         i = 0;
5301         while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
5302
5303                 /* Check every 10ms for 10 retries, then every 100ms for 90
5304                  * retries, then every 1 sec for 50 retries, ~60 seconds in
5305                  * total, before resetting the board and then checking every
5306                  * 1 sec for another 50 retries. Waiting up to 60 seconds for
5307                  * board ready is required for Falcon FIPS zeroization to
5308                  * complete; any board reset in between would restart the
5309                  * zeroization and further delay board readiness.
5310                  */
5311                 if (i++ >= 200) {
5312                         /* Adapter failed to init, timeout, status reg
5313                            <status> */
5314                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5315                                         "0436 Adapter failed to init, "
5316                                         "timeout, status reg x%x, "
5317                                         "FW Data: A8 x%x AC x%x\n", status,
5318                                         readl(phba->MBslimaddr + 0xa8),
5319                                         readl(phba->MBslimaddr + 0xac));
5320                         phba->link_state = LPFC_HBA_ERROR;
5321                         return -ETIMEDOUT;
5322                 }
5323
5324                 /* Check to see if any errors occurred during init */
5325                 if (status & HS_FFERM) {
5326                         /* ERROR: During chipset initialization */
5327                         /* Adapter failed to init, chipset, status reg
5328                            <status> */
5329                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5330                                         "0437 Adapter failed to init, "
5331                                         "chipset, status reg x%x, "
5332                                         "FW Data: A8 x%x AC x%x\n", status,
5333                                         readl(phba->MBslimaddr + 0xa8),
5334                                         readl(phba->MBslimaddr + 0xac));
5335                         phba->link_state = LPFC_HBA_ERROR;
5336                         return -EIO;
5337                 }
5338
5339                 if (i <= 10)
5340                         msleep(10);
5341                 else if (i <= 100)
5342                         msleep(100);
5343                 else
5344                         msleep(1000);
5345
5346                 if (i == 150) {
5347                         /* Do post */
5348                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5349                         lpfc_sli_brdrestart(phba);
5350                 }
5351                 /* Read the HBA Host Status Register */
5352                 if (lpfc_readl(phba->HSregaddr, &status))
5353                         return -EIO;
5354         }
5355
5356         /* Check to see if any errors occurred during init */
5357         if (status & HS_FFERM) {
5358                 /* ERROR: During chipset initialization */
5359                 /* Adapter failed to init, chipset, status reg <status> */
5360                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5361                                 "0438 Adapter failed to init, chipset, "
5362                                 "status reg x%x, "
5363                                 "FW Data: A8 x%x AC x%x\n", status,
5364                                 readl(phba->MBslimaddr + 0xa8),
5365                                 readl(phba->MBslimaddr + 0xac));
5366                 phba->link_state = LPFC_HBA_ERROR;
5367                 return -EIO;
5368         }
5369
5370         phba->hba_flag |= HBA_NEEDS_CFG_PORT;
5371
5372         /* Clear all interrupt enable conditions */
5373         writel(0, phba->HCregaddr);
5374         readl(phba->HCregaddr); /* flush */
5375
5376         /* set up the host attn register */
5377         writel(0xffffffff, phba->HAregaddr);
5378         readl(phba->HAregaddr); /* flush */
5379         return 0;
5380 }
5381
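/*
 * Editorial sketch of the polling cadence implemented above (the helper
 * below is hypothetical, for illustration only): 10 polls at 10ms, then
 * 90 at 100ms, then 1s polls, so the i == 150 restart lands at roughly
 * 60 seconds and the i >= 200 timeout about 50 seconds after that.
 */
static inline unsigned int lpfc_example_poll_interval_ms(unsigned int i)
{
        if (i <= 10)
                return 10;      /* first 10 polls: every 10ms */
        if (i <= 100)
                return 100;     /* next 90 polls: every 100ms */
        return 1000;            /* remaining polls: every 1s */
}
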
5382 /**
5383  * lpfc_sli_hbq_count - Get the number of HBQs to be configured
5384  *
5385  * This function calculates and returns the number of HBQs required to be
5386  * configured.
5387  **/
5388 int
5389 lpfc_sli_hbq_count(void)
5390 {
5391         return ARRAY_SIZE(lpfc_hbq_defs);
5392 }
5393
5394 /**
5395  * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
5396  *
5397  * This function adds the number of hbq entries in every HBQ to get
5398  * the total number of hbq entries required for the HBA and returns
5399  * the total count.
5400  **/
5401 static int
5402 lpfc_sli_hbq_entry_count(void)
5403 {
5404         int  hbq_count = lpfc_sli_hbq_count();
5405         int  count = 0;
5406         int  i;
5407
5408         for (i = 0; i < hbq_count; ++i)
5409                 count += lpfc_hbq_defs[i]->entry_count;
5410         return count;
5411 }
5412
5413 /**
5414  * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
5415  *
5416  * This function calculates amount of memory required for all hbq entries
5417  * to be configured and returns the total memory required.
5418  **/
5419 int
5420 lpfc_sli_hbq_size(void)
5421 {
5422         return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
5423 }
5424
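/*
 * Editorial sketch (hypothetical helper, not driver code): the two
 * routines above reduce to
 *   total_bytes = (sum of all HBQ entry counts) * sizeof(entry).
 */
static inline size_t lpfc_example_hbq_bytes(const int *entry_counts,
                                            int hbq_count, size_t entry_size)
{
        size_t total = 0;
        int i;

        for (i = 0; i < hbq_count; i++)
                total += entry_counts[i] * entry_size;
        return total;
}
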
5425 /**
5426  * lpfc_sli_hbq_setup - configure and initialize HBQs
5427  * @phba: Pointer to HBA context object.
5428  *
5429  * This function is called during the SLI initialization to configure
5430  * all the HBQs and post buffers to the HBQ. The caller is not
5431  * required to hold any locks. This function will return zero if successful
5432  * else it will return negative error code.
5433  **/
5434 static int
5435 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
5436 {
5437         int  hbq_count = lpfc_sli_hbq_count();
5438         LPFC_MBOXQ_t *pmb;
5439         MAILBOX_t *pmbox;
5440         uint32_t hbqno;
5441         uint32_t hbq_entry_index;
5442
5443         /* Get a Mailbox buffer to set up mailbox
5444          * commands for HBA initialization.
5445          */
5446         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5447
5448         if (!pmb)
5449                 return -ENOMEM;
5450
5451         pmbox = &pmb->u.mb;
5452
5453         /* Initialize the struct lpfc_sli_hbq structure for each hbq */
5454         phba->link_state = LPFC_INIT_MBX_CMDS;
5455         phba->hbq_in_use = 1;
5456
5457         hbq_entry_index = 0;
5458         for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
5459                 phba->hbqs[hbqno].next_hbqPutIdx = 0;
5460                 phba->hbqs[hbqno].hbqPutIdx      = 0;
5461                 phba->hbqs[hbqno].local_hbqGetIdx   = 0;
5462                 phba->hbqs[hbqno].entry_count =
5463                         lpfc_hbq_defs[hbqno]->entry_count;
5464                 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
5465                         hbq_entry_index, pmb);
5466                 hbq_entry_index += phba->hbqs[hbqno].entry_count;
5467
5468                 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
5469                         /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
5470                            mbxStatus <status>, ring <num> */
5471
5472                         lpfc_printf_log(phba, KERN_ERR,
5473                                         LOG_SLI | LOG_VPORT,
5474                                         "1805 Adapter failed to init. "
5475                                         "Data: x%x x%x x%x\n",
5476                                         pmbox->mbxCommand,
5477                                         pmbox->mbxStatus, hbqno);
5478
5479                         phba->link_state = LPFC_HBA_ERROR;
5480                         mempool_free(pmb, phba->mbox_mem_pool);
5481                         return -ENXIO;
5482                 }
5483         }
5484         phba->hbq_count = hbq_count;
5485
5486         mempool_free(pmb, phba->mbox_mem_pool);
5487
5488         /* Initially populate or replenish the HBQs */
5489         for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5490                 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
5491         return 0;
5492 }
5493
5494 /**
5495  * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
5496  * @phba: Pointer to HBA context object.
5497  *
5498  * This function is called during SLI4 initialization to configure the
5499  * receive buffers and post them to the HBA. The caller is not
5500  * required to hold any locks. This function currently always
5501  * returns zero.
5502  **/
5503 static int
5504 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5505 {
5506         phba->hbq_in_use = 1;
5507         /*
5508          * Specific case when MDS diagnostics are enabled and supported.
5509          * The receive buffer count is halved to manage the incoming
5510          * traffic.
5511          */
5512         if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5513                 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5514                         lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5515         else
5516                 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5517                         lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5518         phba->hbq_count = 1;
5519         lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
5520         /* Initially populate or replenish the HBQs */
5521         return 0;
5522 }
5523
5524 /**
5525  * lpfc_sli_config_port - Issue config port mailbox command
5526  * @phba: Pointer to HBA context object.
5527  * @sli_mode: sli mode - 2/3
5528  *
5529  * This function is called by the sli initialization code path
5530  * to issue config_port mailbox command. This function restarts the
5531  * HBA firmware and issues a config_port mailbox command to configure
5532  * the SLI interface in the sli mode specified by sli_mode
5533  * variable. The caller is not required to hold any locks.
5534  * The function returns 0 if successful, else returns negative error
5535  * code.
5536  **/
5537 int
5538 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
5539 {
5540         LPFC_MBOXQ_t *pmb;
5541         uint32_t resetcount = 0, rc = 0, done = 0;
5542
5543         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5544         if (!pmb) {
5545                 phba->link_state = LPFC_HBA_ERROR;
5546                 return -ENOMEM;
5547         }
5548
5549         phba->sli_rev = sli_mode;
5550         while (resetcount < 2 && !done) {
5551                 spin_lock_irq(&phba->hbalock);
5552                 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5553                 spin_unlock_irq(&phba->hbalock);
5554                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5555                 lpfc_sli_brdrestart(phba);
5556                 rc = lpfc_sli_chipset_init(phba);
5557                 if (rc)
5558                         break;
5559
5560                 spin_lock_irq(&phba->hbalock);
5561                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5562                 spin_unlock_irq(&phba->hbalock);
5563                 resetcount++;
5564
5565                 /* Call pre CONFIG_PORT mailbox command initialization.  A
5566                  * value of 0 means the call was successful.  Any nonzero
5567                  * value is a failure, but if -ERESTART is returned, the
5568                  * driver may reset the HBA and try again.
5569                  */
5570                 rc = lpfc_config_port_prep(phba);
5571                 if (rc == -ERESTART) {
5572                         phba->link_state = LPFC_LINK_UNKNOWN;
5573                         continue;
5574                 } else if (rc)
5575                         break;
5576
5577                 phba->link_state = LPFC_INIT_MBX_CMDS;
5578                 lpfc_config_port(phba, pmb);
5579                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5580                 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5581                                         LPFC_SLI3_HBQ_ENABLED |
5582                                         LPFC_SLI3_CRP_ENABLED |
5583                                         LPFC_SLI3_DSS_ENABLED);
5584                 if (rc != MBX_SUCCESS) {
5585                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5586                                 "0442 Adapter failed to init, mbxCmd x%x "
5587                                 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5588                                 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5589                         spin_lock_irq(&phba->hbalock);
5590                         phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5591                         spin_unlock_irq(&phba->hbalock);
5592                         rc = -ENXIO;
5593                 } else {
5594                         /* Allow asynchronous mailbox command to go through */
5595                         spin_lock_irq(&phba->hbalock);
5596                         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5597                         spin_unlock_irq(&phba->hbalock);
5598                         done = 1;
5599
5600                         if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5601                             (pmb->u.mb.un.varCfgPort.gasabt == 0))
5602                                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5603                                         "3110 Port did not grant ASABT\n");
5604                 }
5605         }
5606         if (!done) {
5607                 rc = -EINVAL;
5608                 goto do_prep_failed;
5609         }
5610         if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5611                 if (!pmb->u.mb.un.varCfgPort.cMA) {
5612                         rc = -ENXIO;
5613                         goto do_prep_failed;
5614                 }
5615                 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5616                         phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5617                         phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5618                         phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5619                                 phba->max_vpi : phba->max_vports;
5620
5621                 } else
5622                         phba->max_vpi = 0;
5623                 if (pmb->u.mb.un.varCfgPort.gerbm)
5624                         phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5625                 if (pmb->u.mb.un.varCfgPort.gcrp)
5626                         phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5627
5628                 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5629                 phba->port_gp = phba->mbox->us.s3_pgp.port;
5630
5631                 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5632                         if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5633                                 phba->cfg_enable_bg = 0;
5634                                 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5635                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5636                                                 "0443 Adapter did not grant "
5637                                                 "BlockGuard\n");
5638                         }
5639                 }
5640         } else {
5641                 phba->hbq_get = NULL;
5642                 phba->port_gp = phba->mbox->us.s2.port;
5643                 phba->max_vpi = 0;
5644         }
5645 do_prep_failed:
5646         mempool_free(pmb, phba->mbox_mem_pool);
5647         return rc;
5648 }
5649
5650
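/*
 * Editorial sketch of the bounded retry loop in lpfc_sli_config_port()
 * (hypothetical helper): retry the sequence at most 'limit' times, and
 * only when the preparation step asks for a restart.
 */
static inline int lpfc_example_bounded_retry(int (*attempt)(void),
                                             int restart_rc, int limit)
{
        int rc = -EINVAL;
        int tries;

        for (tries = 0; tries < limit; tries++) {
                rc = attempt();
                if (rc != restart_rc)
                        break;  /* success or a hard failure: stop */
        }
        return rc;
}
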
5651 /**
5652  * lpfc_sli_hba_setup - SLI initialization function
5653  * @phba: Pointer to HBA context object.
5654  *
5655  * This function is the main SLI initialization function. This function
5656  * is called by the HBA initialization code, HBA reset code and HBA
5657  * error attention handler code. Caller is not required to hold any
5658  * locks. This function issues config_port mailbox command to configure
5659  * the SLI, setup iocb rings and HBQ rings. In the end the function
5660  * calls the config_port_post function to issue init_link mailbox
5661  * command and to start the discovery. The function will return zero
5662  * if successful, else it will return negative error code.
5663  **/
5664 int
5665 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5666 {
5667         uint32_t rc;
5668         int  i;
5669         int longs;
5670
5671         /* The ISR enable path already issued CONFIG_PORT (config_msi mbox) */
5672         if (phba->hba_flag & HBA_NEEDS_CFG_PORT) {
5673                 rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
5674                 if (rc)
5675                         return -EIO;
5676                 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
5677         }
5678         phba->fcp_embed_io = 0; /* SLI4 FC support only */
5679
5680         /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5681         if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5682                 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5683                 if (!rc) {
5684                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5685                                         "2709 This device supports "
5686                                         "Advanced Error Reporting (AER)\n");
5687                         spin_lock_irq(&phba->hbalock);
5688                         phba->hba_flag |= HBA_AER_ENABLED;
5689                         spin_unlock_irq(&phba->hbalock);
5690                 } else {
5691                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5692                                         "2708 This device does not support "
5693                                         "Advanced Error Reporting (AER): %d\n",
5694                                         rc);
5695                         phba->cfg_aer_support = 0;
5696                 }
5697         }
5698
5699         if (phba->sli_rev == 3) {
5700                 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5701                 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5702         } else {
5703                 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5704                 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5705                 phba->sli3_options = 0;
5706         }
5707
5708         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5709                         "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5710                         phba->sli_rev, phba->max_vpi);
5711         rc = lpfc_sli_ring_map(phba);
5712
5713         if (rc)
5714                 goto lpfc_sli_hba_setup_error;
5715
5716         /* Initialize VPIs. */
5717         if (phba->sli_rev == LPFC_SLI_REV3) {
5718                 /*
5719                  * The VPI bitmask and physical ID array are allocated
5720                  * and initialized once only - at driver load.  A port
5721                  * reset doesn't need to reinitialize this memory.
5722                  */
5723                 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5724                         longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5725                         phba->vpi_bmask = kcalloc(longs,
5726                                                   sizeof(unsigned long),
5727                                                   GFP_KERNEL);
5728                         if (!phba->vpi_bmask) {
5729                                 rc = -ENOMEM;
5730                                 goto lpfc_sli_hba_setup_error;
5731                         }
5732
5733                         phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5734                                                 sizeof(uint16_t),
5735                                                 GFP_KERNEL);
5736                         if (!phba->vpi_ids) {
5737                                 kfree(phba->vpi_bmask);
5738                                 rc = -ENOMEM;
5739                                 goto lpfc_sli_hba_setup_error;
5740                         }
5741                         for (i = 0; i < phba->max_vpi; i++)
5742                                 phba->vpi_ids[i] = i;
5743                 }
5744         }
5745
5746         /* Init HBQs */
5747         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5748                 rc = lpfc_sli_hbq_setup(phba);
5749                 if (rc)
5750                         goto lpfc_sli_hba_setup_error;
5751         }
5752         spin_lock_irq(&phba->hbalock);
5753         phba->sli.sli_flag |= LPFC_PROCESS_LA;
5754         spin_unlock_irq(&phba->hbalock);
5755
5756         rc = lpfc_config_port_post(phba);
5757         if (rc)
5758                 goto lpfc_sli_hba_setup_error;
5759
5760         return rc;
5761
5762 lpfc_sli_hba_setup_error:
5763         phba->link_state = LPFC_HBA_ERROR;
5764         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5765                         "0445 Firmware initialization failed\n");
5766         return rc;
5767 }
5768
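/*
 * Editorial sketch of the VPI bookkeeping set up above (hypothetical
 * helper): an in-use bitmask plus a parallel id array that starts as an
 * identity mapping.
 */
static inline int lpfc_example_id_pool_init(unsigned long **bmask, u16 **ids,
                                            u16 max_id)
{
        u16 i;

        *bmask = kcalloc(BITS_TO_LONGS(max_id + 1), sizeof(unsigned long),
                         GFP_KERNEL);
        if (!*bmask)
                return -ENOMEM;
        *ids = kcalloc(max_id + 1, sizeof(u16), GFP_KERNEL);
        if (!*ids) {
                kfree(*bmask);
                *bmask = NULL;
                return -ENOMEM;
        }
        for (i = 0; i <= max_id; i++)
                (*ids)[i] = i;  /* identity map until the port remaps */
        return 0;
}
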
5769 /**
5770  * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5771  * @phba: Pointer to HBA context object.
5772  *
5773  * This function issues a dump mailbox command to read config region
5774  * 23, parses the records in the region, and populates the driver
5775  * data structures.
5776  **/
5777 static int
5778 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5779 {
5780         LPFC_MBOXQ_t *mboxq;
5781         struct lpfc_dmabuf *mp;
5782         struct lpfc_mqe *mqe;
5783         uint32_t data_length;
5784         int rc;
5785
5786         /* Program the default value of vlan_id and fc_map */
5787         phba->valid_vlan = 0;
5788         phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5789         phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5790         phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5791
5792         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5793         if (!mboxq)
5794                 return -ENOMEM;
5795
5796         mqe = &mboxq->u.mqe;
5797         if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5798                 rc = -ENOMEM;
5799                 goto out_free_mboxq;
5800         }
5801
5802         mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5803         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5804
5805         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5806                         "(%d):2571 Mailbox cmd x%x Status x%x "
5807                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5808                         "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5809                         "CQ: x%x x%x x%x x%x\n",
5810                         mboxq->vport ? mboxq->vport->vpi : 0,
5811                         bf_get(lpfc_mqe_command, mqe),
5812                         bf_get(lpfc_mqe_status, mqe),
5813                         mqe->un.mb_words[0], mqe->un.mb_words[1],
5814                         mqe->un.mb_words[2], mqe->un.mb_words[3],
5815                         mqe->un.mb_words[4], mqe->un.mb_words[5],
5816                         mqe->un.mb_words[6], mqe->un.mb_words[7],
5817                         mqe->un.mb_words[8], mqe->un.mb_words[9],
5818                         mqe->un.mb_words[10], mqe->un.mb_words[11],
5819                         mqe->un.mb_words[12], mqe->un.mb_words[13],
5820                         mqe->un.mb_words[14], mqe->un.mb_words[15],
5821                         mqe->un.mb_words[16], mqe->un.mb_words[50],
5822                         mboxq->mcqe.word0,
5823                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
5824                         mboxq->mcqe.trailer);
5825
5826         if (rc) {
5827                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5828                 kfree(mp);
5829                 rc = -EIO;
5830                 goto out_free_mboxq;
5831         }
5832         data_length = mqe->un.mb_words[5];
5833         if (data_length > DMP_RGN23_SIZE) {
5834                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5835                 kfree(mp);
5836                 rc = -EIO;
5837                 goto out_free_mboxq;
5838         }
5839
5840         lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5841         lpfc_mbuf_free(phba, mp->virt, mp->phys);
5842         kfree(mp);
5843         rc = 0;
5844
5845 out_free_mboxq:
5846         mempool_free(mboxq, phba->mbox_mem_pool);
5847         return rc;
5848 }
5849
5850 /**
5851  * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5852  * @phba: pointer to lpfc hba data structure.
5853  * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5854  * @vpd: pointer to the memory to hold resulting port vpd data.
5855  * @vpd_size: On input, the number of bytes allocated to @vpd.
5856  *            On output, the number of data bytes in @vpd.
5857  *
5858  * This routine executes a READ_REV SLI4 mailbox command.  In
5859  * addition, this routine gets the port vpd data.
5860  *
5861  * Return codes
5862  *      0 - successful
5863  *      -ENOMEM - could not allocate memory.
5864  **/
5865 static int
5866 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5867                     uint8_t *vpd, uint32_t *vpd_size)
5868 {
5869         int rc = 0;
5870         uint32_t dma_size;
5871         struct lpfc_dmabuf *dmabuf;
5872         struct lpfc_mqe *mqe;
5873
5874         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5875         if (!dmabuf)
5876                 return -ENOMEM;
5877
5878         /*
5879          * Get a DMA buffer for the vpd data resulting from the READ_REV
5880          * mailbox command.
5881          */
5882         dma_size = *vpd_size;
5883         dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5884                                           &dmabuf->phys, GFP_KERNEL);
5885         if (!dmabuf->virt) {
5886                 kfree(dmabuf);
5887                 return -ENOMEM;
5888         }
5889
5890         /*
5891          * The SLI4 implementation of READ_REV conflicts at word1,
5892          * bits 31:16 and SLI4 adds vpd functionality not present
5893          * in SLI3.  This code corrects the conflicts.
5894          */
5895         lpfc_read_rev(phba, mboxq);
5896         mqe = &mboxq->u.mqe;
5897         mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5898         mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5899         mqe->un.read_rev.word1 &= 0x0000FFFF;
5900         bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5901         bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5902
5903         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5904         if (rc) {
5905                 dma_free_coherent(&phba->pcidev->dev, dma_size,
5906                                   dmabuf->virt, dmabuf->phys);
5907                 kfree(dmabuf);
5908                 return -EIO;
5909         }
5910
5911         /*
5912          * The available vpd length cannot be bigger than the
5913          * DMA buffer passed to the port.  Catch the less than
5914          * case and update the caller's size.
5915          */
5916         if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5917                 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5918
5919         memcpy(vpd, dmabuf->virt, *vpd_size);
5920
5921         dma_free_coherent(&phba->pcidev->dev, dma_size,
5922                           dmabuf->virt, dmabuf->phys);
5923         kfree(dmabuf);
5924         return 0;
5925 }
5926
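/*
 * Editorial sketch of the coherent-DMA round trip used by the READ_REV
 * path above (hypothetical helper, error handling trimmed): allocate a
 * buffer the port can DMA the vpd data into, hand the bus address to
 * the command, then copy out and free.
 */
static inline void *lpfc_example_dma_buf_get(struct device *dev, size_t size,
                                             dma_addr_t *phys)
{
        /* The caller passes *phys to the hardware, reads via the
         * returned virtual address, and later pairs this call with
         * dma_free_coherent(dev, size, virt, *phys).
         */
        return dma_alloc_coherent(dev, size, phys, GFP_KERNEL);
}
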
5927 /**
5928  * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5929  * @phba: pointer to lpfc hba data structure.
5930  *
5931  * This routine retrieves the controller attributes of the SLI4 device
5932  * this PCI function is attached to.
5933  *
5934  * Return codes
5935  *      0 - successful
5936  *      otherwise - failed to retrieve controller attributes
5937  **/
5938 static int
5939 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5940 {
5941         LPFC_MBOXQ_t *mboxq;
5942         struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5943         struct lpfc_controller_attribute *cntl_attr;
5944         void *virtaddr = NULL;
5945         uint32_t alloclen, reqlen;
5946         uint32_t shdr_status, shdr_add_status;
5947         union lpfc_sli4_cfg_shdr *shdr;
5948         int rc;
5949
5950         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5951         if (!mboxq)
5952                 return -ENOMEM;
5953
5954         /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5955         reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5956         alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5957                         LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5958                         LPFC_SLI4_MBX_NEMBED);
5959
5960         if (alloclen < reqlen) {
5961                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5962                                 "3084 Allocated DMA memory size (%d) is "
5963                                 "less than the requested DMA memory size "
5964                                 "(%d)\n", alloclen, reqlen);
5965                 rc = -ENOMEM;
5966                 goto out_free_mboxq;
5967         }
5968         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5969         virtaddr = mboxq->sge_array->addr[0];
5970         mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5971         shdr = &mbx_cntl_attr->cfg_shdr;
5972         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5973         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5974         if (shdr_status || shdr_add_status || rc) {
5975                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5976                                 "3085 Mailbox x%x (x%x/x%x) failed, "
5977                                 "rc:x%x, status:x%x, add_status:x%x\n",
5978                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5979                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5980                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5981                                 rc, shdr_status, shdr_add_status);
5982                 rc = -ENXIO;
5983                 goto out_free_mboxq;
5984         }
5985
5986         cntl_attr = &mbx_cntl_attr->cntl_attr;
5987         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5988         phba->sli4_hba.lnk_info.lnk_tp =
5989                 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5990         phba->sli4_hba.lnk_info.lnk_no =
5991                 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5992         phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
5993         phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
5994
5995         memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
5996         strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
5997                 sizeof(phba->BIOSVersion));
5998
5999         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6000                         "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
6001                         "flash_id: x%02x, asic_rev: x%02x\n",
6002                         phba->sli4_hba.lnk_info.lnk_tp,
6003                         phba->sli4_hba.lnk_info.lnk_no,
6004                         phba->BIOSVersion, phba->sli4_hba.flash_id,
6005                         phba->sli4_hba.asic_rev);
6006 out_free_mboxq:
6007         if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6008                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
6009         else
6010                 mempool_free(mboxq, phba->mbox_mem_pool);
6011         return rc;
6012 }
6013
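/*
 * Editorial sketch of the SLI4 config-header status check repeated in
 * the mailbox paths above and below (hypothetical helper): a command
 * succeeded only if the issue rc, header status, and header additional
 * status are all zero.
 */
static inline bool lpfc_example_cfg_shdr_ok(int rc, u32 shdr_status,
                                            u32 shdr_add_status)
{
        return rc == 0 && shdr_status == 0 && shdr_add_status == 0;
}
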
6014 /**
6015  * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
6016  * @phba: pointer to lpfc hba data structure.
6017  *
6018  * This routine retrieves the physical port name of the SLI4 device port
6019  * this PCI function is attached to.
6020  *
6021  * Return codes
6022  *      0 - successful
6023  *      otherwise - failed to retrieve physical port name
6024  **/
6025 static int
6026 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
6027 {
6028         LPFC_MBOXQ_t *mboxq;
6029         struct lpfc_mbx_get_port_name *get_port_name;
6030         uint32_t shdr_status, shdr_add_status;
6031         union lpfc_sli4_cfg_shdr *shdr;
6032         char cport_name = 0;
6033         int rc;
6034
6035         /* We assume nothing at this point */
6036         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
6037         phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
6038
6039         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6040         if (!mboxq)
6041                 return -ENOMEM;
6042         /* obtain link type and link number via READ_CONFIG */
6043         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
6044         lpfc_sli4_read_config(phba);
6045         if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
6046                 goto retrieve_ppname;
6047
6048         /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
6049         rc = lpfc_sli4_get_ctl_attr(phba);
6050         if (rc)
6051                 goto out_free_mboxq;
6052
6053 retrieve_ppname:
6054         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6055                 LPFC_MBOX_OPCODE_GET_PORT_NAME,
6056                 sizeof(struct lpfc_mbx_get_port_name) -
6057                 sizeof(struct lpfc_sli4_cfg_mhdr),
6058                 LPFC_SLI4_MBX_EMBED);
6059         get_port_name = &mboxq->u.mqe.un.get_port_name;
6060         shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
6061         bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
6062         bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
6063                 phba->sli4_hba.lnk_info.lnk_tp);
6064         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6065         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6066         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6067         if (shdr_status || shdr_add_status || rc) {
6068                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6069                                 "3087 Mailbox x%x (x%x/x%x) failed: "
6070                                 "rc:x%x, status:x%x, add_status:x%x\n",
6071                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6072                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
6073                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6074                                 rc, shdr_status, shdr_add_status);
6075                 rc = -ENXIO;
6076                 goto out_free_mboxq;
6077         }
6078         switch (phba->sli4_hba.lnk_info.lnk_no) {
6079         case LPFC_LINK_NUMBER_0:
6080                 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
6081                                 &get_port_name->u.response);
6082                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6083                 break;
6084         case LPFC_LINK_NUMBER_1:
6085                 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
6086                                 &get_port_name->u.response);
6087                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6088                 break;
6089         case LPFC_LINK_NUMBER_2:
6090                 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
6091                                 &get_port_name->u.response);
6092                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6093                 break;
6094         case LPFC_LINK_NUMBER_3:
6095                 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
6096                                 &get_port_name->u.response);
6097                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6098                 break;
6099         default:
6100                 break;
6101         }
6102
6103         if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
6104                 phba->Port[0] = cport_name;
6105                 phba->Port[1] = '\0';
6106                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6107                                 "3091 SLI get port name: %s\n", phba->Port);
6108         }
6109
6110 out_free_mboxq:
6111         if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6112                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
6113         else
6114                 mempool_free(mboxq, phba->mbox_mem_pool);
6115         return rc;
6116 }
6117
6118 /**
6119  * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
6120  * @phba: pointer to lpfc hba data structure.
6121  *
6122  * This routine is called to explicitly arm the SLI4 device's completion and
6123  * event queues.
6124  **/
6125 static void
6126 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
6127 {
6128         int qidx;
6129         struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
6130         struct lpfc_sli4_hdw_queue *qp;
6131         struct lpfc_queue *eq;
6132
6133         sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
6134         sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
6135         if (sli4_hba->nvmels_cq)
6136                 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
6137                                            LPFC_QUEUE_REARM);
6138
6139         if (sli4_hba->hdwq) {
6140                 /* Loop thru all Hardware Queues */
6141                 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
6142                         qp = &sli4_hba->hdwq[qidx];
6143                         /* ARM the corresponding CQ */
6144                         sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
6145                                                 LPFC_QUEUE_REARM);
6146                 }
6147
6148                 /* Loop thru all IRQ vectors */
6149                 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
6150                         eq = sli4_hba->hba_eq_hdl[qidx].eq;
6151                         /* ARM the corresponding EQ */
6152                         sli4_hba->sli4_write_eq_db(phba, eq,
6153                                                    0, LPFC_QUEUE_REARM);
6154                 }
6155         }
6156
6157         if (phba->nvmet_support) {
6158                 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
6159                         sli4_hba->sli4_write_cq_db(phba,
6160                                 sli4_hba->nvmet_cqset[qidx], 0,
6161                                 LPFC_QUEUE_REARM);
6162                 }
6163         }
6164 }
6165
6166 /**
6167  * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
6168  * @phba: Pointer to HBA context object.
6169  * @type: The resource extent type.
6170  * @extnt_count: buffer to hold port available extent count.
6171  * @extnt_size: buffer to hold element count per extent.
6172  *
6173  * This function calls the port and retrieves the number of available
6174  * extents and their size for a particular extent type.
6175  *
6176  * Returns: 0 if successful.  Nonzero otherwise.
6177  **/
6178 int
6179 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
6180                                uint16_t *extnt_count, uint16_t *extnt_size)
6181 {
6182         int rc = 0;
6183         uint32_t length;
6184         uint32_t mbox_tmo;
6185         struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
6186         LPFC_MBOXQ_t *mbox;
6187
6188         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6189         if (!mbox)
6190                 return -ENOMEM;
6191
6192         /* Find out how many extents are available for this resource type */
6193         length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
6194                   sizeof(struct lpfc_sli4_cfg_mhdr));
6195         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6196                          LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
6197                          length, LPFC_SLI4_MBX_EMBED);
6198
6199         /* Send an extents count of 0 - the GET doesn't use it. */
6200         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6201                                         LPFC_SLI4_MBX_EMBED);
6202         if (unlikely(rc)) {
6203                 rc = -EIO;
6204                 goto err_exit;
6205         }
6206
6207         if (!phba->sli4_hba.intr_enable)
6208                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6209         else {
6210                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6211                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6212         }
6213         if (unlikely(rc)) {
6214                 rc = -EIO;
6215                 goto err_exit;
6216         }
6217
6218         rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
6219         if (bf_get(lpfc_mbox_hdr_status,
6220                    &rsrc_info->header.cfg_shdr.response)) {
6221                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6222                                 "2930 Failed to get resource extents "
6223                                 "Status 0x%x Add'l Status 0x%x\n",
6224                                 bf_get(lpfc_mbox_hdr_status,
6225                                        &rsrc_info->header.cfg_shdr.response),
6226                                 bf_get(lpfc_mbox_hdr_add_status,
6227                                        &rsrc_info->header.cfg_shdr.response));
6228                 rc = -EIO;
6229                 goto err_exit;
6230         }
6231
6232         *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
6233                               &rsrc_info->u.rsp);
6234         *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
6235                              &rsrc_info->u.rsp);
6236
6237         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6238                         "3162 Retrieved extents type-%d from port: count:%d, "
6239                         "size:%d\n", type, *extnt_count, *extnt_size);
6240
6241 err_exit:
6242         mempool_free(mbox, phba->mbox_mem_pool);
6243         return rc;
6244 }
6245
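/*
 * Editorial sketch of the mailbox submission pattern used above and in
 * lpfc_sli4_cfg_post_extnts() below (hypothetical helper): poll when
 * interrupts are not yet enabled, otherwise sleep-wait with the
 * command-specific timeout.
 */
static inline int lpfc_example_issue_mbox(struct lpfc_hba *phba,
                                          LPFC_MBOXQ_t *mbox)
{
        if (!phba->sli4_hba.intr_enable)
                return lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
        return lpfc_sli_issue_mbox_wait(phba, mbox,
                                        lpfc_mbox_tmo_val(phba, mbox));
}
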
6246 /**
6247  * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
6248  * @phba: Pointer to HBA context object.
6249  * @type: The extent type to check.
6250  *
6251  * This function reads the current available extents from the port and checks
6252  * if the extent count or extent size has changed since the last access.
6253  * Callers use this routine after a port reset to determine if there is an
6254  * extent reprovisioning requirement.
6255  *
6256  * Returns:
6257  *   -Error: a negative errno value indicates a problem.
6258  *   1: Extent count or size has changed.
6259  *   0: No changes.
6260  **/
6261 static int
6262 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
6263 {
6264         uint16_t curr_ext_cnt, rsrc_ext_cnt;
6265         uint16_t size_diff, rsrc_ext_size;
6266         int rc = 0;
6267         struct lpfc_rsrc_blks *rsrc_entry;
6268         struct list_head *rsrc_blk_list = NULL;
6269
6270         size_diff = 0;
6271         curr_ext_cnt = 0;
6272         rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6273                                             &rsrc_ext_cnt,
6274                                             &rsrc_ext_size);
6275         if (unlikely(rc))
6276                 return -EIO;
6277
6278         switch (type) {
6279         case LPFC_RSC_TYPE_FCOE_RPI:
6280                 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6281                 break;
6282         case LPFC_RSC_TYPE_FCOE_VPI:
6283                 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
6284                 break;
6285         case LPFC_RSC_TYPE_FCOE_XRI:
6286                 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6287                 break;
6288         case LPFC_RSC_TYPE_FCOE_VFI:
6289                 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6290                 break;
6291         default:
6292                 break;
6293         }
6294
6295         list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
6296                 curr_ext_cnt++;
6297                 if (rsrc_entry->rsrc_size != rsrc_ext_size)
6298                         size_diff++;
6299         }
6300
6301         if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
6302                 rc = 1;
6303
6304         return rc;
6305 }
6306
6307 /**
6308  * lpfc_sli4_cfg_post_extnts - Issue a resource extents allocation request
6309  * @phba: Pointer to HBA context object.
6310  * @extnt_cnt: number of available extents.
6311  * @type: the extent type (rpi, xri, vfi, vpi).
6312  * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
6313  * @mbox: pointer to the caller's allocated mailbox structure.
6314  *
6315  * This function executes the extents allocation request.  It also
6316  * takes care of the amount of memory needed to allocate or get the
6317  * allocated extents. It is the caller's responsibility to evaluate
6318  * the response.
6319  *
6320  * Returns:
6321  *   -Error:  Error value describes the condition found.
6322  *   0: if successful
6323  **/
6324 static int
6325 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
6326                           uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
6327 {
6328         int rc = 0;
6329         uint32_t req_len;
6330         uint32_t emb_len;
6331         uint32_t alloc_len, mbox_tmo;
6332
6333         /* Calculate the total requested length of the dma memory */
6334         req_len = extnt_cnt * sizeof(uint16_t);
6335
6336         /*
6337          * Calculate the size of an embedded mailbox.  The uint32_t
6338          * accounts for the extents-specific word.
6339          */
6340         emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6341                 sizeof(uint32_t);
6342
6343         /*
6344          * Presume the allocation and response will fit into an embedded
6345          * mailbox.  If not true, reconfigure to a non-embedded mailbox.
6346          */
6347         *emb = LPFC_SLI4_MBX_EMBED;
6348         if (req_len > emb_len) {
6349                 req_len = extnt_cnt * sizeof(uint16_t) +
6350                         sizeof(union lpfc_sli4_cfg_shdr) +
6351                         sizeof(uint32_t);
6352                 *emb = LPFC_SLI4_MBX_NEMBED;
6353         }
6354
6355         alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6356                                      LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
6357                                      req_len, *emb);
6358         if (alloc_len < req_len) {
6359                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6360                         "2982 Allocated DMA memory size (x%x) is "
6361                         "less than the requested DMA memory "
6362                         "size (x%x)\n", alloc_len, req_len);
6363                 return -ENOMEM;
6364         }
6365         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
6366         if (unlikely(rc))
6367                 return -EIO;
6368
6369         if (!phba->sli4_hba.intr_enable)
6370                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6371         else {
6372                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6373                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6374         }
6375
6376         if (unlikely(rc))
6377                 rc = -EIO;
6378         return rc;
6379 }
6380
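/*
 * Editorial sketch of the embedded/non-embedded sizing decision above
 * (hypothetical helper): a request that fits in the mailbox itself is
 * embedded; a larger one goes non-embedded and must also carry the
 * config shdr and the extents-specific word.
 */
static inline u32 lpfc_example_extnt_req_len(u16 extnt_cnt, u32 emb_len,
                                             bool *embedded)
{
        u32 req_len = extnt_cnt * sizeof(u16);

        *embedded = (req_len <= emb_len);
        if (!*embedded)
                req_len += sizeof(union lpfc_sli4_cfg_shdr) + sizeof(u32);
        return req_len;
}
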
6381 /**
6382  * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
6383  * @phba: Pointer to HBA context object.
6384  * @type:  The resource extent type to allocate.
6385  *
6386  * This function allocates the number of elements for the specified
6387  * This function allocates the available resource extents for the specified
6388  * resource type and sets up the driver bookkeeping for them.
6389 static int
6390 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
6391 {
6392         bool emb = false;
6393         uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
6394         uint16_t rsrc_id, rsrc_start, j, k;
6395         uint16_t *ids;
6396         int i, rc;
6397         unsigned long longs;
6398         unsigned long *bmask;
6399         struct lpfc_rsrc_blks *rsrc_blks;
6400         LPFC_MBOXQ_t *mbox;
6401         uint32_t length;
6402         struct lpfc_id_range *id_array = NULL;
6403         void *virtaddr = NULL;
6404         struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6405         struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6406         struct list_head *ext_blk_list;
6407
6408         rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6409                                             &rsrc_cnt,
6410                                             &rsrc_size);
6411         if (unlikely(rc))
6412                 return -EIO;
6413
6414         if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
6415                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6416                         "3009 No available Resource Extents "
6417                         "for resource type 0x%x: Count: 0x%x, "
6418                         "Size 0x%x\n", type, rsrc_cnt,
6419                         rsrc_size);
6420                 return -ENOMEM;
6421         }
6422
6423         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
6424                         "2903 Post resource extents type-0x%x: "
6425                         "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
6426
6427         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6428         if (!mbox)
6429                 return -ENOMEM;
6430
6431         rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
6432         if (unlikely(rc)) {
6433                 rc = -EIO;
6434                 goto err_exit;
6435         }
6436
6437         /*
6438          * Figure out where the response is located.  Then get local pointers
6439          * to the response data.  The port is not guaranteed to grant the
6440          * full extent count requested, so update the local variable with
6441          * the count actually allocated by the port.
6442          */
6443         if (emb == LPFC_SLI4_MBX_EMBED) {
6444                 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6445                 id_array = &rsrc_ext->u.rsp.id[0];
6446                 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6447         } else {
6448                 virtaddr = mbox->sge_array->addr[0];
6449                 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6450                 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6451                 id_array = &n_rsrc->id;
6452         }
6453
6454         longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
6455         rsrc_id_cnt = rsrc_cnt * rsrc_size;
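             /*
              * Worked example (hypothetical numbers): a grant of 2 extents of
              * 64 ids each gives rsrc_id_cnt = 128; with 64-bit longs the
              * tracking bitmask then needs 128 / 64 = 2 longs.
              */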
6456
6457         /*
6458          * Allocate the type-specific bitmask and id array now that the
6459          * resource size and count are known.
6460          */
6461         length = sizeof(struct lpfc_rsrc_blks);
6462         switch (type) {
6463         case LPFC_RSC_TYPE_FCOE_RPI:
6464                 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6465                                                    sizeof(unsigned long),
6466                                                    GFP_KERNEL);
6467                 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6468                         rc = -ENOMEM;
6469                         goto err_exit;
6470                 }
6471                 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6472                                                  sizeof(uint16_t),
6473                                                  GFP_KERNEL);
6474                 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6475                         kfree(phba->sli4_hba.rpi_bmask);
6476                         rc = -ENOMEM;
6477                         goto err_exit;
6478                 }
6479
6480                 /*
6481                  * The next_rpi was initialized with the maximum available
6482                  * count but the port may allocate a smaller number.  Catch
6483                  * that case and update the next_rpi.
6484                  */
6485                 phba->sli4_hba.next_rpi = rsrc_id_cnt;
6486
6487                 /* Initialize local ptrs for common extent processing later. */
6488                 bmask = phba->sli4_hba.rpi_bmask;
6489                 ids = phba->sli4_hba.rpi_ids;
6490                 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6491                 break;
6492         case LPFC_RSC_TYPE_FCOE_VPI:
6493                 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6494                                           GFP_KERNEL);
6495                 if (unlikely(!phba->vpi_bmask)) {
6496                         rc = -ENOMEM;
6497                         goto err_exit;
6498                 }
6499                 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6500                                          GFP_KERNEL);
6501                 if (unlikely(!phba->vpi_ids)) {
6502                         kfree(phba->vpi_bmask);
6503                         rc = -ENOMEM;
6504                         goto err_exit;
6505                 }
6506
6507                 /* Initialize local ptrs for common extent processing later. */
6508                 bmask = phba->vpi_bmask;
6509                 ids = phba->vpi_ids;
6510                 ext_blk_list = &phba->lpfc_vpi_blk_list;
6511                 break;
6512         case LPFC_RSC_TYPE_FCOE_XRI:
6513                 phba->sli4_hba.xri_bmask = kcalloc(longs,
6514                                                    sizeof(unsigned long),
6515                                                    GFP_KERNEL);
6516                 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6517                         rc = -ENOMEM;
6518                         goto err_exit;
6519                 }
6520                 phba->sli4_hba.max_cfg_param.xri_used = 0;
6521                 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6522                                                  sizeof(uint16_t),
6523                                                  GFP_KERNEL);
6524                 if (unlikely(!phba->sli4_hba.xri_ids)) {
6525                         kfree(phba->sli4_hba.xri_bmask);
6526                         rc = -ENOMEM;
6527                         goto err_exit;
6528                 }
6529
6530                 /* Initialize local ptrs for common extent processing later. */
6531                 bmask = phba->sli4_hba.xri_bmask;
6532                 ids = phba->sli4_hba.xri_ids;
6533                 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6534                 break;
6535         case LPFC_RSC_TYPE_FCOE_VFI:
6536                 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6537                                                    sizeof(unsigned long),
6538                                                    GFP_KERNEL);
6539                 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6540                         rc = -ENOMEM;
6541                         goto err_exit;
6542                 }
6543                 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6544                                                  sizeof(uint16_t),
6545                                                  GFP_KERNEL);
6546                 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6547                         kfree(phba->sli4_hba.vfi_bmask);
6548                         rc = -ENOMEM;
6549                         goto err_exit;
6550                 }
6551
6552                 /* Initialize local ptrs for common extent processing later. */
6553                 bmask = phba->sli4_hba.vfi_bmask;
6554                 ids = phba->sli4_hba.vfi_ids;
6555                 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6556                 break;
6557         default:
6558                 /* Unsupported resource type.  Fail the call. */
6559                 id_array = NULL;
6560                 bmask = NULL;
6561                 ids = NULL;
6562                 ext_blk_list = NULL;
                     rc = -EIO;
6563                 goto err_exit;
6564         }
6565
6566         /*
6567          * Complete initializing the extent configuration with the
6568          * allocated ids assigned to this function.  The bitmask serves
6569          * as an index into the array and manages the available ids.  The
6570          * array just stores the ids communicated to the port via the wqes.
6571          */
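             /*
              * Each response word packs two 16-bit extent base ids, so the
              * loop below consumes id_array in word halves: extents 0 and 1
              * come from word 0 (low/high), extents 2 and 3 from word 1,
              * and so on.
              */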
6572         for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6573                 if ((i % 2) == 0)
6574                         rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6575                                          &id_array[k]);
6576                 else
6577                         rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6578                                          &id_array[k]);
6579
6580                 rsrc_blks = kzalloc(length, GFP_KERNEL);
6581                 if (unlikely(!rsrc_blks)) {
6582                         rc = -ENOMEM;
6583                         kfree(bmask);
6584                         kfree(ids);
6585                         goto err_exit;
6586                 }
6587                 rsrc_blks->rsrc_start = rsrc_id;
6588                 rsrc_blks->rsrc_size = rsrc_size;
6589                 list_add_tail(&rsrc_blks->list, ext_blk_list);
6590                 rsrc_start = rsrc_id;
6591                 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6592                         phba->sli4_hba.io_xri_start = rsrc_start +
6593                                 lpfc_sli4_get_iocb_cnt(phba);
6594                 }
6595
6596                 while (rsrc_id < (rsrc_start + rsrc_size)) {
6597                         ids[j] = rsrc_id;
6598                         rsrc_id++;
6599                         j++;
6600                 }
6601                 /* Entire word processed.  Get next word. */
6602                 if ((i % 2) == 1)
6603                         k++;
6604         }
6605  err_exit:
6606         lpfc_sli4_mbox_cmd_free(phba, mbox);
6607         return rc;
6608 }
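
     /*
      * Illustrative sketch only (not part of the driver): once an extent
      * type has been provisioned above, an id is claimed by scanning the
      * bitmask and mapping the bit index through the id array.  The helper
      * name below is hypothetical; it mirrors the driver's RPI allocation
      * pattern.
      */
     #if 0
     static uint16_t example_claim_rpi(struct lpfc_hba *phba)
     {
             unsigned long idx;

             /* The bitmask tracks in-use slots; rpi_ids maps slot to id. */
             idx = find_first_zero_bit(phba->sli4_hba.rpi_bmask,
                                       phba->sli4_hba.next_rpi);
             if (idx >= phba->sli4_hba.next_rpi)
                     return LPFC_RPI_ALLOC_ERROR;
             set_bit(idx, phba->sli4_hba.rpi_bmask);
             return phba->sli4_hba.rpi_ids[idx];
     }
     #endif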
6609
6612 /**
6613  * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6614  * @phba: Pointer to HBA context object.
6615  * @type: the extent's type.
6616  *
6617  * This function deallocates all extents of a particular resource type.
6618  * SLI4 does not allow for deallocating a particular extent range.  It
6619  * is the caller's responsibility to release all kernel memory resources.
6620  **/
6621 static int
6622 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6623 {
6624         int rc;
6625         uint32_t length, mbox_tmo = 0;
6626         LPFC_MBOXQ_t *mbox;
6627         struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6628         struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6629
6630         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6631         if (!mbox)
6632                 return -ENOMEM;
6633
6634         /*
6635          * This function sends an embedded mailbox because it only sends
6636          * the resource type.  All extents of this type are released by the
6637          * port.
6638          */
6639         length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6640                   sizeof(struct lpfc_sli4_cfg_mhdr));
6641         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6642                          LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6643                          length, LPFC_SLI4_MBX_EMBED);
6644
6645         /* Send an extents count of 0 - the dealloc doesn't use it. */
6646         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6647                                         LPFC_SLI4_MBX_EMBED);
6648         if (unlikely(rc)) {
6649                 rc = -EIO;
6650                 goto out_free_mbox;
6651         }
6652         if (!phba->sli4_hba.intr_enable)
6653                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6654         else {
6655                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6656                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6657         }
6658         if (unlikely(rc)) {
6659                 rc = -EIO;
6660                 goto out_free_mbox;
6661         }
6662
6663         dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6664         if (bf_get(lpfc_mbox_hdr_status,
6665                    &dealloc_rsrc->header.cfg_shdr.response)) {
6666                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6667                                 "2919 Failed to release resource extents "
6668                                 "for type %d - Status 0x%x Add'l Status 0x%x. "
6669                                 "Resource memory not released.\n",
6670                                 type,
6671                                 bf_get(lpfc_mbox_hdr_status,
6672                                     &dealloc_rsrc->header.cfg_shdr.response),
6673                                 bf_get(lpfc_mbox_hdr_add_status,
6674                                     &dealloc_rsrc->header.cfg_shdr.response));
6675                 rc = -EIO;
6676                 goto out_free_mbox;
6677         }
6678
6679         /* Release kernel memory resources for the specific type. */
6680         switch (type) {
6681         case LPFC_RSC_TYPE_FCOE_VPI:
6682                 kfree(phba->vpi_bmask);
6683                 kfree(phba->vpi_ids);
6684                 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6685                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6686                                     &phba->lpfc_vpi_blk_list, list) {
6687                         list_del_init(&rsrc_blk->list);
6688                         kfree(rsrc_blk);
6689                 }
6690                 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6691                 break;
6692         case LPFC_RSC_TYPE_FCOE_XRI:
6693                 kfree(phba->sli4_hba.xri_bmask);
6694                 kfree(phba->sli4_hba.xri_ids);
6695                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6696                                     &phba->sli4_hba.lpfc_xri_blk_list, list) {
6697                         list_del_init(&rsrc_blk->list);
6698                         kfree(rsrc_blk);
6699                 }
6700                 break;
6701         case LPFC_RSC_TYPE_FCOE_VFI:
6702                 kfree(phba->sli4_hba.vfi_bmask);
6703                 kfree(phba->sli4_hba.vfi_ids);
6704                 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6705                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6706                                     &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6707                         list_del_init(&rsrc_blk->list);
6708                         kfree(rsrc_blk);
6709                 }
6710                 break;
6711         case LPFC_RSC_TYPE_FCOE_RPI:
6712                 /* RPI bitmask and physical id array are cleaned up earlier. */
6713                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6714                                     &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6715                         list_del_init(&rsrc_blk->list);
6716                         kfree(rsrc_blk);
6717                 }
6718                 break;
6719         default:
6720                 break;
6721         }
6722
6723         bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6724
6725  out_free_mbox:
6726         mempool_free(mbox, phba->mbox_mem_pool);
6727         return rc;
6728 }
6729
6730 static void
6731 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6732                   uint32_t feature)
6733 {
6734         uint32_t len;
6735         u32 sig_freq = 0;
6736
6737         len = sizeof(struct lpfc_mbx_set_feature) -
6738                 sizeof(struct lpfc_sli4_cfg_mhdr);
6739         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6740                          LPFC_MBOX_OPCODE_SET_FEATURES, len,
6741                          LPFC_SLI4_MBX_EMBED);
6742
6743         switch (feature) {
6744         case LPFC_SET_UE_RECOVERY:
6745                 bf_set(lpfc_mbx_set_feature_UER,
6746                        &mbox->u.mqe.un.set_feature, 1);
6747                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6748                 mbox->u.mqe.un.set_feature.param_len = 8;
6749                 break;
6750         case LPFC_SET_MDS_DIAGS:
6751                 bf_set(lpfc_mbx_set_feature_mds,
6752                        &mbox->u.mqe.un.set_feature, 1);
6753                 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6754                        &mbox->u.mqe.un.set_feature, 1);
6755                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6756                 mbox->u.mqe.un.set_feature.param_len = 8;
6757                 break;
6758         case LPFC_SET_CGN_SIGNAL:
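                     /*
                      * Program the congestion-signal frequencies for the
                      * port: warn+alarm capability gets both frequencies,
                      * warn-only gets just the warning frequency, and the
                      * ACQE reporting frequency is set independently below.
                      */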
6759                 if (phba->cmf_active_mode == LPFC_CFG_OFF)
6760                         sig_freq = 0;
6761                 else
6762                         sig_freq = phba->cgn_sig_freq;
6763
6764                 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6765                         bf_set(lpfc_mbx_set_feature_CGN_alarm_freq,
6766                                &mbox->u.mqe.un.set_feature, sig_freq);
6767                         bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6768                                &mbox->u.mqe.un.set_feature, sig_freq);
6769                 }
6770
6771                 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
6772                         bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6773                                &mbox->u.mqe.un.set_feature, sig_freq);
6774
6775                 if (phba->cmf_active_mode == LPFC_CFG_OFF ||
6776                     phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED)
6777                         sig_freq = 0;
6778                 else
6779                         sig_freq = lpfc_acqe_cgn_frequency;
6780
6781                 bf_set(lpfc_mbx_set_feature_CGN_acqe_freq,
6782                        &mbox->u.mqe.un.set_feature, sig_freq);
6783
6784                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL;
6785                 mbox->u.mqe.un.set_feature.param_len = 12;
6786                 break;
6787         case LPFC_SET_DUAL_DUMP:
6788                 bf_set(lpfc_mbx_set_feature_dd,
6789                        &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6790                 bf_set(lpfc_mbx_set_feature_ddquery,
6791                        &mbox->u.mqe.un.set_feature, 0);
6792                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6793                 mbox->u.mqe.un.set_feature.param_len = 4;
6794                 break;
6795         case LPFC_SET_ENABLE_MI:
6796                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI;
6797                 mbox->u.mqe.un.set_feature.param_len = 4;
6798                 bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature,
6799                        phba->pport->cfg_lun_queue_depth);
6800                 bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature,
6801                        phba->sli4_hba.pc_sli4_params.mi_ver);
6802                 break;
6803         case LPFC_SET_ENABLE_CMF:
6804                 bf_set(lpfc_mbx_set_feature_dd, &mbox->u.mqe.un.set_feature, 1);
6805                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF;
6806                 mbox->u.mqe.un.set_feature.param_len = 4;
6807                 bf_set(lpfc_mbx_set_feature_cmf,
6808                        &mbox->u.mqe.un.set_feature, 1);
6809                 break;
6810         }
6811         return;
6812 }
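
     /*
      * Usage sketch (hypothetical wrapper, not part of the driver): callers
      * build the SET_FEATURES mailbox with lpfc_set_features() and then
      * issue it themselves, e.g. by polling from process context:
      */
     #if 0
     static int example_enable_dual_dump(struct lpfc_hba *phba)
     {
             LPFC_MBOXQ_t *mboxq;
             int rc;

             mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
             if (!mboxq)
                     return -ENOMEM;
             lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
             rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
             mempool_free(mboxq, phba->mbox_mem_pool);
             return (rc == MBX_SUCCESS) ? 0 : -EIO;
     }
     #endif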
6813
6814 /**
6815  * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6816  * @phba: Pointer to HBA context object.
6817  *
6818  * Disable FW logging into host memory on the adapter.  This must
6819  * be done before reading the logs from host memory.
6820  **/
6821 void
6822 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6823 {
6824         struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6825
6826         spin_lock_irq(&phba->hbalock);
6827         ras_fwlog->state = INACTIVE;
6828         spin_unlock_irq(&phba->hbalock);
6829
6830         /* Disable FW logging to host memory */
6831         writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6832                phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6833
6834         /* Wait 10ms for firmware to stop using DMA buffer */
6835         usleep_range(10 * 1000, 20 * 1000);
6836 }
6837
6838 /**
6839  * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6840  * @phba: Pointer to HBA context object.
6841  *
6842  * This function is called to free memory allocated for RAS FW logging
6843  * support in the driver.
6844  **/
6845 void
6846 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6847 {
6848         struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6849         struct lpfc_dmabuf *dmabuf, *next;
6850
6851         if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6852                 list_for_each_entry_safe(dmabuf, next,
6853                                     &ras_fwlog->fwlog_buff_list,
6854                                     list) {
6855                         list_del(&dmabuf->list);
6856                         dma_free_coherent(&phba->pcidev->dev,
6857                                           LPFC_RAS_MAX_ENTRY_SIZE,
6858                                           dmabuf->virt, dmabuf->phys);
6859                         kfree(dmabuf);
6860                 }
6861         }
6862
6863         if (ras_fwlog->lwpd.virt) {
6864                 dma_free_coherent(&phba->pcidev->dev,
6865                                   sizeof(uint32_t) * 2,
6866                                   ras_fwlog->lwpd.virt,
6867                                   ras_fwlog->lwpd.phys);
6868                 ras_fwlog->lwpd.virt = NULL;
6869         }
6870
6871         spin_lock_irq(&phba->hbalock);
6872         ras_fwlog->state = INACTIVE;
6873         spin_unlock_irq(&phba->hbalock);
6874 }
6875
6876 /**
6877  * lpfc_sli4_ras_dma_alloc: Allocate memory for FW logging support
6878  * @phba: Pointer to HBA context object.
6879  * @fwlog_buff_count: Count of buffers to be created.
6880  *
6881  * This routine allocates DMA memory for the Log Write Position Data
6882  * (LWPD) and for the buffers posted to the adapter for FW log updates.
6883  * The buffer count is derived from the module parameter
6884  * ras_fwlog_buffsize; each buffer posted to the FW is 64K in size.
6885  **/
6887 static int
6888 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6889                         uint32_t fwlog_buff_count)
6890 {
6891         struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6892         struct lpfc_dmabuf *dmabuf;
6893         int rc = 0, i = 0;
6894
6895         /* Initialize List */
6896         INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6897
6898         /* Allocate memory for the LWPD */
6899         ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6900                                             sizeof(uint32_t) * 2,
6901                                             &ras_fwlog->lwpd.phys,
6902                                             GFP_KERNEL);
6903         if (!ras_fwlog->lwpd.virt) {
6904                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6905                                 "6185 LWPD Memory Alloc Failed\n");
6906
6907                 return -ENOMEM;
6908         }
6909
6910         ras_fwlog->fw_buffcount = fwlog_buff_count;
6911         for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6912                 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6913                                  GFP_KERNEL);
6914                 if (!dmabuf) {
6915                         rc = -ENOMEM;
6916                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6917                                         "6186 Memory Alloc failed FW logging\n");
6918                         goto free_mem;
6919                 }
6920
6921                 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6922                                                   LPFC_RAS_MAX_ENTRY_SIZE,
6923                                                   &dmabuf->phys, GFP_KERNEL);
6924                 if (!dmabuf->virt) {
6925                         kfree(dmabuf);
6926                         rc = -ENOMEM;
6927                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6928                                         "6187 DMA Alloc Failed FW logging\n");
6929                         goto free_mem;
6930                 }
6931                 dmabuf->buffer_tag = i;
6932                 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6933         }
6934
6935 free_mem:
6936         if (rc)
6937                 lpfc_sli4_ras_dma_free(phba);
6938
6939         return rc;
6940 }
6941
6942 /**
6943  * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6944  * @phba: pointer to lpfc hba data structure.
6945  * @pmb: pointer to the driver internal queue element for mailbox command.
6946  *
6947  * Completion handler for driver's RAS MBX command to the device.
6948  **/
6949 static void
6950 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6951 {
6952         MAILBOX_t *mb;
6953         union lpfc_sli4_cfg_shdr *shdr;
6954         uint32_t shdr_status, shdr_add_status;
6955         struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6956
6957         mb = &pmb->u.mb;
6958
6959         shdr = (union lpfc_sli4_cfg_shdr *)
6960                 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6961         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6962         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6963
6964         if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6965                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6966                                 "6188 FW LOG mailbox "
6967                                 "completed with status x%x add_status x%x,"
6968                                 " mbx status x%x\n",
6969                                 shdr_status, shdr_add_status, mb->mbxStatus);
6970
6971                 ras_fwlog->ras_hwsupport = false;
6972                 goto disable_ras;
6973         }
6974
6975         spin_lock_irq(&phba->hbalock);
6976         ras_fwlog->state = ACTIVE;
6977         spin_unlock_irq(&phba->hbalock);
6978         mempool_free(pmb, phba->mbox_mem_pool);
6979
6980         return;
6981
6982 disable_ras:
6983         /* Free RAS DMA memory */
6984         lpfc_sli4_ras_dma_free(phba);
6985         mempool_free(pmb, phba->mbox_mem_pool);
6986 }
6987
6988 /**
6989  * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
6990  * @phba: pointer to lpfc hba data structure.
6991  * @fwlog_level: Logging verbosity level.
6992  * @fwlog_enable: Enable/Disable logging.
6993  *
6994  * Initialize memory and post mailbox command to enable FW logging in host
6995  * memory.
6996  **/
6997 int
6998 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6999                          uint32_t fwlog_level,
7000                          uint32_t fwlog_enable)
7001 {
7002         struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
7003         struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
7004         struct lpfc_dmabuf *dmabuf;
7005         LPFC_MBOXQ_t *mbox;
7006         uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
7007         int rc = 0;
7008
7009         spin_lock_irq(&phba->hbalock);
7010         ras_fwlog->state = INACTIVE;
7011         spin_unlock_irq(&phba->hbalock);
7012
7013         fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
7014                           phba->cfg_ras_fwlog_buffsize);
7015         fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
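             /*
              * Worked example: if LPFC_RAS_MIN_BUFF_POST_SIZE is 256KB and
              * LPFC_RAS_MAX_ENTRY_SIZE is 64KB, a cfg_ras_fwlog_buffsize of
              * 2 requests 512KB of log space, posted as eight 64KB buffers.
              */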
7016
7017         /*
7018          * If re-enabling FW logging support, use the earlier allocated
7019          * DMA buffers while posting the MBX command.
7020          */
7021         if (!ras_fwlog->lwpd.virt) {
7022                 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
7023                 if (rc) {
7024                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7025                                         "6189 FW Log Memory Allocation Failed\n");
7026                         return rc;
7027                 }
7028         }
7029
7030         /* Setup Mailbox command */
7031         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7032         if (!mbox) {
7033                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7034                                 "6190 RAS MBX Alloc Failed\n");
7035                 rc = -ENOMEM;
7036                 goto mem_free;
7037         }
7038
7039         ras_fwlog->fw_loglevel = fwlog_level;
7040         len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
7041                 sizeof(struct lpfc_sli4_cfg_mhdr));
7042
7043         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
7044                          LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
7045                          len, LPFC_SLI4_MBX_EMBED);
7046
7047         mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
7048         bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
7049                fwlog_enable);
7050         bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
7051                ras_fwlog->fw_loglevel);
7052         bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
7053                ras_fwlog->fw_buffcount);
7054         bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
7055                LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
7056
7057         /* Update DMA buffer address */
7058         list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
7059                 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
7060
7061                 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
7062                         putPaddrLow(dmabuf->phys);
7063
7064                 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
7065                         putPaddrHigh(dmabuf->phys);
7066         }
7067
7068         /* Update LWPD address */
7069         mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
7070         mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
7071
7072         spin_lock_irq(&phba->hbalock);
7073         ras_fwlog->state = REG_INPROGRESS;
7074         spin_unlock_irq(&phba->hbalock);
7075         mbox->vport = phba->pport;
7076         mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
7077
7078         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7079
7080         if (rc == MBX_NOT_FINISHED) {
7081                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7082                                 "6191 FW-Log Mailbox failed. "
7083                                 "status %d mbxStatus: x%x\n", rc,
7084                                 bf_get(lpfc_mqe_status, &mbox->u.mqe));
7085                 mempool_free(mbox, phba->mbox_mem_pool);
7086                 rc = -EIO;
7087                 goto mem_free;
7088         } else
7089                 rc = 0;
7090 mem_free:
7091         if (rc)
7092                 lpfc_sli4_ras_dma_free(phba);
7093
7094         return rc;
7095 }
7096
7097 /**
7098  * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
7099  * @phba: Pointer to HBA context object.
7100  *
7101  * Check if RAS is supported on the adapter and initialize it.
7102  **/
7103 void
7104 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
7105 {
7106         /* Check whether RAS FW logging needs to be enabled */
7107         if (lpfc_check_fwlog_support(phba))
7108                 return;
7109
7110         lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
7111                                  LPFC_RAS_ENABLE_LOGGING);
7112 }
7113
7114 /**
7115  * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
7116  * @phba: Pointer to HBA context object.
7117  *
7118  * This function allocates all SLI4 resource identifiers.
7119  **/
7120 int
7121 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
7122 {
7123         int i, rc, error = 0;
7124         uint16_t count, base;
7125         unsigned long longs;
7126
7127         if (!phba->sli4_hba.rpi_hdrs_in_use)
7128                 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
7129         if (phba->sli4_hba.extents_in_use) {
7130                 /*
7131                  * The port supports resource extents. The XRI, VPI, VFI, RPI
7132                  * resource extent count must be read and allocated before
7133                  * provisioning the resource id arrays.
7134                  */
7135                 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7136                     LPFC_IDX_RSRC_RDY) {
7137                         /*
7138                          * Extent-based resources are set - the driver could
7139                          * be in a port reset. Figure out if any corrective
7140                          * actions need to be taken.
7141                          */
7142                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7143                                                  LPFC_RSC_TYPE_FCOE_VFI);
7144                         if (rc != 0)
7145                                 error++;
7146                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7147                                                  LPFC_RSC_TYPE_FCOE_VPI);
7148                         if (rc != 0)
7149                                 error++;
7150                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7151                                                  LPFC_RSC_TYPE_FCOE_XRI);
7152                         if (rc != 0)
7153                                 error++;
7154                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7155                                                  LPFC_RSC_TYPE_FCOE_RPI);
7156                         if (rc != 0)
7157                                 error++;
7158
7159                         /*
7160                          * It's possible that the number of resources
7161                          * provided to this port instance changed between
7162                          * resets.  Detect this condition and reallocate
7163                          * resources.  Otherwise, there is no action.
7164                          */
7165                         if (error) {
7166                                 lpfc_printf_log(phba, KERN_INFO,
7167                                                 LOG_MBOX | LOG_INIT,
7168                                                 "2931 Detected extent resource "
7169                                                 "change.  Reallocating all "
7170                                                 "extents.\n");
7171                                 rc = lpfc_sli4_dealloc_extent(phba,
7172                                                  LPFC_RSC_TYPE_FCOE_VFI);
7173                                 rc = lpfc_sli4_dealloc_extent(phba,
7174                                                  LPFC_RSC_TYPE_FCOE_VPI);
7175                                 rc = lpfc_sli4_dealloc_extent(phba,
7176                                                  LPFC_RSC_TYPE_FCOE_XRI);
7177                                 rc = lpfc_sli4_dealloc_extent(phba,
7178                                                  LPFC_RSC_TYPE_FCOE_RPI);
7179                         } else
7180                                 return 0;
7181                 }
7182
7183                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7184                 if (unlikely(rc))
7185                         goto err_exit;
7186
7187                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7188                 if (unlikely(rc))
7189                         goto err_exit;
7190
7191                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7192                 if (unlikely(rc))
7193                         goto err_exit;
7194
7195                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7196                 if (unlikely(rc))
7197                         goto err_exit;
7198                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7199                        LPFC_IDX_RSRC_RDY);
7200                 return rc;
7201         } else {
7202                 /*
7203                  * The port does not support resource extents.  The XRI, VPI,
7204                  * VFI, RPI resource ids were determined from READ_CONFIG.
7205                  * Just allocate the bitmasks and provision the resource id
7206                  * arrays.  If a port reset is active, the resources don't
7207                  * need any action - just exit.
7208                  */
7209                 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7210                     LPFC_IDX_RSRC_RDY) {
7211                         lpfc_sli4_dealloc_resource_identifiers(phba);
7212                         lpfc_sli4_remove_rpis(phba);
7213                 }
7214                 /* RPIs. */
7215                 count = phba->sli4_hba.max_cfg_param.max_rpi;
7216                 if (count <= 0) {
7217                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7218                                         "3279 Invalid provisioning of "
7219                                         "rpi:%d\n", count);
7220                         rc = -EINVAL;
7221                         goto err_exit;
7222                 }
7223                 base = phba->sli4_hba.max_cfg_param.rpi_base;
7224                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7225                 phba->sli4_hba.rpi_bmask = kcalloc(longs,
7226                                                    sizeof(unsigned long),
7227                                                    GFP_KERNEL);
7228                 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
7229                         rc = -ENOMEM;
7230                         goto err_exit;
7231                 }
7232                 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
7233                                                  GFP_KERNEL);
7234                 if (unlikely(!phba->sli4_hba.rpi_ids)) {
7235                         rc = -ENOMEM;
7236                         goto free_rpi_bmask;
7237                 }
7238
7239                 for (i = 0; i < count; i++)
7240                         phba->sli4_hba.rpi_ids[i] = base + i;
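                     /* e.g. rpi_base 10 with max_rpi 3 yields rpi_ids {10, 11, 12} */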
7241
7242                 /* VPIs. */
7243                 count = phba->sli4_hba.max_cfg_param.max_vpi;
7244                 if (count <= 0) {
7245                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7246                                         "3280 Invalid provisioning of "
7247                                         "vpi:%d\n", count);
7248                         rc = -EINVAL;
7249                         goto free_rpi_ids;
7250                 }
7251                 base = phba->sli4_hba.max_cfg_param.vpi_base;
7252                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7253                 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
7254                                           GFP_KERNEL);
7255                 if (unlikely(!phba->vpi_bmask)) {
7256                         rc = -ENOMEM;
7257                         goto free_rpi_ids;
7258                 }
7259                 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
7260                                         GFP_KERNEL);
7261                 if (unlikely(!phba->vpi_ids)) {
7262                         rc = -ENOMEM;
7263                         goto free_vpi_bmask;
7264                 }
7265
7266                 for (i = 0; i < count; i++)
7267                         phba->vpi_ids[i] = base + i;
7268
7269                 /* XRIs. */
7270                 count = phba->sli4_hba.max_cfg_param.max_xri;
7271                 if (count <= 0) {
7272                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7273                                         "3281 Invalid provisioning of "
7274                                         "xri:%d\n", count);
7275                         rc = -EINVAL;
7276                         goto free_vpi_ids;
7277                 }
7278                 base = phba->sli4_hba.max_cfg_param.xri_base;
7279                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7280                 phba->sli4_hba.xri_bmask = kcalloc(longs,
7281                                                    sizeof(unsigned long),
7282                                                    GFP_KERNEL);
7283                 if (unlikely(!phba->sli4_hba.xri_bmask)) {
7284                         rc = -ENOMEM;
7285                         goto free_vpi_ids;
7286                 }
7287                 phba->sli4_hba.max_cfg_param.xri_used = 0;
7288                 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
7289                                                  GFP_KERNEL);
7290                 if (unlikely(!phba->sli4_hba.xri_ids)) {
7291                         rc = -ENOMEM;
7292                         goto free_xri_bmask;
7293                 }
7294
7295                 for (i = 0; i < count; i++)
7296                         phba->sli4_hba.xri_ids[i] = base + i;
7297
7298                 /* VFIs. */
7299                 count = phba->sli4_hba.max_cfg_param.max_vfi;
7300                 if (count <= 0) {
7301                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7302                                         "3282 Invalid provisioning of "
7303                                         "vfi:%d\n", count);
7304                         rc = -EINVAL;
7305                         goto free_xri_ids;
7306                 }
7307                 base = phba->sli4_hba.max_cfg_param.vfi_base;
7308                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7309                 phba->sli4_hba.vfi_bmask = kcalloc(longs,
7310                                                    sizeof(unsigned long),
7311                                                    GFP_KERNEL);
7312                 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
7313                         rc = -ENOMEM;
7314                         goto free_xri_ids;
7315                 }
7316                 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
7317                                                  GFP_KERNEL);
7318                 if (unlikely(!phba->sli4_hba.vfi_ids)) {
7319                         rc = -ENOMEM;
7320                         goto free_vfi_bmask;
7321                 }
7322
7323                 for (i = 0; i < count; i++)
7324                         phba->sli4_hba.vfi_ids[i] = base + i;
7325
7326                 /*
7327                  * Mark all resources ready.  An HBA reset doesn't need
7328                  * to reset the initialization.
7329                  */
7330                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7331                        LPFC_IDX_RSRC_RDY);
7332                 return 0;
7333         }
7334
7335  free_vfi_bmask:
7336         kfree(phba->sli4_hba.vfi_bmask);
7337         phba->sli4_hba.vfi_bmask = NULL;
7338  free_xri_ids:
7339         kfree(phba->sli4_hba.xri_ids);
7340         phba->sli4_hba.xri_ids = NULL;
7341  free_xri_bmask:
7342         kfree(phba->sli4_hba.xri_bmask);
7343         phba->sli4_hba.xri_bmask = NULL;
7344  free_vpi_ids:
7345         kfree(phba->vpi_ids);
7346         phba->vpi_ids = NULL;
7347  free_vpi_bmask:
7348         kfree(phba->vpi_bmask);
7349         phba->vpi_bmask = NULL;
7350  free_rpi_ids:
7351         kfree(phba->sli4_hba.rpi_ids);
7352         phba->sli4_hba.rpi_ids = NULL;
7353  free_rpi_bmask:
7354         kfree(phba->sli4_hba.rpi_bmask);
7355         phba->sli4_hba.rpi_bmask = NULL;
7356  err_exit:
7357         return rc;
7358 }
7359
7360 /**
7361  * lpfc_sli4_dealloc_resource_identifiers - Release all SLI4 resource identifiers.
7362  * @phba: Pointer to HBA context object.
7363  *
7364  * This function releases all SLI4 resource identifiers and frees the
7365  * driver structures used to track them.
7366  **/
7367 int
7368 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
7369 {
7370         if (phba->sli4_hba.extents_in_use) {
7371                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7372                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7373                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7374                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7375         } else {
7376                 kfree(phba->vpi_bmask);
7377                 phba->sli4_hba.max_cfg_param.vpi_used = 0;
7378                 kfree(phba->vpi_ids);
7379                 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7380                 kfree(phba->sli4_hba.xri_bmask);
7381                 kfree(phba->sli4_hba.xri_ids);
7382                 kfree(phba->sli4_hba.vfi_bmask);
7383                 kfree(phba->sli4_hba.vfi_ids);
7384                 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7385                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7386         }
7387
7388         return 0;
7389 }
7390
7391 /**
7392  * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
7393  * @phba: Pointer to HBA context object.
7394  * @type: The resource extent type.
7395  * @extnt_cnt: buffer to hold port extent count response
7396  * @extnt_size: buffer to hold port extent size response.
7397  *
7398  * This function calls the port to read the host allocated extents
7399  * for a particular type.
7400  **/
7401 int
7402 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
7403                                uint16_t *extnt_cnt, uint16_t *extnt_size)
7404 {
7405         bool emb;
7406         int rc = 0;
7407         uint16_t curr_blks = 0;
7408         uint32_t req_len, emb_len;
7409         uint32_t alloc_len, mbox_tmo;
7410         struct list_head *blk_list_head;
7411         struct lpfc_rsrc_blks *rsrc_blk;
7412         LPFC_MBOXQ_t *mbox;
7413         void *virtaddr = NULL;
7414         struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
7415         struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
7416         union  lpfc_sli4_cfg_shdr *shdr;
7417
7418         switch (type) {
7419         case LPFC_RSC_TYPE_FCOE_VPI:
7420                 blk_list_head = &phba->lpfc_vpi_blk_list;
7421                 break;
7422         case LPFC_RSC_TYPE_FCOE_XRI:
7423                 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
7424                 break;
7425         case LPFC_RSC_TYPE_FCOE_VFI:
7426                 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
7427                 break;
7428         case LPFC_RSC_TYPE_FCOE_RPI:
7429                 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
7430                 break;
7431         default:
7432                 return -EIO;
7433         }
7434
7435         /* Count the number of extents currently allocated for this type. */
7436         list_for_each_entry(rsrc_blk, blk_list_head, list) {
7437                 if (curr_blks == 0) {
7438                         /*
7439                          * The GET_ALLOCATED mailbox returns only the count,
7440                          * not the size.  All extents of a given type share
7441                          * the same size, so report the size stored in the
7442                          * first allocated block.
7444                          */
7445                         *extnt_size = rsrc_blk->rsrc_size;
7446                 }
7447                 curr_blks++;
7448         }
7449
7450         /*
7451          * Calculate the size of an embedded mailbox.  The uint32_t
7452          * accounts for the extents-specific word.
7453          */
7454         emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
7455                 sizeof(uint32_t);
7456
7457         /*
7458          * Presume the allocation and response will fit into an embedded
7459          * mailbox.  If not true, reconfigure to a non-embedded mailbox.
7460          */
7461         emb = LPFC_SLI4_MBX_EMBED;
7462         req_len = curr_blks * sizeof(uint16_t);
7463         if (req_len > emb_len) {
7464                 req_len = curr_blks * sizeof(uint16_t) +
7465                         sizeof(union lpfc_sli4_cfg_shdr) +
7466                         sizeof(uint32_t);
7467                 emb = LPFC_SLI4_MBX_NEMBED;
7468         }
7469
7470         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7471         if (!mbox)
7472                 return -ENOMEM;
7473         memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
7474
7475         alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7476                                      LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
7477                                      req_len, emb);
7478         if (alloc_len < req_len) {
7479                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7480                         "2983 Allocated DMA memory size (x%x) is "
7481                         "less than the requested DMA memory "
7482                         "size (x%x)\n", alloc_len, req_len);
7483                 rc = -ENOMEM;
7484                 goto err_exit;
7485         }
7486         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
7487         if (unlikely(rc)) {
7488                 rc = -EIO;
7489                 goto err_exit;
7490         }
7491
7492         if (!phba->sli4_hba.intr_enable)
7493                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
7494         else {
7495                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
7496                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
7497         }
7498
7499         if (unlikely(rc)) {
7500                 rc = -EIO;
7501                 goto err_exit;
7502         }
7503
7504         /*
7505          * Figure out where the response is located.  Then get local pointers
7506          * to the response data.  The port is not guaranteed to honor the
7507          * full requested extent count, so update the local variable with
7508          * the count actually allocated by the port.
7509          */
7510         if (emb == LPFC_SLI4_MBX_EMBED) {
7511                 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7512                 shdr = &rsrc_ext->header.cfg_shdr;
7513                 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7514         } else {
7515                 virtaddr = mbox->sge_array->addr[0];
7516                 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7517                 shdr = &n_rsrc->cfg_shdr;
7518                 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7519         }
7520
7521         if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
7522                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7523                         "2984 Failed to read allocated resources "
7524                         "for type %d - Status 0x%x Add'l Status 0x%x.\n",
7525                         type,
7526                         bf_get(lpfc_mbox_hdr_status, &shdr->response),
7527                         bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7528                 rc = -EIO;
7529                 goto err_exit;
7530         }
7531  err_exit:
7532         lpfc_sli4_mbox_cmd_free(phba, mbox);
7533         return rc;
7534 }
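
     /*
      * Usage sketch (hypothetical, not part of the driver): read back how
      * many XRI extents the port has already granted to this function:
      */
     #if 0
             uint16_t xri_cnt = 0, xri_size = 0;

             if (!lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
                                                 &xri_cnt, &xri_size))
                     pr_info("port holds %u XRI extents of %u ids each\n",
                             xri_cnt, xri_size);
     #endif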
7535
7536 /**
7537  * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
7538  * @phba: pointer to lpfc hba data structure.
7539  * @sgl_list: linked link of sgl buffers to post
7540  * @cnt: number of linked list buffers
7541  *
7542  * This routine walks the list of buffers that have been allocated and
7543  * reposts them to the port using SGL block post. This is needed after a
7544  * pci_function_reset/warm_start or start. It attempts to construct blocks
7545  * of buffer sgls which contains contiguous xris and uses the non-embedded
7546  * of buffer sgls which contain contiguous xris and uses the non-embedded
7547  * SGL block post mailbox command to post them to the port. Any single
7548  * buffer sgl with a non-contiguous xri is posted with the embedded SGL
7549  * post mailbox command instead.
7550  * Returns: 0 = success, non-zero failure.
7551  **/
7552 static int
7553 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7554                           struct list_head *sgl_list, int cnt)
7555 {
7556         struct lpfc_sglq *sglq_entry = NULL;
7557         struct lpfc_sglq *sglq_entry_next = NULL;
7558         struct lpfc_sglq *sglq_entry_first = NULL;
7559         int status, total_cnt;
7560         int post_cnt = 0, num_posted = 0, block_cnt = 0;
7561         int last_xritag = NO_XRI;
7562         LIST_HEAD(prep_sgl_list);
7563         LIST_HEAD(blck_sgl_list);
7564         LIST_HEAD(allc_sgl_list);
7565         LIST_HEAD(post_sgl_list);
7566         LIST_HEAD(free_sgl_list);
7567
7568         spin_lock_irq(&phba->hbalock);
7569         spin_lock(&phba->sli4_hba.sgl_list_lock);
7570         list_splice_init(sgl_list, &allc_sgl_list);
7571         spin_unlock(&phba->sli4_hba.sgl_list_lock);
7572         spin_unlock_irq(&phba->hbalock);
7573
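             /*
              * Example of the blocking below (hypothetical xritags): entries
              * 100,101,102,104 yield one block {100,101,102} posted with the
              * non-embedded SGL block mailbox; 104 starts a new block after
              * the hole and, if it remains alone at the end of the list, is
              * posted with the embedded single-SGL command instead.
              */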
7574         total_cnt = cnt;
7575         list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7576                                  &allc_sgl_list, list) {
7577                 list_del_init(&sglq_entry->list);
7578                 block_cnt++;
7579                 if ((last_xritag != NO_XRI) &&
7580                     (sglq_entry->sli4_xritag != last_xritag + 1)) {
7581                         /* a hole in xri block, form a sgl posting block */
7582                         list_splice_init(&prep_sgl_list, &blck_sgl_list);
7583                         post_cnt = block_cnt - 1;
7584                         /* prepare list for next posting block */
7585                         list_add_tail(&sglq_entry->list, &prep_sgl_list);
7586                         block_cnt = 1;
7587                 } else {
7588                         /* prepare list for next posting block */
7589                         list_add_tail(&sglq_entry->list, &prep_sgl_list);
7590                         /* enough sgls for non-embed sgl mbox command */
7591                         if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7592                                 list_splice_init(&prep_sgl_list,
7593                                                  &blck_sgl_list);
7594                                 post_cnt = block_cnt;
7595                                 block_cnt = 0;
7596                         }
7597                 }
7598                 num_posted++;
7599
7600                 /* keep track of last sgl's xritag */
7601                 last_xritag = sglq_entry->sli4_xritag;
7602
7603                 /* end of repost sgl list condition for buffers */
7604                 if (num_posted == total_cnt) {
7605                         if (post_cnt == 0) {
7606                                 list_splice_init(&prep_sgl_list,
7607                                                  &blck_sgl_list);
7608                                 post_cnt = block_cnt;
7609                         } else if (block_cnt == 1) {
7610                                 status = lpfc_sli4_post_sgl(phba,
7611                                                 sglq_entry->phys, 0,
7612                                                 sglq_entry->sli4_xritag);
7613                                 if (!status) {
7614                                         /* successful, put sgl to posted list */
7615                                         list_add_tail(&sglq_entry->list,
7616                                                       &post_sgl_list);
7617                                 } else {
7618                                         /* Failure, put sgl to free list */
7619                                         lpfc_printf_log(phba, KERN_WARNING,
7620                                                 LOG_SLI,
7621                                                 "3159 Failed to post "
7622                                                 "sgl, xritag:x%x\n",
7623                                                 sglq_entry->sli4_xritag);
7624                                         list_add_tail(&sglq_entry->list,
7625                                                       &free_sgl_list);
7626                                         total_cnt--;
7627                                 }
7628                         }
7629                 }
7630
7631                 /* continue until a nembed page worth of sgls */
7632                 if (post_cnt == 0)
7633                         continue;
7634
7635                 /* post the buffer list sgls as a block */
7636                 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7637                                                  post_cnt);
7638
7639                 if (!status) {
7640                         /* success, put sgl list to posted sgl list */
7641                         list_splice_init(&blck_sgl_list, &post_sgl_list);
7642                 } else {
7643                         /* Failure, put sgl list to free sgl list */
7644                         sglq_entry_first = list_first_entry(&blck_sgl_list,
7645                                                             struct lpfc_sglq,
7646                                                             list);
7647                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7648                                         "3160 Failed to post sgl-list, "
7649                                         "xritag:x%x-x%x\n",
7650                                         sglq_entry_first->sli4_xritag,
7651                                         (sglq_entry_first->sli4_xritag +
7652                                          post_cnt - 1));
7653                         list_splice_init(&blck_sgl_list, &free_sgl_list);
7654                         total_cnt -= post_cnt;
7655                 }
7656
7657                 /* don't reset xritag due to hole in xri block */
7658                 if (block_cnt == 0)
7659                         last_xritag = NO_XRI;
7660
7661                 /* reset sgl post count for next round of posting */
7662                 post_cnt = 0;
7663         }
7664
7665         /* free the sgls failed to post */
7666         lpfc_free_sgl_list(phba, &free_sgl_list);
7667
7668         /* push sgls posted to the available list */
7669         if (!list_empty(&post_sgl_list)) {
7670                 spin_lock_irq(&phba->hbalock);
7671                 spin_lock(&phba->sli4_hba.sgl_list_lock);
7672                 list_splice_init(&post_sgl_list, sgl_list);
7673                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7674                 spin_unlock_irq(&phba->hbalock);
7675         } else {
7676                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7677                                 "3161 Failure to post sgl to port.\n");
7678                 return -EIO;
7679         }
7680
7681         /* return the number of XRIs actually posted */
7682         return total_cnt;
7683 }
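
/*
 * Usage sketch (illustrative): callers treat a non-negative return as
 * the count of XRIs that actually made it to the port. The ELS pool
 * registration in lpfc_sli4_hba_setup() below does exactly this:
 *
 *     rc = lpfc_sli4_repost_sgl_list(phba,
 *                                    &phba->sli4_hba.lpfc_els_sgl_list,
 *                                    phba->sli4_hba.els_xri_cnt);
 *     if (rc < 0)
 *             goto out_destroy_queue;    (fatal error path)
 *     phba->sli4_hba.els_xri_cnt = rc;   (shrink to what was posted)
 */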
7684
7685 /**
7686  * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7687  * @phba: pointer to lpfc hba data structure.
7688  *
7689  * This routine walks the list of nvme buffers that have been allocated and
7690  * reposts them to the port by using SGL block post. This is needed after a
7691  * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7692  * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7693  * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7694  *
7695  * Returns: 0 = success, non-zero failure.
7696  **/
7697 static int
7698 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7699 {
7700         LIST_HEAD(post_nblist);
7701         int num_posted, rc = 0;
7702
7703         /* move all NVME buffers that need reposting onto a local list */
7704         lpfc_io_buf_flush(phba, &post_nblist);
7705
7706         /* post the list of nvme buffer sgls to port if available */
7707         if (!list_empty(&post_nblist)) {
7708                 num_posted = lpfc_sli4_post_io_sgl_list(
7709                         phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7710                 /* failed to post any nvme buffer, return error */
7711                 if (num_posted == 0)
7712                         rc = -EIO;
7713         }
7714         return rc;
7715 }
7716
7717 static void
7718 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7719 {
7720         uint32_t len;
7721
7722         len = sizeof(struct lpfc_mbx_set_host_data) -
7723                 sizeof(struct lpfc_sli4_cfg_mhdr);
7724         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7725                          LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7726                          LPFC_SLI4_MBX_EMBED);
7727
7728         mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7729         mbox->u.mqe.un.set_host_data.param_len =
7730                                         LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7731         snprintf(mbox->u.mqe.un.set_host_data.un.data,
7732                  LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7733                  "Linux %s v"LPFC_DRIVER_VERSION,
7734                  (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7735 }
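
/*
 * Usage sketch (illustrative; this mirrors the call made later from
 * lpfc_sli4_hba_setup(), where a failure is treated as non-fatal - the
 * port simply never learns the host OS/driver version string):
 *
 *     lpfc_set_host_data(phba, mboxq);
 *     rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *     if (rc)
 *             lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
 *                             "2134 Failed to set host os driver version %x\n",
 *                             rc);
 */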
7736
7737 int
7738 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7739                     struct lpfc_queue *drq, int count, int idx)
7740 {
7741         int rc, i;
7742         struct lpfc_rqe hrqe;
7743         struct lpfc_rqe drqe;
7744         struct lpfc_rqb *rqbp;
7745         unsigned long flags;
7746         struct rqb_dmabuf *rqb_buffer;
7747         LIST_HEAD(rqb_buf_list);
7748
7749         rqbp = hrq->rqbp;
7750         for (i = 0; i < count; i++) {
7751                 spin_lock_irqsave(&phba->hbalock, flags);
7752                 /* If the RQ is already full, don't bother */
7753                 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
7754                         spin_unlock_irqrestore(&phba->hbalock, flags);
7755                         break;
7756                 }
7757                 spin_unlock_irqrestore(&phba->hbalock, flags);
7758
7759                 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7760                 if (!rqb_buffer)
7761                         break;
7762                 rqb_buffer->hrq = hrq;
7763                 rqb_buffer->drq = drq;
7764                 rqb_buffer->idx = idx;
7765                 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7766         }
7767
7768         spin_lock_irqsave(&phba->hbalock, flags);
7769         while (!list_empty(&rqb_buf_list)) {
7770                 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7771                                  hbuf.list);
7772
7773                 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7774                 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7775                 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7776                 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7777                 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7778                 if (rc < 0) {
7779                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7780                                         "6421 Cannot post to HRQ %d: %x %x %x "
7781                                         "DRQ %x %x\n",
7782                                         hrq->queue_id,
7783                                         hrq->host_index,
7784                                         hrq->hba_index,
7785                                         hrq->entry_count,
7786                                         drq->host_index,
7787                                         drq->hba_index);
7788                         rqbp->rqb_free_buffer(phba, rqb_buffer);
7789                 } else {
7790                         list_add_tail(&rqb_buffer->hbuf.list,
7791                                       &rqbp->rqb_buffer_list);
7792                         rqbp->buffer_count++;
7793                 }
7794         }
7795         spin_unlock_irqrestore(&phba->hbalock, flags);
7796         return 1;
7797 }
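
/*
 * Usage sketch (illustrative; matches the NVMET MRQ seeding performed
 * in lpfc_sli4_hba_setup() below, where every header/data RQ pair is
 * primed with cfg_nvmet_mrq_post buffers):
 *
 *     for (i = 0; i < phba->cfg_nvmet_mrq; i++)
 *             lpfc_post_rq_buffer(phba, phba->sli4_hba.nvmet_mrq_hdr[i],
 *                                 phba->sli4_hba.nvmet_mrq_data[i],
 *                                 phba->cfg_nvmet_mrq_post, i);
 */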
7798
7799 static void
7800 lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7801 {
7802         struct lpfc_vport *vport = pmb->vport;
7803         union lpfc_sli4_cfg_shdr *shdr;
7804         u32 shdr_status, shdr_add_status;
7805         u32 sig, acqe;
7806
7807         /* Two outcomes. (1) Set features was successful and EDC negotiation
7808          * is done. (2) Mailbox failed, so fall back to FPIN support only.
7809          */
7810         shdr = (union lpfc_sli4_cfg_shdr *)
7811                 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7812         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7813         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7814         if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
7815                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
7816                                 "2516 CGN SET_FEATURE mbox failed with "
7817                                 "status x%x add_status x%x, mbx status x%x "
7818                                 "Reset Congestion to FPINs only\n",
7819                                 shdr_status, shdr_add_status,
7820                                 pmb->u.mb.mbxStatus);
7821                 /* If there is a mbox error, move on to RDF */
7822                 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7823                 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7824                 goto out;
7825         }
7826
7827         /* Zero out Congestion Signal ACQE counter */
7828         phba->cgn_acqe_cnt = 0;
7829
7830         acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
7831                       &pmb->u.mqe.un.set_feature);
7832         sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq,
7833                      &pmb->u.mqe.un.set_feature);
7834         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7835                         "4620 SET_FEATURES Success: Freq: %ds %dms "
7836                         " Reg: x%x x%x\n", acqe, sig,
7837                         phba->cgn_reg_signal, phba->cgn_reg_fpin);
7838 out:
7839         mempool_free(pmb, phba->mbox_mem_pool);
7840
7841         /* Register for FPIN events from the fabric now that the
7842          * EDC common_set_features has completed.
7843          */
7844         lpfc_issue_els_rdf(vport, 0);
7845 }
7846
7847 int
7848 lpfc_config_cgn_signal(struct lpfc_hba *phba)
7849 {
7850         LPFC_MBOXQ_t *mboxq;
7851         u32 rc;
7852
7853         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7854         if (!mboxq)
7855                 goto out_rdf;
7856
7857         lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL);
7858         mboxq->vport = phba->pport;
7859         mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs;
7860
7861         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7862                         "4621 SET_FEATURES: FREQ sig x%x acqe x%x: "
7863                         "Reg: x%x x%x\n",
7864                         phba->cgn_sig_freq, lpfc_acqe_cgn_frequency,
7865                         phba->cgn_reg_signal, phba->cgn_reg_fpin);
7866
7867         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
7868         if (rc == MBX_NOT_FINISHED)
7869                 goto out;
7870         return 0;
7871
7872 out:
7873         mempool_free(mboxq, phba->mbox_mem_pool);
7874 out_rdf:
7875         /* If there is a mbox error, move on to RDF */
7876         phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7877         phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7878         lpfc_issue_els_rdf(phba->pport, 0);
7879         return -EIO;
7880 }
7881
7882 /**
7883  * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
7884  * @phba: pointer to lpfc hba data structure.
7885  *
7886  * This routine initializes the per-cq idle_stat to dynamically dictate
7887  * polling decisions.
7888  *
7889  * Return codes:
7890  *   None
7891  **/
7892 static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
7893 {
7894         int i;
7895         struct lpfc_sli4_hdw_queue *hdwq;
7896         struct lpfc_queue *cq;
7897         struct lpfc_idle_stat *idle_stat;
7898         u64 wall;
7899
7900         for_each_present_cpu(i) {
7901                 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
7902                 cq = hdwq->io_cq;
7903
7904                 /* Skip if we've already handled this cq's primary CPU */
7905                 if (cq->chann != i)
7906                         continue;
7907
7908                 idle_stat = &phba->sli4_hba.idle_stat[i];
7909
7910                 idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
7911                 idle_stat->prev_wall = wall;
7912
7913                 if (phba->nvmet_support ||
7914                     phba->cmf_active_mode != LPFC_CFG_OFF)
7915                         cq->poll_mode = LPFC_QUEUE_WORK;
7916                 else
7917                         cq->poll_mode = LPFC_IRQ_POLL;
7918         }
7919
7920         if (!phba->nvmet_support)
7921                 schedule_delayed_work(&phba->idle_stat_delay_work,
7922                                       msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
7923 }
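
/*
 * The prev_idle/prev_wall baselines captured above are consumed by the
 * idle_stat_delay_work heartbeat. A minimal sketch of that delta math,
 * assuming the same get_cpu_idle_time() accounting ("threshold" is a
 * stand-in for the driver's cutoff): a mostly-idle CPU keeps its cq in
 * LPFC_IRQ_POLL mode, a busy one falls back to LPFC_QUEUE_WORK.
 *
 *     wall_idle = get_cpu_idle_time(cpu, &wall, 1);
 *     diff_idle = wall_idle - idle_stat->prev_idle;
 *     diff_wall = wall - idle_stat->prev_wall;
 *     busy = (diff_wall > diff_idle) ? diff_wall - diff_idle : 0;
 *     idle_pct = 100 - div64_u64(100 * busy, diff_wall);
 *     cq->poll_mode = (idle_pct > threshold) ? LPFC_IRQ_POLL
 *                                            : LPFC_QUEUE_WORK;
 */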
7924
7925 static void lpfc_sli4_dip(struct lpfc_hba *phba)
7926 {
7927         uint32_t if_type;
7928
7929         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7930         if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
7931             if_type == LPFC_SLI_INTF_IF_TYPE_6) {
7932                 struct lpfc_register reg_data;
7933
7934                 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7935                                &reg_data.word0))
7936                         return;
7937
7938                 if (bf_get(lpfc_sliport_status_dip, &reg_data))
7939                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7940                                         "2904 Firmware Dump Image Present"
7941                                         " on Adapter");
7942         }
7943 }
7944
7945 /**
7946  * lpfc_cmf_setup - Initialize Congestion Management (CMF) and MI support
7947  * @phba: Pointer to HBA context object.
7948  *
7949  * This is called from HBA setup during driver load or when the HBA
7950  * comes online. This does all the initialization to support CMF and MI.
7951  **/
7952 static int
7953 lpfc_cmf_setup(struct lpfc_hba *phba)
7954 {
7955         LPFC_MBOXQ_t *mboxq;
7956         struct lpfc_dmabuf *mp;
7957         struct lpfc_pc_sli4_params *sli4_params;
7958         int rc, cmf, mi_ver;
7959
7960         rc = lpfc_sli4_refresh_params(phba);
7961         if (unlikely(rc))
7962                 return rc;
7963
7964         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7965         if (!mboxq)
7966                 return -ENOMEM;
7967
7968         sli4_params = &phba->sli4_hba.pc_sli4_params;
7969
7970         /* Are we forcing MI off via module parameter? */
7971         if (!phba->cfg_enable_mi)
7972                 sli4_params->mi_ver = 0;
7973
7974         /* Always try to enable MI feature if we can */
7975         if (sli4_params->mi_ver) {
7976                 lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
7977                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7978                 mi_ver = bf_get(lpfc_mbx_set_feature_mi,
7979                                  &mboxq->u.mqe.un.set_feature);
7980
7981                 if (rc == MBX_SUCCESS) {
7982                         if (mi_ver) {
7983                                 lpfc_printf_log(phba,
7984                                                 KERN_WARNING, LOG_CGN_MGMT,
7985                                                 "6215 MI is enabled\n");
7986                                 sli4_params->mi_ver = mi_ver;
7987                         } else {
7988                                 lpfc_printf_log(phba,
7989                                                 KERN_WARNING, LOG_CGN_MGMT,
7990                                                 "6338 MI is disabled\n");
7991                                 sli4_params->mi_ver = 0;
7992                         }
7993                 } else {
7994                         /* mi_ver is already set from GET_SLI4_PARAMETERS */
7995                         lpfc_printf_log(phba, KERN_INFO,
7996                                         LOG_CGN_MGMT | LOG_INIT,
7997                                         "6245 Enable MI Mailbox x%x (x%x/x%x) "
7998                                         "failed, rc:x%x mi:x%x\n",
7999                                         bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8000                                         lpfc_sli_config_mbox_subsys_get
8001                                                 (phba, mboxq),
8002                                         lpfc_sli_config_mbox_opcode_get
8003                                                 (phba, mboxq),
8004                                         rc, sli4_params->mi_ver);
8005                 }
8006         } else {
8007                 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8008                                 "6217 MI is disabled\n");
8009         }
8010
8011         /* Ensure FDMI is enabled for MI if enable_mi is set */
8012         if (sli4_params->mi_ver)
8013                 phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
8014
8015         /* Always try to enable CMF feature if we can */
8016         if (sli4_params->cmf) {
8017                 lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF);
8018                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8019                 cmf = bf_get(lpfc_mbx_set_feature_cmf,
8020                              &mboxq->u.mqe.un.set_feature);
8021                 if (rc == MBX_SUCCESS && cmf) {
8022                         lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8023                                         "6218 CMF is enabled: mode %d\n",
8024                                         phba->cmf_active_mode);
8025                 } else {
8026                         lpfc_printf_log(phba, KERN_WARNING,
8027                                         LOG_CGN_MGMT | LOG_INIT,
8028                                         "6219 Enable CMF Mailbox x%x (x%x/x%x) "
8029                                         "failed, rc:x%x dd:x%x\n",
8030                                         bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8031                                         lpfc_sli_config_mbox_subsys_get
8032                                                 (phba, mboxq),
8033                                         lpfc_sli_config_mbox_opcode_get
8034                                                 (phba, mboxq),
8035                                         rc, cmf);
8036                         sli4_params->cmf = 0;
8037                         phba->cmf_active_mode = LPFC_CFG_OFF;
8038                         goto no_cmf;
8039                 }
8040
8041                 /* Allocate Congestion Information Buffer */
8042                 if (!phba->cgn_i) {
8043                         mp = kmalloc(sizeof(*mp), GFP_KERNEL);
8044                         if (mp)
8045                                 mp->virt = dma_alloc_coherent
8046                                                 (&phba->pcidev->dev,
8047                                                 sizeof(struct lpfc_cgn_info),
8048                                                 &mp->phys, GFP_KERNEL);
8049                         if (!mp || !mp->virt) {
8050                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8051                                                 "2640 Failed to alloc memory "
8052                                                 "for Congestion Info\n");
8053                                 kfree(mp);
8054                                 sli4_params->cmf = 0;
8055                                 phba->cmf_active_mode = LPFC_CFG_OFF;
8056                                 goto no_cmf;
8057                         }
8058                         phba->cgn_i = mp;
8059
8060                         /* initialize congestion buffer info */
8061                         lpfc_init_congestion_buf(phba);
8062                         lpfc_init_congestion_stat(phba);
8063
8064                         /* Zero out Congestion Signal counters */
8065                         atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
8066                         atomic64_set(&phba->cgn_acqe_stat.warn, 0);
8067                 }
8068
8069                 rc = lpfc_sli4_cgn_params_read(phba);
8070                 if (rc < 0) {
8071                         lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8072                                         "6242 Error reading Cgn Params (%d)\n",
8073                                         rc);
8074                         /* Ensure CGN Mode is off */
8075                         sli4_params->cmf = 0;
8076                 } else if (!rc) {
8077                         lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8078                                         "6243 CGN Event empty object.\n");
8079                         /* Ensure CGN Mode is off */
8080                         sli4_params->cmf = 0;
8081                 }
8082         } else {
8083 no_cmf:
8084                 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8085                                 "6220 CMF is disabled\n");
8086         }
8087
8088         /* Only register congestion buffer with firmware if BOTH
8089          * CMF and MI are enabled.
8090          */
8091         if (sli4_params->cmf && sli4_params->mi_ver) {
8092                 rc = lpfc_reg_congestion_buf(phba);
8093                 if (rc) {
8094                         dma_free_coherent(&phba->pcidev->dev,
8095                                           sizeof(struct lpfc_cgn_info),
8096                                           phba->cgn_i->virt, phba->cgn_i->phys);
8097                         kfree(phba->cgn_i);
8098                         phba->cgn_i = NULL;
8099                         /* Ensure CGN Mode is off */
8100                         phba->cmf_active_mode = LPFC_CFG_OFF;
8101                         return 0;
8102                 }
8103         }
8104         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8105                         "6470 Setup MI version %d CMF %d mode %d\n",
8106                         sli4_params->mi_ver, sli4_params->cmf,
8107                         phba->cmf_active_mode);
8108
8109         mempool_free(mboxq, phba->mbox_mem_pool);
8110
8111         /* Initialize atomic counters */
8112         atomic_set(&phba->cgn_fabric_warn_cnt, 0);
8113         atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
8114         atomic_set(&phba->cgn_sync_alarm_cnt, 0);
8115         atomic_set(&phba->cgn_sync_warn_cnt, 0);
8116         atomic_set(&phba->cgn_driver_evt_cnt, 0);
8117         atomic_set(&phba->cgn_latency_evt_cnt, 0);
8118         atomic64_set(&phba->cgn_latency_evt, 0);
8119
8120         phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
8121
8122         /* Allocate RX Monitor Buffer */
8123         if (!phba->rxtable) {
8124                 phba->rxtable = kmalloc_array(LPFC_MAX_RXMONITOR_ENTRY,
8125                                               sizeof(struct rxtable_entry),
8126                                               GFP_KERNEL);
8127                 if (!phba->rxtable) {
8128                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8129                                         "2644 Failed to alloc memory "
8130                                         "for RX Monitor Buffer\n");
8131                         return -ENOMEM;
8132                 }
8133         }
8134         atomic_set(&phba->rxtable_idx_head, 0);
8135         atomic_set(&phba->rxtable_idx_tail, 0);
8136         return 0;
8137 }
8138
8139 static int
8140 lpfc_set_host_tm(struct lpfc_hba *phba)
8141 {
8142         LPFC_MBOXQ_t *mboxq;
8143         uint32_t len, rc;
8144         struct timespec64 cur_time;
8145         struct tm broken;
8146         uint32_t month, day, year;
8147         uint32_t hour, minute, second;
8148         struct lpfc_mbx_set_host_date_time *tm;
8149
8150         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8151         if (!mboxq)
8152                 return -ENOMEM;
8153
8154         len = sizeof(struct lpfc_mbx_set_host_data) -
8155                 sizeof(struct lpfc_sli4_cfg_mhdr);
8156         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8157                          LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
8158                          LPFC_SLI4_MBX_EMBED);
8159
8160         mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME;
8161         mboxq->u.mqe.un.set_host_data.param_len =
8162                         sizeof(struct lpfc_mbx_set_host_date_time);
8163         tm = &mboxq->u.mqe.un.set_host_data.un.tm;
8164         ktime_get_real_ts64(&cur_time);
8165         time64_to_tm(cur_time.tv_sec, 0, &broken);
8166         month = broken.tm_mon + 1;
8167         day = broken.tm_mday;
8168         year = broken.tm_year - 100;
8169         hour = broken.tm_hour;
8170         minute = broken.tm_min;
8171         second = broken.tm_sec;
8172         bf_set(lpfc_mbx_set_host_month, tm, month);
8173         bf_set(lpfc_mbx_set_host_day, tm, day);
8174         bf_set(lpfc_mbx_set_host_year, tm, year);
8175         bf_set(lpfc_mbx_set_host_hour, tm, hour);
8176         bf_set(lpfc_mbx_set_host_min, tm, minute);
8177         bf_set(lpfc_mbx_set_host_sec, tm, second);
8178
8179         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8180         mempool_free(mboxq, phba->mbox_mem_pool);
8181         return rc;
8182 }
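
/*
 * Worked example for the field offsets used above: time64_to_tm()
 * produces a 0-based tm_mon and a tm_year counted from 1900, so the
 * mailbox wants tm_mon + 1 (1-12) and tm_year - 100 (years since
 * 2000). For instance:
 *
 *     2022-03-09 14:05:07 UTC  ->  broken.tm_mon  = 2,   month = 3
 *                                  broken.tm_year = 122, year  = 22
 */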
8183
8184 /**
8185  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
8186  * @phba: Pointer to HBA context object.
8187  *
8188  * This function is the main SLI4 device initialization PCI function. This
8189  * function is called by the HBA initialization code, HBA reset code and
8190  * HBA error attention handler code. Caller is not required to hold any
8191  * locks.
8192  **/
8193 int
8194 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
8195 {
8196         int rc, i, cnt, len, dd;
8197         LPFC_MBOXQ_t *mboxq;
8198         struct lpfc_mqe *mqe;
8199         uint8_t *vpd;
8200         uint32_t vpd_size;
8201         uint32_t ftr_rsp = 0;
8202         struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
8203         struct lpfc_vport *vport = phba->pport;
8204         struct lpfc_dmabuf *mp;
8205         struct lpfc_rqb *rqbp;
8206         u32 flg;
8207
8208         /* Perform a PCI function reset to start from clean */
8209         rc = lpfc_pci_function_reset(phba);
8210         if (unlikely(rc))
8211                 return -ENODEV;
8212
8213         /* Check the HBA Host Status Register for readiness */
8214         rc = lpfc_sli4_post_status_check(phba);
8215         if (unlikely(rc))
8216                 return -ENODEV;
8217         else {
8218                 spin_lock_irq(&phba->hbalock);
8219                 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
8220                 flg = phba->sli.sli_flag;
8221                 spin_unlock_irq(&phba->hbalock);
8222                 /* Allow a little time after setting SLI_ACTIVE for any polled
8223                  * MBX commands to complete via BSG.
8224                  */
8225                 for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
8226                         msleep(20);
8227                         spin_lock_irq(&phba->hbalock);
8228                         flg = phba->sli.sli_flag;
8229                         spin_unlock_irq(&phba->hbalock);
8230                 }
8231         }
8232
8233         lpfc_sli4_dip(phba);
8234
8235         /*
8236          * Allocate a single mailbox container for initializing the
8237          * port.
8238          */
8239         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8240         if (!mboxq)
8241                 return -ENOMEM;
8242
8243         /* Issue READ_REV to collect vpd and FW information. */
8244         vpd_size = SLI4_PAGE_SIZE;
8245         vpd = kzalloc(vpd_size, GFP_KERNEL);
8246         if (!vpd) {
8247                 rc = -ENOMEM;
8248                 goto out_free_mbox;
8249         }
8250
8251         rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
8252         if (unlikely(rc)) {
8253                 kfree(vpd);
8254                 goto out_free_mbox;
8255         }
8256
8257         mqe = &mboxq->u.mqe;
8258         phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
8259         if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
8260                 phba->hba_flag |= HBA_FCOE_MODE;
8261                 phba->fcp_embed_io = 0; /* SLI4 FC support only */
8262         } else {
8263                 phba->hba_flag &= ~HBA_FCOE_MODE;
8264         }
8265
8266         if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
8267                 LPFC_DCBX_CEE_MODE)
8268                 phba->hba_flag |= HBA_FIP_SUPPORT;
8269         else
8270                 phba->hba_flag &= ~HBA_FIP_SUPPORT;
8271
8272         phba->hba_flag &= ~HBA_IOQ_FLUSH;
8273
8274         if (phba->sli_rev != LPFC_SLI_REV4) {
8275                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8276                         "0376 READ_REV Error. SLI Level %d "
8277                         "FCoE enabled %d\n",
8278                         phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
8279                 rc = -EIO;
8280                 kfree(vpd);
8281                 goto out_free_mbox;
8282         }
8283
8284         rc = lpfc_set_host_tm(phba);
8285         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
8286                         "6468 Set host date / time: Status x%x:\n", rc);
8287
8288         /*
8289          * Continue initialization with default values even if the driver fails
8290          * to read the FCoE param config regions; only read the parameters if
8291          * the board is FCoE.
8292          */
8293         if (phba->hba_flag & HBA_FCOE_MODE &&
8294             lpfc_sli4_read_fcoe_params(phba))
8295                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
8296                         "2570 Failed to read FCoE parameters\n");
8297
8298         /*
8299          * Retrieve the SLI4 device physical port name; failure to do so
8300          * is considered non-fatal.
8301          */
8302         rc = lpfc_sli4_retrieve_pport_name(phba);
8303         if (!rc)
8304                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8305                                 "3080 Successful retrieving SLI4 device "
8306                                 "physical port name: %s.\n", phba->Port);
8307
8308         rc = lpfc_sli4_get_ctl_attr(phba);
8309         if (!rc)
8310                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8311                                 "8351 Successful retrieving SLI4 device "
8312                                 "CTL ATTR\n");
8313
8314         /*
8315          * Evaluate the read rev and vpd data. Populate the driver
8316          * state with the results. If this routine fails, the failure
8317          * is not fatal as the driver will use generic values.
8318          */
8319         rc = lpfc_parse_vpd(phba, vpd, vpd_size);
8320         if (unlikely(!rc)) {
8321                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8322                                 "0377 Error %d parsing vpd. "
8323                                 "Using defaults.\n", rc);
8324                 rc = 0;
8325         }
8326         kfree(vpd);
8327
8328         /* Save information as VPD data */
8329         phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
8330         phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
8331
8332         /*
8333          * The first G7 ASIC doesn't support the standard 0x5a NVME cmd
8334          * descriptor type/subtype, so disable embedded NVME commands there.
8335          */
8336         if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8337                         LPFC_SLI_INTF_IF_TYPE_6) &&
8338             (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
8339             (phba->vpd.rev.smRev == 0) &&
8340             (phba->cfg_nvme_embed_cmd == 1))
8341                 phba->cfg_nvme_embed_cmd = 0;
8342
8343         phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
8344         phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
8345                                          &mqe->un.read_rev);
8346         phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
8347                                        &mqe->un.read_rev);
8348         phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
8349                                             &mqe->un.read_rev);
8350         phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
8351                                            &mqe->un.read_rev);
8352         phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
8353         memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
8354         phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
8355         memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
8356         phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
8357         memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
8358         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8359                         "(%d):0380 READ_REV Status x%x "
8360                         "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
8361                         mboxq->vport ? mboxq->vport->vpi : 0,
8362                         bf_get(lpfc_mqe_status, mqe),
8363                         phba->vpd.rev.opFwName,
8364                         phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
8365                         phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
8366
8367         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8368             LPFC_SLI_INTF_IF_TYPE_0) {
8369                 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
8370                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8371                 if (rc == MBX_SUCCESS) {
8372                         phba->hba_flag |= HBA_RECOVERABLE_UE;
8373                         /* Set 1Sec interval to detect UE */
8374                         phba->eratt_poll_interval = 1;
8375                         phba->sli4_hba.ue_to_sr = bf_get(
8376                                         lpfc_mbx_set_feature_UESR,
8377                                         &mboxq->u.mqe.un.set_feature);
8378                         phba->sli4_hba.ue_to_rp = bf_get(
8379                                         lpfc_mbx_set_feature_UERP,
8380                                         &mboxq->u.mqe.un.set_feature);
8381                 }
8382         }
8383
8384         if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
8385                 /* Enable MDS Diagnostics only if the SLI Port supports it */
8386                 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
8387                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8388                 if (rc != MBX_SUCCESS)
8389                         phba->mds_diags_support = 0;
8390         }
8391
8392         /*
8393          * Discover the port's supported feature set and match it against the
8394          * host's requests.
8395          */
8396         lpfc_request_features(phba, mboxq);
8397         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8398         if (unlikely(rc)) {
8399                 rc = -EIO;
8400                 goto out_free_mbox;
8401         }
8402
8403         /* Disable VMID if app header is not supported */
8404         if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
8405                                                   &mqe->un.req_ftrs))) {
8406                 bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
8407                 phba->cfg_vmid_app_header = 0;
8408                 lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
8409                                 "1242 vmid feature not supported\n");
8410         }
8411
8412         /*
8413          * The port must support FCP initiator mode as this is the
8414          * only mode running in the host.
8415          */
8416         if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
8417                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8418                                 "0378 No support for fcpi mode.\n");
8419                 ftr_rsp++;
8420         }
8421
8422         /* Performance Hints are ONLY for FCoE */
8423         if (phba->hba_flag & HBA_FCOE_MODE) {
8424                 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
8425                         phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
8426                 else
8427                         phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
8428         }
8429
8430         /*
8431          * If the port cannot support the host's requested features
8432          * then turn off the global config parameters to disable the
8433          * feature in the driver.  This is not a fatal error.
8434          */
8435         if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8436                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
8437                         phba->cfg_enable_bg = 0;
8438                         phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
8439                         ftr_rsp++;
8440                 }
8441         }
8442
8443         if (phba->max_vpi && phba->cfg_enable_npiv &&
8444             !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8445                 ftr_rsp++;
8446
8447         if (ftr_rsp) {
8448                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8449                                 "0379 Feature Mismatch Data: x%08x %08x "
8450                                 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
8451                                 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
8452                                 phba->cfg_enable_npiv, phba->max_vpi);
8453                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
8454                         phba->cfg_enable_bg = 0;
8455                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8456                         phba->cfg_enable_npiv = 0;
8457         }
8458
8459         /* These SLI3 features are assumed in SLI4 */
8460         spin_lock_irq(&phba->hbalock);
8461         phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
8462         spin_unlock_irq(&phba->hbalock);
8463
8464         /* Always try to enable dual dump feature if we can */
8465         lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
8466         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8467         dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
8468         if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
8469                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8470                                 "6448 Dual Dump is enabled\n");
8471         else
8472                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
8473                                 "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
8474                                 "rc:x%x dd:x%x\n",
8475                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8476                                 lpfc_sli_config_mbox_subsys_get(
8477                                         phba, mboxq),
8478                                 lpfc_sli_config_mbox_opcode_get(
8479                                         phba, mboxq),
8480                                 rc, dd);
8481         /*
8482          * Allocate all resources (xri, rpi, vpi, vfi) now. Subsequent
8483          * calls depend on these resources to complete port setup.
8484          */
8485         rc = lpfc_sli4_alloc_resource_identifiers(phba);
8486         if (rc) {
8487                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8488                                 "2920 Failed to alloc Resource IDs "
8489                                 "rc = x%x\n", rc);
8490                 goto out_free_mbox;
8491         }
8492
8493         lpfc_set_host_data(phba, mboxq);
8494
8495         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8496         if (rc) {
8497                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8498                                 "2134 Failed to set host os driver version %x",
8499                                 rc);
8500         }
8501
8502         /* Read the port's service parameters. */
8503         rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
8504         if (rc) {
8505                 phba->link_state = LPFC_HBA_ERROR;
8506                 rc = -ENOMEM;
8507                 goto out_free_mbox;
8508         }
8509
8510         mboxq->vport = vport;
8511         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8512         mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
8513         if (rc == MBX_SUCCESS) {
8514                 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
8515                 rc = 0;
8516         }
8517
8518         /*
8519          * This memory was allocated by the lpfc_read_sparam routine. Release
8520          * it to the mbuf pool.
8521          */
8522         lpfc_mbuf_free(phba, mp->virt, mp->phys);
8523         kfree(mp);
8524         mboxq->ctx_buf = NULL;
8525         if (unlikely(rc)) {
8526                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8527                                 "0382 READ_SPARAM command failed "
8528                                 "status %d, mbxStatus x%x\n",
8529                                 rc, bf_get(lpfc_mqe_status, mqe));
8530                 phba->link_state = LPFC_HBA_ERROR;
8531                 rc = -EIO;
8532                 goto out_free_mbox;
8533         }
8534
8535         lpfc_update_vport_wwn(vport);
8536
8537         /* Update the fc_host data structures with new wwn. */
8538         fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
8539         fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
8540
8541         /* Create all the SLI4 queues */
8542         rc = lpfc_sli4_queue_create(phba);
8543         if (rc) {
8544                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8545                                 "3089 Failed to allocate queues\n");
8546                 rc = -ENODEV;
8547                 goto out_free_mbox;
8548         }
8549         /* Set up all the queues to the device */
8550         rc = lpfc_sli4_queue_setup(phba);
8551         if (unlikely(rc)) {
8552                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8553                                 "0381 Error %d during queue setup.\n ", rc);
8554                 goto out_stop_timers;
8555         }
8556         /* Initialize the driver internal SLI layer lists. */
8557         lpfc_sli4_setup(phba);
8558         lpfc_sli4_queue_init(phba);
8559
8560         /* update host els xri-sgl sizes and mappings */
8561         rc = lpfc_sli4_els_sgl_update(phba);
8562         if (unlikely(rc)) {
8563                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8564                                 "1400 Failed to update xri-sgl size and "
8565                                 "mapping: %d\n", rc);
8566                 goto out_destroy_queue;
8567         }
8568
8569         /* register the els sgl pool to the port */
8570         rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
8571                                        phba->sli4_hba.els_xri_cnt);
8572         if (unlikely(rc < 0)) {
8573                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8574                                 "0582 Error %d during els sgl post "
8575                                 "operation\n", rc);
8576                 rc = -ENODEV;
8577                 goto out_destroy_queue;
8578         }
8579         phba->sli4_hba.els_xri_cnt = rc;
8580
8581         if (phba->nvmet_support) {
8582                 /* update host nvmet xri-sgl sizes and mappings */
8583                 rc = lpfc_sli4_nvmet_sgl_update(phba);
8584                 if (unlikely(rc)) {
8585                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8586                                         "6308 Failed to update nvmet-sgl size "
8587                                         "and mapping: %d\n", rc);
8588                         goto out_destroy_queue;
8589                 }
8590
8591                 /* register the nvmet sgl pool to the port */
8592                 rc = lpfc_sli4_repost_sgl_list(
8593                         phba,
8594                         &phba->sli4_hba.lpfc_nvmet_sgl_list,
8595                         phba->sli4_hba.nvmet_xri_cnt);
8596                 if (unlikely(rc < 0)) {
8597                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8598                                         "3117 Error %d during nvmet "
8599                                         "sgl post\n", rc);
8600                         rc = -ENODEV;
8601                         goto out_destroy_queue;
8602                 }
8603                 phba->sli4_hba.nvmet_xri_cnt = rc;
8604
8605                 /* We allocate an iocbq for every receive context SGL.
8606                  * The additional allocation is for abort and ls handling.
8607                  */
8608                 cnt = phba->sli4_hba.nvmet_xri_cnt +
8609                         phba->sli4_hba.max_cfg_param.max_xri;
8610         } else {
8611                 /* update host common xri-sgl sizes and mappings */
8612                 rc = lpfc_sli4_io_sgl_update(phba);
8613                 if (unlikely(rc)) {
8614                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8615                                         "6082 Failed to update nvme-sgl size "
8616                                         "and mapping: %d\n", rc);
8617                         goto out_destroy_queue;
8618                 }
8619
8620                 /* register the allocated common sgl pool to the port */
8621                 rc = lpfc_sli4_repost_io_sgl_list(phba);
8622                 if (unlikely(rc)) {
8623                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8624                                         "6116 Error %d during nvme sgl post "
8625                                         "operation\n", rc);
8626                         /* Some NVME buffers were moved to abort nvme list */
8627                         /* A pci function reset will repost them */
8628                         rc = -ENODEV;
8629                         goto out_destroy_queue;
8630                 }
8631                 /* Each lpfc_io_buf job structure has an iocbq element.
8632                  * This cnt provides for abort, els, ct and ls requests.
8633                  */
8634                 cnt = phba->sli4_hba.max_cfg_param.max_xri;
8635         }
8636
8637         if (!phba->sli.iocbq_lookup) {
8638                 /* Initialize and populate the iocb list per host */
8639                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8640                                 "2821 initialize iocb list with %d entries\n",
8641                                 cnt);
8642                 rc = lpfc_init_iocb_list(phba, cnt);
8643                 if (rc) {
8644                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8645                                         "1413 Failed to init iocb list.\n");
8646                         goto out_destroy_queue;
8647                 }
8648         }
8649
8650         if (phba->nvmet_support)
8651                 lpfc_nvmet_create_targetport(phba);
8652
8653         if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
8654                 /* Post initial buffers to all RQs created */
8655                 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
8656                         rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
8657                         INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
8658                         rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
8659                         rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
8660                         rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
8661                         rqbp->buffer_count = 0;
8662
8663                         lpfc_post_rq_buffer(
8664                                 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
8665                                 phba->sli4_hba.nvmet_mrq_data[i],
8666                                 phba->cfg_nvmet_mrq_post, i);
8667                 }
8668         }
8669
8670         /* Post the rpi header region to the device. */
8671         rc = lpfc_sli4_post_all_rpi_hdrs(phba);
8672         if (unlikely(rc)) {
8673                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8674                                 "0393 Error %d during rpi post operation\n",
8675                                 rc);
8676                 rc = -ENODEV;
8677                 goto out_free_iocblist;
8678         }
8679         lpfc_sli4_node_prep(phba);
8680
8681         if (!(phba->hba_flag & HBA_FCOE_MODE)) {
8682                 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
8683                         /*
8684                          * The FC Port needs to register FCFI (index 0)
8685                          */
8686                         lpfc_reg_fcfi(phba, mboxq);
8687                         mboxq->vport = phba->pport;
8688                         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8689                         if (rc != MBX_SUCCESS)
8690                                 goto out_unset_queue;
8691                         rc = 0;
8692                         phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
8693                                                 &mboxq->u.mqe.un.reg_fcfi);
8694                 } else {
8695                         /* We are in NVME Target mode with MRQ > 1 */
8696
8697                         /* First register the FCFI */
8698                         lpfc_reg_fcfi_mrq(phba, mboxq, 0);
8699                         mboxq->vport = phba->pport;
8700                         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8701                         if (rc != MBX_SUCCESS)
8702                                 goto out_unset_queue;
8703                         rc = 0;
8704                         phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
8705                                                 &mboxq->u.mqe.un.reg_fcfi_mrq);
8706
8707                         /* Next register the MRQs */
8708                         lpfc_reg_fcfi_mrq(phba, mboxq, 1);
8709                         mboxq->vport = phba->pport;
8710                         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8711                         if (rc != MBX_SUCCESS)
8712                                 goto out_unset_queue;
8713                         rc = 0;
8714                 }
8715                 /* Check if the port is configured to be disabled */
8716                 lpfc_sli_read_link_ste(phba);
8717         }
8718
8719         /* Don't post more new bufs if repost already recovered
8720          * the nvme sgls.
8721          */
8722         if (phba->nvmet_support == 0) {
8723                 if (phba->sli4_hba.io_xri_cnt == 0) {
8724                         len = lpfc_new_io_buf(
8725                                               phba, phba->sli4_hba.io_xri_max);
8726                         if (len == 0) {
8727                                 rc = -ENOMEM;
8728                                 goto out_unset_queue;
8729                         }
8730
8731                         if (phba->cfg_xri_rebalancing)
8732                                 lpfc_create_multixri_pools(phba);
8733                 }
8734         } else {
8735                 phba->cfg_xri_rebalancing = 0;
8736         }
8737
8738         /* Allow asynchronous mailbox command to go through */
8739         spin_lock_irq(&phba->hbalock);
8740         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8741         spin_unlock_irq(&phba->hbalock);
8742
8743         /* Post receive buffers to the device */
8744         lpfc_sli4_rb_setup(phba);
8745
8746         /* Reset HBA FCF states after HBA reset */
8747         phba->fcf.fcf_flag = 0;
8748         phba->fcf.current_rec.flag = 0;
8749
8750         /* Start the ELS watchdog timer */
8751         mod_timer(&vport->els_tmofunc,
8752                   jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
8753
8754         /* Start heart beat timer */
8755         mod_timer(&phba->hb_tmofunc,
8756                   jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
8757         phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
8758         phba->last_completion_time = jiffies;
8759
8760         /* start eq_delay heartbeat */
8761         if (phba->cfg_auto_imax)
8762                 queue_delayed_work(phba->wq, &phba->eq_delay_work,
8763                                    msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
8764
8765         /* start per phba idle_stat_delay heartbeat */
8766         lpfc_init_idle_stat_hb(phba);
8767
8768         /* Start error attention (ERATT) polling timer */
8769         mod_timer(&phba->eratt_poll,
8770                   jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
8771
8772         /* Enable PCIe device Advanced Error Reporting (AER) if configured */
8773         if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
8774                 rc = pci_enable_pcie_error_reporting(phba->pcidev);
8775                 if (!rc) {
8776                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8777                                         "2829 This device supports "
8778                                         "Advanced Error Reporting (AER)\n");
8779                         spin_lock_irq(&phba->hbalock);
8780                         phba->hba_flag |= HBA_AER_ENABLED;
8781                         spin_unlock_irq(&phba->hbalock);
8782                 } else {
8783                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8784                                         "2830 This device does not support "
8785                                         "Advanced Error Reporting (AER)\n");
8786                         phba->cfg_aer_support = 0;
8787                 }
8788                 rc = 0;
8789         }
8790
8791         /*
8792          * The port is ready, set the host's link state to LINK_DOWN
8793          * in preparation for link interrupts.
8794          */
8795         spin_lock_irq(&phba->hbalock);
8796         phba->link_state = LPFC_LINK_DOWN;
8797
8798         /* Check if physical ports are trunked */
8799         if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
8800                 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
8801         if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
8802                 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
8803         if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
8804                 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
8805         if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
8806                 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
8807         spin_unlock_irq(&phba->hbalock);
8808
8809         /* Arm the CQs and then EQs on device */
8810         lpfc_sli4_arm_cqeq_intr(phba);
8811
8812         /* Indicate device interrupt mode */
8813         phba->sli4_hba.intr_enable = 1;
8814
8815         /* Setup CMF after HBA is initialized */
8816         lpfc_cmf_setup(phba);
8817
8818         if (!(phba->hba_flag & HBA_FCOE_MODE) &&
8819             (phba->hba_flag & LINK_DISABLED)) {
8820                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8821                                 "3103 Adapter Link is disabled.\n");
8822                 lpfc_down_link(phba, mboxq);
8823                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8824                 if (rc != MBX_SUCCESS) {
8825                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8826                                         "3104 Adapter failed to issue "
8827                                         "DOWN_LINK mbox cmd, rc:x%x\n", rc);
8828                         goto out_io_buff_free;
8829                 }
8830         } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
8831                 /* don't perform init_link on SLI4 FC port loopback test */
8832                 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
8833                         rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
8834                         if (rc)
8835                                 goto out_io_buff_free;
8836                 }
8837         }
8838         mempool_free(mboxq, phba->mbox_mem_pool);
8839
8840         phba->hba_flag |= HBA_SETUP;
8841         return rc;
8842
8843 out_io_buff_free:
8844         /* Free allocated IO Buffers */
8845         lpfc_io_free(phba);
8846 out_unset_queue:
8847         /* Unset all the queues set up in this routine on error */
8848         lpfc_sli4_queue_unset(phba);
8849 out_free_iocblist:
8850         lpfc_free_iocb_list(phba);
8851 out_destroy_queue:
8852         lpfc_sli4_queue_destroy(phba);
8853 out_stop_timers:
8854         lpfc_stop_hba_timers(phba);
8855 out_free_mbox:
8856         mempool_free(mboxq, phba->mbox_mem_pool);
8857         return rc;
8858 }
8859
8860 /**
8861  * lpfc_mbox_timeout - Timeout call back function for mbox timer
8862  * @t: Context to fetch pointer to hba structure from.
8863  *
8864  * This is the callback function for mailbox timer. The mailbox
8865  * timer is armed when a new mailbox command is issued and the timer
8866  * is deleted when the mailbox completes. The function is called by
8867  * the kernel timer code when a mailbox does not complete within
8868  * expected time. This function wakes up the worker thread to
8869  * process the mailbox timeout and returns. All the processing is
8870  * done by the worker thread function lpfc_mbox_timeout_handler.
8871  **/
8872 void
8873 lpfc_mbox_timeout(struct timer_list *t)
8874 {
8875         struct lpfc_hba  *phba = from_timer(phba, t, sli.mbox_tmo);
8876         unsigned long iflag;
8877         uint32_t tmo_posted;
8878
8879         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
8880         tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
8881         if (!tmo_posted)
8882                 phba->pport->work_port_events |= WORKER_MBOX_TMO;
8883         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
8884
8885         if (!tmo_posted)
8886                 lpfc_worker_wake_up(phba);
8887         return;
8888 }
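
/*
 * Arming sketch (illustrative; the actual arm sites live in the mailbox
 * issue paths of this file). The timeout is command-specific, derived
 * via lpfc_mbox_tmo_val(), and the timer is rearmed for each command:
 *
 *     mod_timer(&psli->mbox_tmo, jiffies +
 *               msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, pmbox)));
 */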
8889
8890 /**
8891  * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
8892  *                                    are pending
8893  * @phba: Pointer to HBA context object.
8894  *
8895  * This function checks if any mailbox completions are present on the mailbox
8896  * completion queue.
8897  **/
8898 static bool
8899 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
8900 {
8901
8902         uint32_t idx;
8903         struct lpfc_queue *mcq;
8904         struct lpfc_mcqe *mcqe;
8905         bool pending_completions = false;
8906         uint8_t qe_valid;
8907
8908         if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8909                 return false;
8910
8911         /* Check for completions on mailbox completion queue */
8912
8913         mcq = phba->sli4_hba.mbx_cq;
8914         idx = mcq->hba_index;
8915         qe_valid = mcq->qe_valid;
8916         while (bf_get_le32(lpfc_cqe_valid,
8917                (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
8918                 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
8919                 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
8920                     (!bf_get_le32(lpfc_trailer_async, mcqe))) {
8921                         pending_completions = true;
8922                         break;
8923                 }
8924                 idx = (idx + 1) % mcq->entry_count;
8925                 if (mcq->hba_index == idx)
8926                         break;
8927
8928                 /* if the index wrapped around, toggle the valid bit */
8929                 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
8930                         qe_valid = (qe_valid) ? 0 : 1;
8931         }
8932         return pending_completions;
8933
8934 }
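
/*
 * Worked example for the valid-bit scan above (assumed values, for
 * illustration only): with entry_count = 16, pc_sli4_params.cqav set
 * and qe_valid = 1, entries are consumed from hba_index upward while
 * their valid bit reads 1; when idx wraps from 15 back to 0, the
 * expected valid bit toggles to 0, so entries left over from the
 * previous pass through the ring are not mistaken for new completions.
 */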
8935
8936 /**
8937  * lpfc_sli4_process_missed_mbox_completions - process mbox completions
8938  *                                            that were missed.
8939  * @phba: Pointer to HBA context object.
8940  *
8941  * For SLI4, it is possible to miss an interrupt. As such, mbox completions
8942  * may be missed, causing erroneous mailbox timeouts to occur. This function
8943  * checks to see if mbox completions are present on the mailbox completion
8944  * queue and, if so, processes all the events on the EQ associated with the
8945  * mailbox completion queue.
8946  **/
8947 static bool
8948 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
8949 {
8950         struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
8951         uint32_t eqidx;
8952         struct lpfc_queue *fpeq = NULL;
8953         struct lpfc_queue *eq;
8954         bool mbox_pending;
8955
8956         if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8957                 return false;
8958
8959         /* Find the EQ associated with the mbox CQ */
8960         if (sli4_hba->hdwq) {
8961                 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
8962                         eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
8963                         if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
8964                                 fpeq = eq;
8965                                 break;
8966                         }
8967                 }
8968         }
8969         if (!fpeq)
8970                 return false;
8971
8972         /* Turn off interrupts from this EQ */
8973
8974         sli4_hba->sli4_eq_clr_intr(fpeq);
8975
8976         /* Check to see if a mbox completion is pending */
8977
8978         mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
8979
8980         /*
8981          * If a mbox completion is pending, process all the events on EQ
8982          * associated with the mbox completion queue (this could include
8983          * mailbox commands, async events, els commands, receive queue data
8984          * and fcp commands)
8985          */
8986
8987         if (mbox_pending)
8988                 /* process and rearm the EQ */
8989                 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
8990         else
8991                 /* Always clear and re-arm the EQ */
8992                 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
8993
8994         return mbox_pending;
8995
8996 }
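
/*
 * Typical use (sketch): paths that are about to treat a mailbox as
 * timed out call this routine first, so that a completion whose
 * interrupt was lost is processed instead of escalating to a reset;
 * see lpfc_mbox_timeout_handler() below:
 *
 *	lpfc_sli4_process_missed_mbox_completions(phba);
 *	if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
 *		return;
 */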
8997
8998 /**
8999  * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
9000  * @phba: Pointer to HBA context object.
9001  *
9002  * This function is called from the worker thread when a mailbox command times out.
9003  * The caller is not required to hold any locks. This function will reset the
9004  * HBA and recover all the pending commands.
9005  **/
9006 void
9007 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
9008 {
9009         LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
9010         MAILBOX_t *mb = NULL;
9011
9012         struct lpfc_sli *psli = &phba->sli;
9013
9014         /* If the mailbox completed, process the completion */
9015         lpfc_sli4_process_missed_mbox_completions(phba);
9016
9017         if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
9018                 return;
9019
9020         if (pmbox != NULL)
9021                 mb = &pmbox->u.mb;
9022         /* Check the pmbox pointer first.  There is a race condition
9023          * between the mbox timeout handler getting executed in the
9024          * worklist and the mailbox actually completing. When this
9025          * race condition occurs, the mbox_active will be NULL.
9026          */
9027         spin_lock_irq(&phba->hbalock);
9028         if (pmbox == NULL) {
9029                 lpfc_printf_log(phba, KERN_WARNING,
9030                                 LOG_MBOX | LOG_SLI,
9031                                 "0353 Active Mailbox cleared - mailbox timeout "
9032                                 "exiting\n");
9033                 spin_unlock_irq(&phba->hbalock);
9034                 return;
9035         }
9036
9037         /* Mbox cmd <mbxCommand> timeout */
9038         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9039                         "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
9040                         mb->mbxCommand,
9041                         phba->pport->port_state,
9042                         phba->sli.sli_flag,
9043                         phba->sli.mbox_active);
9044         spin_unlock_irq(&phba->hbalock);
9045
9046         /* Setting state unknown so lpfc_sli_abort_iocb_ring
9047          * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
9048          * it to fail all outstanding SCSI IO.
9049          */
9050         spin_lock_irq(&phba->pport->work_port_lock);
9051         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
9052         spin_unlock_irq(&phba->pport->work_port_lock);
9053         spin_lock_irq(&phba->hbalock);
9054         phba->link_state = LPFC_LINK_UNKNOWN;
9055         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9056         spin_unlock_irq(&phba->hbalock);
9057
9058         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9059                         "0345 Resetting board due to mailbox timeout\n");
9060
9061         /* Reset the HBA device */
9062         lpfc_reset_hba(phba);
9063 }
9064
9065 /**
9066  * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
9067  * @phba: Pointer to HBA context object.
9068  * @pmbox: Pointer to mailbox object.
9069  * @flag: Flag indicating how the mailbox need to be processed.
9070  *
9071  * This function is called by discovery code and HBA management code
9072  * to submit a mailbox command to firmware with SLI-3 interface spec. This
9073  * function gets the hbalock to protect the data structures.
9074  * The mailbox command can be submitted in polling mode, in which case
9075  * this function will wait in a polling loop for the completion of the
9076  * mailbox.
9077  * If the mailbox is submitted in no_wait mode (not polling), the
9078  * function will submit the command and return immediately without
9079  * waiting for the mailbox completion. The no_wait mode is supported
9080  * only when the HBA is in SLI2/SLI3 mode, with interrupts enabled.
9081  * The SLI interface allows only one mailbox pending at a time. If the
9082  * mailbox is issued in polling mode and there is already a mailbox
9083  * pending, then the function will return an error. If the mailbox is issued
9084  * in NO_WAIT mode and there is a mailbox pending already, the function
9085  * will return MBX_BUSY after queuing the mailbox into the mailbox queue.
9086  * The SLI layer owns the mailbox object until the completion of the mailbox
9087  * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
9088  * return codes the caller owns the mailbox command after the return of
9089  * the function.
9090  **/
9091 static int
9092 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
9093                        uint32_t flag)
9094 {
9095         MAILBOX_t *mbx;
9096         struct lpfc_sli *psli = &phba->sli;
9097         uint32_t status, evtctr;
9098         uint32_t ha_copy, hc_copy;
9099         int i;
9100         unsigned long timeout;
9101         unsigned long drvr_flag = 0;
9102         uint32_t word0, ldata;
9103         void __iomem *to_slim;
9104         int processing_queue = 0;
9105
9106         spin_lock_irqsave(&phba->hbalock, drvr_flag);
9107         if (!pmbox) {
9108                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9109                 /* processing mbox queue from intr_handler */
9110                 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9111                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9112                         return MBX_SUCCESS;
9113                 }
9114                 processing_queue = 1;
9115                 pmbox = lpfc_mbox_get(phba);
9116                 if (!pmbox) {
9117                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9118                         return MBX_SUCCESS;
9119                 }
9120         }
9121
9122         if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
9123                 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
9124                 if (!pmbox->vport) {
9125                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9126                         lpfc_printf_log(phba, KERN_ERR,
9127                                         LOG_MBOX | LOG_VPORT,
9128                                         "1806 Mbox x%x failed. No vport\n",
9129                                         pmbox->u.mb.mbxCommand);
9130                         dump_stack();
9131                         goto out_not_finished;
9132                 }
9133         }
9134
9135         /* If the PCI channel is in offline state, do not post mbox. */
9136         if (unlikely(pci_channel_offline(phba->pcidev))) {
9137                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9138                 goto out_not_finished;
9139         }
9140
9141         /* If HBA has a deferred error attention, fail the iocb. */
9142         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
9143                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9144                 goto out_not_finished;
9145         }
9146
9147         psli = &phba->sli;
9148
9149         mbx = &pmbox->u.mb;
9150         status = MBX_SUCCESS;
9151
9152         if (phba->link_state == LPFC_HBA_ERROR) {
9153                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9154
9155                 /* Mbox command <mbxCommand> cannot issue */
9156                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9157                                 "(%d):0311 Mailbox command x%x cannot "
9158                                 "issue Data: x%x x%x\n",
9159                                 pmbox->vport ? pmbox->vport->vpi : 0,
9160                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9161                 goto out_not_finished;
9162         }
9163
9164         if (mbx->mbxCommand != MBX_KILL_BOARD && (flag & MBX_NOWAIT)) {
9165                 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
9166                         !(hc_copy & HC_MBINT_ENA)) {
9167                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9168                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9169                                 "(%d):2528 Mailbox command x%x cannot "
9170                                 "issue Data: x%x x%x\n",
9171                                 pmbox->vport ? pmbox->vport->vpi : 0,
9172                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9173                         goto out_not_finished;
9174                 }
9175         }
9176
9177         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9178                 /* Polling for a mbox command when another one is already active
9179                  * is not allowed in SLI. Also, the driver must have established
9180                  * SLI2 mode to queue and process multiple mbox commands.
9181                  */
9182
9183                 if (flag & MBX_POLL) {
9184                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9185
9186                         /* Mbox command <mbxCommand> cannot issue */
9187                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9188                                         "(%d):2529 Mailbox command x%x "
9189                                         "cannot issue Data: x%x x%x\n",
9190                                         pmbox->vport ? pmbox->vport->vpi : 0,
9191                                         pmbox->u.mb.mbxCommand,
9192                                         psli->sli_flag, flag);
9193                         goto out_not_finished;
9194                 }
9195
9196                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
9197                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9198                         /* Mbox command <mbxCommand> cannot issue */
9199                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9200                                         "(%d):2530 Mailbox command x%x "
9201                                         "cannot issue Data: x%x x%x\n",
9202                                         pmbox->vport ? pmbox->vport->vpi : 0,
9203                                         pmbox->u.mb.mbxCommand,
9204                                         psli->sli_flag, flag);
9205                         goto out_not_finished;
9206                 }
9207
9208                 /* Another mailbox command is still being processed, queue this
9209                  * command to be processed later.
9210                  */
9211                 lpfc_mbox_put(phba, pmbox);
9212
9213                 /* Mbox cmd issue - BUSY */
9214                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9215                                 "(%d):0308 Mbox cmd issue - BUSY Data: "
9216                                 "x%x x%x x%x x%x\n",
9217                                 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
9218                                 mbx->mbxCommand,
9219                                 phba->pport ? phba->pport->port_state : 0xff,
9220                                 psli->sli_flag, flag);
9221
9222                 psli->slistat.mbox_busy++;
9223                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9224
9225                 if (pmbox->vport) {
9226                         lpfc_debugfs_disc_trc(pmbox->vport,
9227                                 LPFC_DISC_TRC_MBOX_VPORT,
9228                                 "MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
9229                                 (uint32_t)mbx->mbxCommand,
9230                                 mbx->un.varWords[0], mbx->un.varWords[1]);
9231                 }
9232                 else {
9233                         lpfc_debugfs_disc_trc(phba->pport,
9234                                 LPFC_DISC_TRC_MBOX,
9235                                 "MBOX Bsy:        cmd:x%x mb:x%x x%x",
9236                                 (uint32_t)mbx->mbxCommand,
9237                                 mbx->un.varWords[0], mbx->un.varWords[1]);
9238                 }
9239
9240                 return MBX_BUSY;
9241         }
9242
9243         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9244
9245         /* If we are not polling, we MUST be in SLI2 mode */
9246         if (flag != MBX_POLL) {
9247                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
9248                     (mbx->mbxCommand != MBX_KILL_BOARD)) {
9249                         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9250                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9251                         /* Mbox command <mbxCommand> cannot issue */
9252                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9253                                         "(%d):2531 Mailbox command x%x "
9254                                         "cannot issue Data: x%x x%x\n",
9255                                         pmbox->vport ? pmbox->vport->vpi : 0,
9256                                         pmbox->u.mb.mbxCommand,
9257                                         psli->sli_flag, flag);
9258                         goto out_not_finished;
9259                 }
9260                 /* timeout active mbox command */
9261                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9262                                            1000);
9263                 mod_timer(&psli->mbox_tmo, jiffies + timeout);
9264         }
9265
9266         /* Mailbox cmd <cmd> issue */
9267         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9268                         "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
9269                         "x%x\n",
9270                         pmbox->vport ? pmbox->vport->vpi : 0,
9271                         mbx->mbxCommand,
9272                         phba->pport ? phba->pport->port_state : 0xff,
9273                         psli->sli_flag, flag);
9274
9275         if (mbx->mbxCommand != MBX_HEARTBEAT) {
9276                 if (pmbox->vport) {
9277                         lpfc_debugfs_disc_trc(pmbox->vport,
9278                                 LPFC_DISC_TRC_MBOX_VPORT,
9279                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9280                                 (uint32_t)mbx->mbxCommand,
9281                                 mbx->un.varWords[0], mbx->un.varWords[1]);
9282                 }
9283                 else {
9284                         lpfc_debugfs_disc_trc(phba->pport,
9285                                 LPFC_DISC_TRC_MBOX,
9286                                 "MBOX Send:       cmd:x%x mb:x%x x%x",
9287                                 (uint32_t)mbx->mbxCommand,
9288                                 mbx->un.varWords[0], mbx->un.varWords[1]);
9289                 }
9290         }
9291
9292         psli->slistat.mbox_cmd++;
9293         evtctr = psli->slistat.mbox_event;
9294
9295         /* next set own bit for the adapter and copy over command word */
9296         mbx->mbxOwner = OWN_CHIP;
9297
9298         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9299                 /* Populate mbox extension offset word. */
9300                 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
9301                         *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9302                                 = (uint8_t *)phba->mbox_ext
9303                                   - (uint8_t *)phba->mbox;
9304                 }
9305
9306                 /* Copy the mailbox extension data */
9307                 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
9308                         lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
9309                                               (uint8_t *)phba->mbox_ext,
9310                                               pmbox->in_ext_byte_len);
9311                 }
9312                 /* Copy command data to host SLIM area */
9313                 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
9314         } else {
9315                 /* Populate mbox extension offset word. */
9316                 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
9317                         *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9318                                 = MAILBOX_HBA_EXT_OFFSET;
9319
9320                 /* Copy the mailbox extension data */
9321                 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
9322                         lpfc_memcpy_to_slim(phba->MBslimaddr +
9323                                 MAILBOX_HBA_EXT_OFFSET,
9324                                 pmbox->ctx_buf, pmbox->in_ext_byte_len);
9325
9326                 if (mbx->mbxCommand == MBX_CONFIG_PORT)
9327                         /* copy command data into host mbox for cmpl */
9328                         lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
9329                                               MAILBOX_CMD_SIZE);
9330
9331                 /* First copy mbox command data to HBA SLIM, skip past
9332                  * first word */
9333                 to_slim = phba->MBslimaddr + sizeof(uint32_t);
9334                 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
9335                             MAILBOX_CMD_SIZE - sizeof(uint32_t));
9336
9337                 /* Next copy over first word, with mbxOwner set */
9338                 ldata = *((uint32_t *)mbx);
9339                 to_slim = phba->MBslimaddr;
9340                 writel(ldata, to_slim);
9341                 readl(to_slim); /* flush */
9342
9343                 if (mbx->mbxCommand == MBX_CONFIG_PORT)
9344                         /* switch over to host mailbox */
9345                         psli->sli_flag |= LPFC_SLI_ACTIVE;
9346         }
9347
9348         wmb();
9349
9350         switch (flag) {
9351         case MBX_NOWAIT:
9352                 /* Set up reference to mailbox command */
9353                 psli->mbox_active = pmbox;
9354                 /* Interrupt board to do it */
9355                 writel(CA_MBATT, phba->CAregaddr);
9356                 readl(phba->CAregaddr); /* flush */
9357                 /* Don't wait for it to finish, just return */
9358                 break;
9359
9360         case MBX_POLL:
9361                 /* Set up null reference to mailbox command */
9362                 psli->mbox_active = NULL;
9363                 /* Interrupt board to do it */
9364                 writel(CA_MBATT, phba->CAregaddr);
9365                 readl(phba->CAregaddr); /* flush */
9366
9367                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9368                         /* First read mbox status word */
9369                         word0 = *((uint32_t *)phba->mbox);
9370                         word0 = le32_to_cpu(word0);
9371                 } else {
9372                         /* First read mbox status word */
9373                         if (lpfc_readl(phba->MBslimaddr, &word0)) {
9374                                 spin_unlock_irqrestore(&phba->hbalock,
9375                                                        drvr_flag);
9376                                 goto out_not_finished;
9377                         }
9378                 }
9379
9380                 /* Read the HBA Host Attention Register */
9381                 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9382                         spin_unlock_irqrestore(&phba->hbalock,
9383                                                        drvr_flag);
9384                         goto out_not_finished;
9385                 }
9386                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9387                                                         1000) + jiffies;
9388                 i = 0;
9389                 /* Wait for command to complete */
9390                 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
9391                        (!(ha_copy & HA_MBATT) &&
9392                         (phba->link_state > LPFC_WARM_START))) {
9393                         if (time_after(jiffies, timeout)) {
9394                                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9395                                 spin_unlock_irqrestore(&phba->hbalock,
9396                                                        drvr_flag);
9397                                 goto out_not_finished;
9398                         }
9399
9400                         /* Check if we took a mbox interrupt while we were
9401                            polling */
9402                         if (((word0 & OWN_CHIP) != OWN_CHIP)
9403                             && (evtctr != psli->slistat.mbox_event))
9404                                 break;
9405
9406                         if (i++ > 10) {
9407                                 spin_unlock_irqrestore(&phba->hbalock,
9408                                                        drvr_flag);
9409                                 msleep(1);
9410                                 spin_lock_irqsave(&phba->hbalock, drvr_flag);
9411                         }
9412
9413                         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9414                                 /* First copy command data */
9415                                 word0 = *((uint32_t *)phba->mbox);
9416                                 word0 = le32_to_cpu(word0);
9417                                 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
9418                                         MAILBOX_t *slimmb;
9419                                         uint32_t slimword0;
9420                                         /* Check real SLIM for any errors */
9421                                         slimword0 = readl(phba->MBslimaddr);
9422                                         slimmb = (MAILBOX_t *)&slimword0;
9423                                         if (((slimword0 & OWN_CHIP) != OWN_CHIP)
9424                                             && slimmb->mbxStatus) {
9425                                                 psli->sli_flag &=
9426                                                     ~LPFC_SLI_ACTIVE;
9427                                                 word0 = slimword0;
9428                                         }
9429                                 }
9430                         } else {
9431                                 /* First copy command data */
9432                                 word0 = readl(phba->MBslimaddr);
9433                         }
9434                         /* Read the HBA Host Attention Register */
9435                         if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9436                                 spin_unlock_irqrestore(&phba->hbalock,
9437                                                        drvr_flag);
9438                                 goto out_not_finished;
9439                         }
9440                 }
9441
9442                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9443                         /* copy results back to user */
9444                         lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
9445                                                 MAILBOX_CMD_SIZE);
9446                         /* Copy the mailbox extension data */
9447                         if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
9448                                 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
9449                                                       pmbox->ctx_buf,
9450                                                       pmbox->out_ext_byte_len);
9451                         }
9452                 } else {
9453                         /* First copy command data */
9454                         lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
9455                                                 MAILBOX_CMD_SIZE);
9456                         /* Copy the mailbox extension data */
9457                         if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
9458                                 lpfc_memcpy_from_slim(
9459                                         pmbox->ctx_buf,
9460                                         phba->MBslimaddr +
9461                                         MAILBOX_HBA_EXT_OFFSET,
9462                                         pmbox->out_ext_byte_len);
9463                         }
9464                 }
9465
9466                 writel(HA_MBATT, phba->HAregaddr);
9467                 readl(phba->HAregaddr); /* flush */
9468
9469                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9470                 status = mbx->mbxStatus;
9471         }
9472
9473         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9474         return status;
9475
9476 out_not_finished:
9477         if (processing_queue) {
9478                 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
9479                 lpfc_mbox_cmpl_put(phba, pmbox);
9480         }
9481         return MBX_NOT_FINISHED;
9482 }
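
/*
 * Caller-side sketch of the ownership rule documented above
 * (illustrative only, error handling trimmed): on MBX_BUSY or
 * MBX_SUCCESS the SLI layer owns pmb until the command completes; on
 * any other return code the caller must reclaim it.
 *
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 *	if (rc == MBX_NOT_FINISHED)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 *	(on MBX_BUSY/MBX_SUCCESS, pmb is freed by its completion handler)
 */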
9483
9484 /**
9485  * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
9486  * @phba: Pointer to HBA context object.
9487  *
9488  * The function blocks the posting of SLI4 asynchronous mailbox commands from
9489  * the driver internal pending mailbox queue. It will then try to wait out any
9490  * possible outstanding mailbox command before returning.
9491  *
9492  * Returns:
9493  *      0 - the outstanding mailbox command completed.
9494  *      1 - the wait for the outstanding mailbox command timed out.
9495  **/
9496 static int
9497 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
9498 {
9499         struct lpfc_sli *psli = &phba->sli;
9500         LPFC_MBOXQ_t *mboxq;
9501         int rc = 0;
9502         unsigned long timeout = 0;
9503         u32 sli_flag;
9504         u8 cmd, subsys, opcode;
9505
9506         /* Mark the asynchronous mailbox command posting as blocked */
9507         spin_lock_irq(&phba->hbalock);
9508         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9509         /* Determine how long we might wait for the active mailbox
9510          * command to be gracefully completed by firmware.
9511          */
9512         if (phba->sli.mbox_active)
9513                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
9514                                                 phba->sli.mbox_active) *
9515                                                 1000) + jiffies;
9516         spin_unlock_irq(&phba->hbalock);
9517
9518         /* Make sure the mailbox is really active */
9519         if (timeout)
9520                 lpfc_sli4_process_missed_mbox_completions(phba);
9521
9522         /* Wait for the outstanding mailbox command to complete */
9523         while (phba->sli.mbox_active) {
9524                 /* Check active mailbox complete status every 2ms */
9525                 msleep(2);
9526                 if (time_after(jiffies, timeout)) {
9527                         /* Timeout, mark the outstanding cmd not complete */
9528
9529                         /* Sanity check that sli.mbox_active has not completed
9530                          * or been cancelled from another context during the last 2ms sleep,
9531                          * so take hbalock to be sure before logging.
9532                          */
9533                         spin_lock_irq(&phba->hbalock);
9534                         if (phba->sli.mbox_active) {
9535                                 mboxq = phba->sli.mbox_active;
9536                                 cmd = mboxq->u.mb.mbxCommand;
9537                                 subsys = lpfc_sli_config_mbox_subsys_get(phba,
9538                                                                          mboxq);
9539                                 opcode = lpfc_sli_config_mbox_opcode_get(phba,
9540                                                                          mboxq);
9541                                 sli_flag = psli->sli_flag;
9542                                 spin_unlock_irq(&phba->hbalock);
9543                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9544                                                 "2352 Mailbox command x%x "
9545                                                 "(x%x/x%x) sli_flag x%x could "
9546                                                 "not complete\n",
9547                                                 cmd, subsys, opcode,
9548                                                 sli_flag);
9549                         } else {
9550                                 spin_unlock_irq(&phba->hbalock);
9551                         }
9552
9553                         rc = 1;
9554                         break;
9555                 }
9556         }
9557
9558         /* Could not cleanly block async mailbox command posting; fail it */
9559         if (rc) {
9560                 spin_lock_irq(&phba->hbalock);
9561                 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9562                 spin_unlock_irq(&phba->hbalock);
9563         }
9564         return rc;
9565 }
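
/*
 * Intended pairing for the helper above (sketch; this is exactly the
 * sequence used by lpfc_sli_issue_mbox_s4() below):
 *
 *	rc = lpfc_sli4_async_mbox_block(phba);
 *	if (!rc) {
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *		lpfc_sli4_async_mbox_unblock(phba);
 *	}
 */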
9566
9567 /**
9568  * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
9569  * @phba: Pointer to HBA context object.
9570  *
9571  * The function unblocks and resumes posting of SLI4 asynchronous mailbox
9572  * commands from the driver internal pending mailbox queue. It makes sure
9573  * that there is no outstanding mailbox command before resuming posting
9574  * asynchronous mailbox commands. If, for any reason, there is an outstanding
9575  * mailbox command, it will try to wait it out before resuming asynchronous
9576  * mailbox command posting.
9577  **/
9578 static void
9579 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
9580 {
9581         struct lpfc_sli *psli = &phba->sli;
9582
9583         spin_lock_irq(&phba->hbalock);
9584         if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9585                 /* Asynchronous mailbox posting is not blocked, do nothing */
9586                 spin_unlock_irq(&phba->hbalock);
9587                 return;
9588         }
9589
9590         /* The outstanding synchronous mailbox command is guaranteed to be
9591          * done, whether successful or timed out; a timed-out mailbox
9592          * command is always removed. So just unblock posting of async
9593          * mailbox commands and resume.
9594          */
9595         psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9596         spin_unlock_irq(&phba->hbalock);
9597
9598         /* wake up worker thread to post asynchronous mailbox command */
9599         lpfc_worker_wake_up(phba);
9600 }
9601
9602 /**
9603  * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
9604  * @phba: Pointer to HBA context object.
9605  * @mboxq: Pointer to mailbox object.
9606  *
9607  * The function waits for the bootstrap mailbox register ready bit from
9608  * the port for up to the regular mailbox command timeout value.
9609  * Returns:
9610  *      0 - no timeout on waiting for bootstrap mailbox register ready.
9611  *      MBXERR_ERROR - wait for bootstrap mailbox register timed out.
9612  **/
9613 static int
9614 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9615 {
9616         uint32_t db_ready;
9617         unsigned long timeout;
9618         struct lpfc_register bmbx_reg;
9619
9620         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
9621                                    * 1000) + jiffies;
9622
9623         do {
9624                 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
9625                 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
9626                 if (!db_ready)
9627                         mdelay(2);
9628
9629                 if (time_after(jiffies, timeout))
9630                         return MBXERR_ERROR;
9631         } while (!db_ready);
9632
9633         return 0;
9634 }
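
/*
 * Usage note: lpfc_sli4_post_sync_mbox() below calls this helper three
 * times - before touching the bootstrap region and again after each of
 * the high/low address doorbell writes - because the port must report
 * ready between every step of the bootstrap handshake.
 */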
9635
9636 /**
9637  * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
9638  * @phba: Pointer to HBA context object.
9639  * @mboxq: Pointer to mailbox object.
9640  *
9641  * The function posts a mailbox to the port.  The mailbox is expected
9642  * to be completely filled in and ready for the port to operate on it.
9643  * This routine executes a synchronous completion operation on the
9644  * mailbox by polling for its completion.
9645  *
9646  * The caller must not be holding any locks when calling this routine.
9647  *
9648  * Returns:
9649  *      MBX_SUCCESS - mailbox posted successfully
9650  *      Any of the MBX error values.
9651  **/
9652 static int
9653 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9654 {
9655         int rc = MBX_SUCCESS;
9656         unsigned long iflag;
9657         uint32_t mcqe_status;
9658         uint32_t mbx_cmnd;
9659         struct lpfc_sli *psli = &phba->sli;
9660         struct lpfc_mqe *mb = &mboxq->u.mqe;
9661         struct lpfc_bmbx_create *mbox_rgn;
9662         struct dma_address *dma_address;
9663
9664         /*
9665          * Only one mailbox can be active to the bootstrap mailbox region
9666          * at a time and there is no queueing provided.
9667          */
9668         spin_lock_irqsave(&phba->hbalock, iflag);
9669         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9670                 spin_unlock_irqrestore(&phba->hbalock, iflag);
9671                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9672                                 "(%d):2532 Mailbox command x%x (x%x/x%x) "
9673                                 "cannot issue Data: x%x x%x\n",
9674                                 mboxq->vport ? mboxq->vport->vpi : 0,
9675                                 mboxq->u.mb.mbxCommand,
9676                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9677                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9678                                 psli->sli_flag, MBX_POLL);
9679                 return MBXERR_ERROR;
9680         }
9681         /* The server grabs the token and owns it until release */
9682         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9683         phba->sli.mbox_active = mboxq;
9684         spin_unlock_irqrestore(&phba->hbalock, iflag);
9685
9686         /* wait for the bootstrap mbox register to report readiness */
9687         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9688         if (rc)
9689                 goto exit;
9690         /*
9691          * Initialize the bootstrap memory region to avoid stale data areas
9692          * in the mailbox post.  Then copy the caller's mailbox contents to
9693          * the bmbx mailbox region.
9694          */
9695         mbx_cmnd = bf_get(lpfc_mqe_command, mb);
9696         memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
9697         lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
9698                                sizeof(struct lpfc_mqe));
9699
9700         /* Post the high mailbox dma address to the port and wait for ready. */
9701         dma_address = &phba->sli4_hba.bmbx.dma_address;
9702         writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
9703
9704         /* wait for bootstrap mbox register ready after the hi-address write */
9705         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9706         if (rc)
9707                 goto exit;
9708
9709         /* Post the low mailbox dma address to the port. */
9710         writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
9711
9712         /* wait for bootstrap mbox register ready after the low-address write */
9713         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9714         if (rc)
9715                 goto exit;
9716
9717         /*
9718          * Read the CQ to ensure the mailbox has completed.
9719          * If so, update the mailbox status so that the upper layers
9720          * can complete the request normally.
9721          */
9722         lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
9723                                sizeof(struct lpfc_mqe));
9724         mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
9725         lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
9726                                sizeof(struct lpfc_mcqe));
9727         mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
9728         /*
9729          * When the CQE status indicates a failure and the mailbox status
9730          * indicates success then copy the CQE status into the mailbox status
9731          * (and prefix it with x4000).
9732          */
9733         if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
9734                 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
9735                         bf_set(lpfc_mqe_status, mb,
9736                                (LPFC_MBX_ERROR_RANGE | mcqe_status));
9737                 rc = MBXERR_ERROR;
9738         } else
9739                 lpfc_sli4_swap_str(phba, mboxq);
9740
9741         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9742                         "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
9743                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
9744                         " x%x x%x CQ: x%x x%x x%x x%x\n",
9745                         mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9746                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9747                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9748                         bf_get(lpfc_mqe_status, mb),
9749                         mb->un.mb_words[0], mb->un.mb_words[1],
9750                         mb->un.mb_words[2], mb->un.mb_words[3],
9751                         mb->un.mb_words[4], mb->un.mb_words[5],
9752                         mb->un.mb_words[6], mb->un.mb_words[7],
9753                         mb->un.mb_words[8], mb->un.mb_words[9],
9754                         mb->un.mb_words[10], mb->un.mb_words[11],
9755                         mb->un.mb_words[12], mboxq->mcqe.word0,
9756                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
9757                         mboxq->mcqe.trailer);
9758 exit:
9759         /* We are holding the token; take hbalock just to release it cleanly */
9760         spin_lock_irqsave(&phba->hbalock, iflag);
9761         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9762         phba->sli.mbox_active = NULL;
9763         spin_unlock_irqrestore(&phba->hbalock, iflag);
9764         return rc;
9765 }
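
/*
 * Bootstrap mailbox handshake, summarized from the routine above
 * (ordering sketch, not additional code):
 *
 *	1. take the LPFC_SLI_MBOX_ACTIVE token under hbalock
 *	2. wait for ready, then copy the MQE into bmbx.avirt
 *	3. writel(addr_hi, BMBXregaddr) and wait for ready
 *	4. writel(addr_lo, BMBXregaddr) and wait for ready
 *	5. copy the MQE/MCQE back and fold any MCQE error into the
 *	   mailbox status (LPFC_MBX_ERROR_RANGE | mcqe_status)
 *	6. release the token under hbalock
 */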
9766
9767 /**
9768  * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
9769  * @phba: Pointer to HBA context object.
9770  * @mboxq: Pointer to mailbox object.
9771  * @flag: Flag indicating how the mailbox need to be processed.
9772  *
9773  * This function is called by discovery code and HBA management code to submit
9774  * a mailbox command to firmware with SLI-4 interface spec.
9775  *
9776  * For all return codes, the caller owns the mailbox command after the
9777  * function returns.
9778  **/
9779 static int
9780 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
9781                        uint32_t flag)
9782 {
9783         struct lpfc_sli *psli = &phba->sli;
9784         unsigned long iflags;
9785         int rc;
9786
9787         /* dump from issue mailbox command if setup */
9788         lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
9789
9790         rc = lpfc_mbox_dev_check(phba);
9791         if (unlikely(rc)) {
9792                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9793                                 "(%d):2544 Mailbox command x%x (x%x/x%x) "
9794                                 "cannot issue Data: x%x x%x\n",
9795                                 mboxq->vport ? mboxq->vport->vpi : 0,
9796                                 mboxq->u.mb.mbxCommand,
9797                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9798                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9799                                 psli->sli_flag, flag);
9800                 goto out_not_finished;
9801         }
9802
9803         /* Detect polling mode and jump to a handler */
9804         if (!phba->sli4_hba.intr_enable) {
9805                 if (flag == MBX_POLL)
9806                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9807                 else
9808                         rc = -EIO;
9809                 if (rc != MBX_SUCCESS)
9810                         lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
9811                                         "(%d):2541 Mailbox command x%x "
9812                                         "(x%x/x%x) failure: "
9813                                         "mqe_sta: x%x mcqe_sta: x%x/x%x "
9814                                         "Data: x%x x%x\n",
9815                                         mboxq->vport ? mboxq->vport->vpi : 0,
9816                                         mboxq->u.mb.mbxCommand,
9817                                         lpfc_sli_config_mbox_subsys_get(phba,
9818                                                                         mboxq),
9819                                         lpfc_sli_config_mbox_opcode_get(phba,
9820                                                                         mboxq),
9821                                         bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9822                                         bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9823                                         bf_get(lpfc_mcqe_ext_status,
9824                                                &mboxq->mcqe),
9825                                         psli->sli_flag, flag);
9826                 return rc;
9827         } else if (flag == MBX_POLL) {
9828                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
9829                                 "(%d):2542 Try to issue mailbox command "
9830                                 "x%x (x%x/x%x) synchronously ahead of async "
9831                                 "mailbox command queue: x%x x%x\n",
9832                                 mboxq->vport ? mboxq->vport->vpi : 0,
9833                                 mboxq->u.mb.mbxCommand,
9834                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9835                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9836                                 psli->sli_flag, flag);
9837                 /* Try to block the asynchronous mailbox posting */
9838                 rc = lpfc_sli4_async_mbox_block(phba);
9839                 if (!rc) {
9840                         /* Successfully blocked, now issue sync mbox cmd */
9841                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9842                         if (rc != MBX_SUCCESS)
9843                                 lpfc_printf_log(phba, KERN_WARNING,
9844                                         LOG_MBOX | LOG_SLI,
9845                                         "(%d):2597 Sync Mailbox command "
9846                                         "x%x (x%x/x%x) failure: "
9847                                         "mqe_sta: x%x mcqe_sta: x%x/x%x "
9848                                         "Data: x%x x%x\n",
9849                                         mboxq->vport ? mboxq->vport->vpi : 0,
9850                                         mboxq->u.mb.mbxCommand,
9851                                         lpfc_sli_config_mbox_subsys_get(phba,
9852                                                                         mboxq),
9853                                         lpfc_sli_config_mbox_opcode_get(phba,
9854                                                                         mboxq),
9855                                         bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9856                                         bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9857                                         bf_get(lpfc_mcqe_ext_status,
9858                                                &mboxq->mcqe),
9859                                         psli->sli_flag, flag);
9860                         /* Unblock the async mailbox posting afterward */
9861                         lpfc_sli4_async_mbox_unblock(phba);
9862                 }
9863                 return rc;
9864         }
9865
9866         /* Now, interrupt mode asynchronous mailbox command */
9867         rc = lpfc_mbox_cmd_check(phba, mboxq);
9868         if (rc) {
9869                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9870                                 "(%d):2543 Mailbox command x%x (x%x/x%x) "
9871                                 "cannot issue Data: x%x x%x\n",
9872                                 mboxq->vport ? mboxq->vport->vpi : 0,
9873                                 mboxq->u.mb.mbxCommand,
9874                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9875                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9876                                 psli->sli_flag, flag);
9877                 goto out_not_finished;
9878         }
9879
9880         /* Put the mailbox command to the driver internal FIFO */
9881         psli->slistat.mbox_busy++;
9882         spin_lock_irqsave(&phba->hbalock, iflags);
9883         lpfc_mbox_put(phba, mboxq);
9884         spin_unlock_irqrestore(&phba->hbalock, iflags);
9885         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9886                         "(%d):0354 Mbox cmd issue - Enqueue Data: "
9887                         "x%x (x%x/x%x) x%x x%x x%x\n",
9888                         mboxq->vport ? mboxq->vport->vpi : 0xffffff,
9889                         bf_get(lpfc_mqe_command, &mboxq->u.mqe),
9890                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9891                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9892                         phba->pport->port_state,
9893                         psli->sli_flag, MBX_NOWAIT);
9894         /* Wake up worker thread to transport mailbox command from head */
9895         lpfc_worker_wake_up(phba);
9896
9897         return MBX_BUSY;
9898
9899 out_not_finished:
9900         return MBX_NOT_FINISHED;
9901 }
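
/*
 * Dispatch summary for the routine above (sketch):
 *
 *	intr_enable == 0, MBX_POLL   -> lpfc_sli4_post_sync_mbox()
 *	intr_enable == 0, MBX_NOWAIT -> -EIO (polling is the only option)
 *	intr_enable == 1, MBX_POLL   -> block async posting, sync post,
 *					unblock
 *	intr_enable == 1, MBX_NOWAIT -> enqueue, wake worker, MBX_BUSY
 */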
9902
9903 /**
9904  * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
9905  * @phba: Pointer to HBA context object.
9906  *
9907  * This function is called by the worker thread to send a mailbox command to
9908  * SLI4 HBA firmware.
9909  *
9910  **/
9911 int
9912 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
9913 {
9914         struct lpfc_sli *psli = &phba->sli;
9915         LPFC_MBOXQ_t *mboxq;
9916         int rc = MBX_SUCCESS;
9917         unsigned long iflags;
9918         struct lpfc_mqe *mqe;
9919         uint32_t mbx_cmnd;
9920
9921         /* Check interrupt mode before posting an async mailbox command */
9922         if (unlikely(!phba->sli4_hba.intr_enable))
9923                 return MBX_NOT_FINISHED;
9924
9925         /* Check for mailbox command service token */
9926         spin_lock_irqsave(&phba->hbalock, iflags);
9927         if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9928                 spin_unlock_irqrestore(&phba->hbalock, iflags);
9929                 return MBX_NOT_FINISHED;
9930         }
9931         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9932                 spin_unlock_irqrestore(&phba->hbalock, iflags);
9933                 return MBX_NOT_FINISHED;
9934         }
9935         if (unlikely(phba->sli.mbox_active)) {
9936                 spin_unlock_irqrestore(&phba->hbalock, iflags);
9937                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9938                                 "0384 There is pending active mailbox cmd\n");
9939                 return MBX_NOT_FINISHED;
9940         }
9941         /* Take the mailbox command service token */
9942         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9943
9944         /* Get the next mailbox command from head of queue */
9945         mboxq = lpfc_mbox_get(phba);
9946
9947         /* If no more mailbox commands are waiting to be posted, we're done */
9948         if (!mboxq) {
9949                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9950                 spin_unlock_irqrestore(&phba->hbalock, iflags);
9951                 return MBX_SUCCESS;
9952         }
9953         phba->sli.mbox_active = mboxq;
9954         spin_unlock_irqrestore(&phba->hbalock, iflags);
9955
9956         /* Check device readiness for posting mailbox command */
9957         rc = lpfc_mbox_dev_check(phba);
9958         if (unlikely(rc))
9959                 /* Driver clean routine will clean up pending mailbox */
9960                 goto out_not_finished;
9961
9962         /* Prepare the mbox command to be posted */
9963         mqe = &mboxq->u.mqe;
9964         mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
9965
9966         /* Start timer for the mbox_tmo and log some mailbox post messages */
9967         mod_timer(&psli->mbox_tmo, (jiffies +
9968                   msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
9969
9970         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9971                         "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
9972                         "x%x x%x\n",
9973                         mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9974                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9975                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9976                         phba->pport->port_state, psli->sli_flag);
9977
9978         if (mbx_cmnd != MBX_HEARTBEAT) {
9979                 if (mboxq->vport) {
9980                         lpfc_debugfs_disc_trc(mboxq->vport,
9981                                 LPFC_DISC_TRC_MBOX_VPORT,
9982                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9983                                 mbx_cmnd, mqe->un.mb_words[0],
9984                                 mqe->un.mb_words[1]);
9985                 } else {
9986                         lpfc_debugfs_disc_trc(phba->pport,
9987                                 LPFC_DISC_TRC_MBOX,
9988                                 "MBOX Send: cmd:x%x mb:x%x x%x",
9989                                 mbx_cmnd, mqe->un.mb_words[0],
9990                                 mqe->un.mb_words[1]);
9991                 }
9992         }
9993         psli->slistat.mbox_cmd++;
9994
9995         /* Post the mailbox command to the port */
9996         rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
9997         if (rc != MBX_SUCCESS) {
9998                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9999                                 "(%d):2533 Mailbox command x%x (x%x/x%x) "
10000                                 "cannot issue Data: x%x x%x\n",
10001                                 mboxq->vport ? mboxq->vport->vpi : 0,
10002                                 mboxq->u.mb.mbxCommand,
10003                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10004                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10005                                 psli->sli_flag, MBX_NOWAIT);
10006                 goto out_not_finished;
10007         }
10008
10009         return rc;
10010
10011 out_not_finished:
10012         spin_lock_irqsave(&phba->hbalock, iflags);
10013         if (phba->sli.mbox_active) {
10014                 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
10015                 __lpfc_mbox_cmpl_put(phba, mboxq);
10016                 /* Release the token */
10017                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10018                 phba->sli.mbox_active = NULL;
10019         }
10020         spin_unlock_irqrestore(&phba->hbalock, iflags);
10021
10022         return MBX_NOT_FINISHED;
10023 }
10024
10025 /**
10026  * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
10027  * @phba: Pointer to HBA context object.
10028  * @pmbox: Pointer to mailbox object.
10029  * @flag: Flag indicating how the mailbox need to be processed.
10030  *
10031  * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
10032  * the API jump table function pointer from the lpfc_hba struct.
10033  *
10034  * For all return codes, the caller owns the mailbox command after the
10035  * function returns.
10036  **/
10037 int
10038 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
10039 {
10040         return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
10041 }
10042
10043 /**
10044  * lpfc_mbox_api_table_setup - Set up mbox api function jump table
10045  * @phba: The hba struct for which this call is being executed.
10046  * @dev_grp: The HBA PCI-Device group number.
10047  *
10048  * This routine sets up the mbox interface API function jump table in @phba
10049  * struct.
10050  * Returns: 0 - success, -ENODEV - failure.
10051  **/
10052 int
10053 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10054 {
10055
10056         switch (dev_grp) {
10057         case LPFC_PCI_DEV_LP:
10058                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
10059                 phba->lpfc_sli_handle_slow_ring_event =
10060                                 lpfc_sli_handle_slow_ring_event_s3;
10061                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
10062                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
10063                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
10064                 break;
10065         case LPFC_PCI_DEV_OC:
10066                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
10067                 phba->lpfc_sli_handle_slow_ring_event =
10068                                 lpfc_sli_handle_slow_ring_event_s4;
10069                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
10070                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
10071                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
10072                 break;
10073         default:
10074                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10075                                 "1420 Invalid HBA PCI-device group: 0x%x\n",
10076                                 dev_grp);
10077                 return -ENODEV;
10078         }
10079         return 0;
10080 }
10081
10082 /**
10083  * __lpfc_sli_ringtx_put - Add an iocb to the txq
10084  * @phba: Pointer to HBA context object.
10085  * @pring: Pointer to driver SLI ring object.
10086  * @piocb: Pointer to address of newly added command iocb.
10087  *
10088  * This function is called with hbalock held for SLI3 ports or
10089  * the ring lock held for SLI4 ports to add a command
10090  * iocb to the txq when the SLI layer cannot submit the command iocb
10091  * to the ring.
10092  **/
10093 void
10094 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10095                     struct lpfc_iocbq *piocb)
10096 {
10097         if (phba->sli_rev == LPFC_SLI_REV4)
10098                 lockdep_assert_held(&pring->ring_lock);
10099         else
10100                 lockdep_assert_held(&phba->hbalock);
10101         /* Insert the caller's iocb in the txq tail for later processing. */
10102         list_add_tail(&piocb->list, &pring->txq);
10103 }
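/*
 * Example (editorial sketch): a hypothetical SLI4 caller must take the
 * ring lock before deferring an iocb to the txq, matching the lockdep
 * assertion above; SLI3 callers take hbalock instead.
 *
 *	unsigned long iflags;
 *
 *	spin_lock_irqsave(&pring->ring_lock, iflags);
 *	__lpfc_sli_ringtx_put(phba, pring, piocb);
 *	spin_unlock_irqrestore(&pring->ring_lock, iflags);
 */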
10104
10105 /**
10106  * lpfc_sli_next_iocb - Get the next iocb in the txq
10107  * @phba: Pointer to HBA context object.
10108  * @pring: Pointer to driver SLI ring object.
10109  * @piocb: Pointer to address of newly added command iocb.
10110  *
10111  * This function is called with hbalock held before a new
10112  * iocb is submitted to the firmware. It ensures that iocbs
10113  * already queued in the txq are flushed to the firmware
10114  * before any new iocb is submitted.
10115  * If there are iocbs in the txq which need to be submitted
10116  * to the firmware, lpfc_sli_next_iocb dequeues the first
10117  * element of the txq and returns it.
10118  * If the txq is empty, the function returns *piocb and sets
10119  * *piocb to NULL. The caller needs to check *piocb to find
10120  * out whether there are more commands in the txq.
10121  **/
10122 static struct lpfc_iocbq *
10123 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10124                    struct lpfc_iocbq **piocb)
10125 {
10126         struct lpfc_iocbq *nextiocb;
10127
10128         lockdep_assert_held(&phba->hbalock);
10129
10130         nextiocb = lpfc_sli_ringtx_get(phba, pring);
10131         if (!nextiocb) {
10132                 nextiocb = *piocb;
10133                 *piocb = NULL;
10134         }
10135
10136         return nextiocb;
10137 }
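/*
 * Usage note (editorial): callers drain the txq by looping until either
 * the ring has no free slot or *piocb comes back NULL, as in the submit
 * loop of __lpfc_sli_issue_iocb_s3() below; a NULL *piocb means the
 * caller's own iocb has been handed out and submission can stop.
 */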
10138
10139 /**
10140  * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
10141  * @phba: Pointer to HBA context object.
10142  * @ring_number: SLI ring number to issue iocb on.
10143  * @piocb: Pointer to command iocb.
10144  * @flag: Flag indicating if this command can be put into txq.
10145  *
10146  * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
10147  * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
10148  * recovering from an error state, if the HBA is resetting, or if the
10149  * LPFC_STOP_IOCB_EVENT flag is turned on, the function returns IOCB_ERROR.
10150  * When the link is down, this function allows only iocbs for posting buffers.
10151  * This function finds the next available slot in the command ring, posts the
10152  * command to that slot, and writes the port attention register to request
10153  * that the HBA start processing the new iocb. If there is no slot available
10154  * in the ring and SLI_IOCB_RET_IOCB is not set in @flag, the new iocb is
10155  * added to the txq; otherwise the function returns IOCB_BUSY.
10156  *
10157  * This function is called with hbalock held. The function will return success
10158  * after it successfully submits the iocb to the firmware or after adding it
10159  * to the txq.
10160  **/
10161 static int
10162 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
10163                     struct lpfc_iocbq *piocb, uint32_t flag)
10164 {
10165         struct lpfc_iocbq *nextiocb;
10166         IOCB_t *iocb;
10167         struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
10168
10169         lockdep_assert_held(&phba->hbalock);
10170
10171         if (piocb->cmd_cmpl && (!piocb->vport) &&
10172            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
10173            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
10174                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10175                                 "1807 IOCB x%x failed. No vport\n",
10176                                 piocb->iocb.ulpCommand);
10177                 dump_stack();
10178                 return IOCB_ERROR;
10179         }
10180
10182         /* If the PCI channel is in offline state, do not post iocbs. */
10183         if (unlikely(pci_channel_offline(phba->pcidev)))
10184                 return IOCB_ERROR;
10185
10186         /* If HBA has a deferred error attention, fail the iocb. */
10187         if (unlikely(phba->hba_flag & DEFER_ERATT))
10188                 return IOCB_ERROR;
10189
10190         /*
10191          * We should never get an IOCB if we are in a < LINK_DOWN state
10192          */
10193         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
10194                 return IOCB_ERROR;
10195
10196         /*
10197          * Check to see if we are blocking IOCB processing because of an
10198          * outstanding event.
10199          */
10200         if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
10201                 goto iocb_busy;
10202
10203         if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
10204                 /*
10205                  * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
10206                  * can be issued if the link is not up.
10207                  */
10208                 switch (piocb->iocb.ulpCommand) {
10209                 case CMD_GEN_REQUEST64_CR:
10210                 case CMD_GEN_REQUEST64_CX:
10211                         if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
10212                                 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
10213                                         FC_RCTL_DD_UNSOL_CMD) ||
10214                                 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
10215                                         MENLO_TRANSPORT_TYPE))
10217                                 goto iocb_busy;
10218                         break;
10219                 case CMD_QUE_RING_BUF_CN:
10220                 case CMD_QUE_RING_BUF64_CN:
10221                         /*
10222                          * For IOCBs, like QUE_RING_BUF, that have no rsp ring
10223                          * completion, cmd_cmpl MUST be 0.
10224                          */
10225                         if (piocb->cmd_cmpl)
10226                                 piocb->cmd_cmpl = NULL;
10227                         fallthrough;
10228                 case CMD_CREATE_XRI_CR:
10229                 case CMD_CLOSE_XRI_CN:
10230                 case CMD_CLOSE_XRI_CX:
10231                         break;
10232                 default:
10233                         goto iocb_busy;
10234                 }
10235
10236         /*
10237          * For FCP commands, we must be in a state where we can process link
10238          * attention events.
10239          */
10240         } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
10241                             !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
10242                 goto iocb_busy;
10243         }
10244
10245         while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
10246                (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
10247                 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
10248
10249         if (iocb)
10250                 lpfc_sli_update_ring(phba, pring);
10251         else
10252                 lpfc_sli_update_full_ring(phba, pring);
10253
10254         if (!piocb)
10255                 return IOCB_SUCCESS;
10256
10257         goto out_busy;
10258
10259  iocb_busy:
10260         pring->stats.iocb_cmd_delay++;
10261
10262  out_busy:
10263
10264         if (!(flag & SLI_IOCB_RET_IOCB)) {
10265                 __lpfc_sli_ringtx_put(phba, pring, piocb);
10266                 return IOCB_SUCCESS;
10267         }
10268
10269         return IOCB_BUSY;
10270 }
10271
10272 /**
10273  * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
10274  * @phba: Pointer to HBA context object.
10275  * @ring_number: SLI ring number to issue iocb on.
10276  * @piocb: Pointer to command iocb.
10277  * @flag: Flag indicating if this command can be put into txq.
10278  *
10279  * __lpfc_sli_issue_fcp_io_s3 is a wrapper function that invokes the lockless
10280  * routine to send an iocb command to an HBA with SLI-3 interface spec.
10281  *
10282  * This function takes the hbalock before invoking the lockless version.
10283  * The function will return success after it successfully submits the iocb
10284  * to the firmware or after adding it to the txq.
10285  **/
10286 static int
10287 __lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
10288                            struct lpfc_iocbq *piocb, uint32_t flag)
10289 {
10290         unsigned long iflags;
10291         int rc;
10292
10293         spin_lock_irqsave(&phba->hbalock, iflags);
10294         rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
10295         spin_unlock_irqrestore(&phba->hbalock, iflags);
10296
10297         return rc;
10298 }
10299
10300 /**
10301  * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
10302  * @phba: Pointer to HBA context object.
10303  * @ring_number: SLI ring number to issue wqe on.
10304  * @piocb: Pointer to command iocb.
10305  * @flag: Flag indicating if this command can be put into txq.
10306  *
10307  * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue
10308  * a wqe command to an HBA with SLI-4 interface spec.
10309  *
10310  * This function is a lockless version. The function will return success
10311  * after it successfully submits the wqe to the firmware.
10313  **/
10314 static int
10315 __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
10316                            struct lpfc_iocbq *piocb, uint32_t flag)
10317 {
10318         int rc;
10319         struct lpfc_io_buf *lpfc_cmd =
10320                 (struct lpfc_io_buf *)piocb->context1;
10321
10322         lpfc_prep_embed_io(phba, lpfc_cmd);
10323         rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
10324         return rc;
10325 }
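/*
 * Note (editorial): this path assumes piocb->context1 points at the
 * owning lpfc_io_buf, since both the embedded-CMND preparation and the
 * hardware-queue selection above are derived from it.
 */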
10326
10327 void
10328 lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
10329 {
10330         struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
10331         union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
10332         struct sli4_sge *sgl;
10333
10334         /* 128 byte wqe support here */
10335         sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10336
10337         if (phba->fcp_embed_io) {
10338                 struct fcp_cmnd *fcp_cmnd;
10339                 u32 *ptr;
10340
10341                 fcp_cmnd = lpfc_cmd->fcp_cmnd;
10342
10343                 /* Word 0-2 - FCP_CMND */
10344                 wqe->generic.bde.tus.f.bdeFlags =
10345                         BUFF_TYPE_BDE_IMMED;
10346                 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10347                 wqe->generic.bde.addrHigh = 0;
10348                 wqe->generic.bde.addrLow =  88;  /* Word 22 */
10349
10350                 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10351                 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
10352
10353                 /* Word 22-29  FCP CMND Payload */
10354                 ptr = &wqe->words[22];
10355                 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10356         } else {
10357                 /* Word 0-2 - Inline BDE */
10358                 wqe->generic.bde.tus.f.bdeFlags =  BUFF_TYPE_BDE_64;
10359                 wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd);
10360                 wqe->generic.bde.addrHigh = sgl->addr_hi;
10361                 wqe->generic.bde.addrLow =  sgl->addr_lo;
10362
10363                 /* Word 10 */
10364                 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10365                 bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
10366         }
10367
10368         /* add the VMID tags as per switch response */
10369         if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
10370                 if (phba->pport->vmid_priority_tagging) {
10371                         bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
10372                         bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
10373                                         (piocb->vmid_tag.cs_ctl_vmid));
10374                 } else {
10375                         bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
10376                         bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10377                         wqe->words[31] = piocb->vmid_tag.app_id;
10378                 }
10379         }
10380 }
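/*
 * Layout note (editorial): with fcp_embed_io the FCP_CMND payload is
 * copied inline into WQE words 22-29, so the immediate BDE's addrLow of
 * 88 is simply the byte offset of word 22 (22 words * 4 bytes).  Without
 * embedding, words 0-2 carry a regular 64-bit BDE that points at the
 * external FCP_CMND buffer described by the first SGE.
 */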
10381
10382 /**
10383  * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
10384  * @phba: Pointer to HBA context object.
10385  * @ring_number: SLI ring number to issue iocb on.
10386  * @piocb: Pointer to command iocb.
10387  * @flag: Flag indicating if this command can be put into txq.
10388  *
10389  * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
10390  * an iocb command to an HBA with SLI-4 interface spec.
10391  *
10392  * This function is called with ringlock held. The function will return success
10393  * after it successfully submit the iocb to firmware or after adding to the
10394  * txq.
10395  **/
10396 static int
10397 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
10398                          struct lpfc_iocbq *piocb, uint32_t flag)
10399 {
10400         struct lpfc_sglq *sglq;
10401         union lpfc_wqe128 *wqe;
10402         struct lpfc_queue *wq;
10403         struct lpfc_sli_ring *pring;
10404         u32 ulp_command = get_job_cmnd(phba, piocb);
10405
10406         /* Get the WQ */
10407         if ((piocb->cmd_flag & LPFC_IO_FCP) ||
10408             (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
10409                 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
10410         } else {
10411                 wq = phba->sli4_hba.els_wq;
10412         }
10413
10414         /* Get corresponding ring */
10415         pring = wq->pring;
10416
10417         /*
10418          * The WQE can be either 64 or 128 bytes.
10419          */
10420
10421         lockdep_assert_held(&pring->ring_lock);
10422         wqe = &piocb->wqe;
10423         if (piocb->sli4_xritag == NO_XRI) {
10424                 if (ulp_command == CMD_ABORT_XRI_CX)
10425                         sglq = NULL;
10426                 else {
10427                         sglq = __lpfc_sli_get_els_sglq(phba, piocb);
10428                         if (!sglq) {
10429                                 if (!(flag & SLI_IOCB_RET_IOCB)) {
10430                                         __lpfc_sli_ringtx_put(phba,
10431                                                         pring,
10432                                                         piocb);
10433                                         return IOCB_SUCCESS;
10434                                 } else {
10435                                         return IOCB_BUSY;
10436                                 }
10437                         }
10438                 }
10439         } else if (piocb->cmd_flag & LPFC_IO_FCP) {
10440                 /* These IO's already have an XRI and a mapped sgl. */
10441                 sglq = NULL;
10442         } else {
10444                 /*
10445                  * This is a continuation of a command (CX), so this
10446                  * sglq is on the active list.
10447                  */
10448                 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
10449                 if (!sglq)
10450                         return IOCB_ERROR;
10451         }
10452
10453         if (sglq) {
10454                 piocb->sli4_lxritag = sglq->sli4_lxritag;
10455                 piocb->sli4_xritag = sglq->sli4_xritag;
10456
10457                 /* For an ABTS sent by the initiator on a CT
10458                  * exchange, the RX_ID field will be filled with
10459                  * the newly allocated responder XRI.
10460                  */
10461                 if (ulp_command == CMD_XMIT_BLS_RSP64_CX &&
10462                     piocb->abort_bls == LPFC_ABTS_UNSOL_INT)
10463                         bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10464                                piocb->sli4_xritag);
10465
10466                 bf_set(wqe_xri_tag, &wqe->generic.wqe_com,
10467                        piocb->sli4_xritag);
10468
10469                 if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI)
10470                         return IOCB_ERROR;
10471         }
10472
10473         if (lpfc_sli4_wq_put(wq, wqe))
10474                 return IOCB_ERROR;
10475
10476         lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
10477
10478         return 0;
10479 }
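/*
 * Summary (editorial): the sglq handling above breaks down as follows -
 * NO_XRI + abort: no sglq is needed; NO_XRI + anything else: allocate an
 * ELS sglq, or park the iocb on the txq (or return IOCB_BUSY when
 * SLI_IOCB_RET_IOCB is set); FCP IO: the XRI and sgl are already mapped;
 * otherwise: a continuation (CX) reuses the sglq on the active list.
 */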
10480
10481 /*
10482  * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
10483  *
10484  * This routine wraps the actual fcp i/o function, issuing a WQE for SLI-4
10485  * or an IOCB for SLI-3 through the function
10486  * pointer from the lpfc_hba struct.
10487  *
10488  * Return codes:
10489  * IOCB_ERROR - Error
10490  * IOCB_SUCCESS - Success
10491  * IOCB_BUSY - Busy
10492  **/
10493 int
10494 lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
10495                       struct lpfc_iocbq *piocb, uint32_t flag)
10496 {
10497         return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
10498 }
10499
10500 /*
10501  * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
10502  *
10503  * This routine wraps the actual lockless IOCB issue routine via the function
10504  * pointer from the lpfc_hba struct.
10505  *
10506  * Return codes:
10507  * IOCB_ERROR - Error
10508  * IOCB_SUCCESS - Success
10509  * IOCB_BUSY - Busy
10510  **/
10511 int
10512 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10513                 struct lpfc_iocbq *piocb, uint32_t flag)
10514 {
10515         return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10516 }
10517
10518 static void
10519 __lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq,
10520                                struct lpfc_vport *vport,
10521                                struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
10522                                u32 elscmd, u8 tmo, u8 expect_rsp)
10523 {
10524         struct lpfc_hba *phba = vport->phba;
10525         IOCB_t *cmd;
10526
10527         cmd = &cmdiocbq->iocb;
10528         memset(cmd, 0, sizeof(*cmd));
10529
10530         cmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10531         cmd->un.elsreq64.bdl.addrLow = putPaddrLow(bmp->phys);
10532         cmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10533
10534         if (expect_rsp) {
10535                 cmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
10536                 cmd->un.elsreq64.remoteID = did; /* DID */
10537                 cmd->ulpCommand = CMD_ELS_REQUEST64_CR;
10538                 cmd->ulpTimeout = tmo;
10539         } else {
10540                 cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
10541                 cmd->un.genreq64.xmit_els_remoteID = did; /* DID */
10542                 cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
10543         }
10544         cmd->ulpBdeCount = 1;
10545         cmd->ulpLe = 1;
10546         cmd->ulpClass = CLASS3;
10547
10548         /* If we have NPIV enabled, we want to send ELS traffic by VPI. */
10549         if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
10550                 if (expect_rsp) {
10551                         cmd->un.elsreq64.myID = vport->fc_myDID;
10552
10553                         /* For ELS_REQUEST64_CR, use the VPI by default */
10554                         cmd->ulpContext = phba->vpi_ids[vport->vpi];
10555                 }
10556
10557                 cmd->ulpCt_h = 0;
10558                 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
10559                 if (elscmd == ELS_CMD_ECHO)
10560                         cmd->ulpCt_l = 0; /* context = invalid RPI */
10561                 else
10562                         cmd->ulpCt_l = 1; /* context = VPI */
10563         }
10564 }
10565
10566 static void
10567 __lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
10568                                struct lpfc_vport *vport,
10569                                struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
10570                                u32 elscmd, u8 tmo, u8 expect_rsp)
10571 {
10572         struct lpfc_hba  *phba = vport->phba;
10573         union lpfc_wqe128 *wqe;
10574         struct ulp_bde64_le *bde;
10575
10576         wqe = &cmdiocbq->wqe;
10577         memset(wqe, 0, sizeof(*wqe));
10578
10579         /* Word 0 - 2 BDE */
10580         bde = (struct ulp_bde64_le *)&wqe->generic.bde;
10581         bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
10582         bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
10583         bde->type_size = cpu_to_le32(cmd_size);
10584         bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
10585
10586         if (expect_rsp) {
10587                 bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_CR);
10588
10589                 /* Transfer length */
10590                 wqe->els_req.payload_len = cmd_size;
10591                 wqe->els_req.max_response_payload_len = FCELSSIZE;
10592
10593                 /* DID */
10594                 bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did);
10595         } else {
10596                 /* DID */
10597                 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did);
10598
10599                 /* Transfer length */
10600                 wqe->xmit_els_rsp.response_payload_len = cmd_size;
10601
10602                 bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com,
10603                        CMD_XMIT_ELS_RSP64_CX);
10604         }
10605
10606         bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo);
10607         bf_set(wqe_reqtag, &wqe->generic.wqe_com, cmdiocbq->iotag);
10608         bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
10609
10610         /* If we have NPIV enabled, we want to send ELS traffic by VPI.
10611          * For SLI4, since the driver controls VPIs we also want to include
10612          * all ELS pt2pt protocol traffic as well.
10613          */
10614         if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
10615             (vport->fc_flag & FC_PT2PT)) {
10616                 if (expect_rsp) {
10617                         bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID);
10618
10619                         /* For ELS_REQUEST64_CR, use the VPI by default */
10620                         bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
10621                                phba->vpi_ids[vport->vpi]);
10622                 }
10623
10624                 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
10625                 if (elscmd == ELS_CMD_ECHO)
10626                         bf_set(wqe_ct, &wqe->generic.wqe_com, 0);
10627                 else
10628                         bf_set(wqe_ct, &wqe->generic.wqe_com, 1);
10629         }
10630 }
10631
10632 void
10633 lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
10634                           struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
10635                           u16 cmd_size, u32 did, u32 elscmd, u8 tmo,
10636                           u8 expect_rsp)
10637 {
10638         phba->__lpfc_sli_prep_els_req_rsp(cmdiocbq, vport, bmp, cmd_size, did,
10639                                           elscmd, tmo, expect_rsp);
10640 }
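/*
 * Example (editorial sketch): an ELS request issuer would prepare the
 * iocb through the wrapper above and then hand it to lpfc_sli_issue_iocb().
 * The PLOGI command code, timeout, and expect_rsp value are illustrative.
 *
 *	lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size,
 *				  did, ELS_CMD_PLOGI, phba->fc_ratov * 2, 1);
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 */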
10641
10642 static void
10643 __lpfc_sli_prep_gen_req_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
10644                            u16 rpi, u32 num_entry, u8 tmo)
10645 {
10646         IOCB_t *cmd;
10647
10648         cmd = &cmdiocbq->iocb;
10649         memset(cmd, 0, sizeof(*cmd));
10650
10651         cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10652         cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
10653         cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10654         cmd->un.genreq64.bdl.bdeSize = num_entry * sizeof(struct ulp_bde64);
10655
10656         cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
10657         cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
10658         cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
10659
10660         cmd->ulpContext = rpi;
10661         cmd->ulpClass = CLASS3;
10662         cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
10663         cmd->ulpBdeCount = 1;
10664         cmd->ulpLe = 1;
10665         cmd->ulpOwner = OWN_CHIP;
10666         cmd->ulpTimeout = tmo;
10667 }
10668
10669 static void
10670 __lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
10671                            u16 rpi, u32 num_entry, u8 tmo)
10672 {
10673         union lpfc_wqe128 *cmdwqe;
10674         struct ulp_bde64_le *bde, *bpl;
10675         u32 xmit_len = 0, total_len = 0, size, type, i;
10676
10677         cmdwqe = &cmdiocbq->wqe;
10678         memset(cmdwqe, 0, sizeof(*cmdwqe));
10679
10680         /* Calculate total_len and xmit_len */
10681         bpl = (struct ulp_bde64_le *)bmp->virt;
10682         for (i = 0; i < num_entry; i++) {
10683                 size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
10684                 total_len += size;
10685         }
10686         for (i = 0; i < num_entry; i++) {
10687                 size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
10688                 type = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_TYPE_MASK;
10689                 if (type != ULP_BDE64_TYPE_BDE_64)
10690                         break;
10691                 xmit_len += size;
10692         }
10693
10694         /* Words 0 - 2 */
10695         bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde;
10696         bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
10697         bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
10698         bde->type_size = cpu_to_le32(xmit_len);
10699         bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BLP_64);
10700
10701         /* Word 3 */
10702         cmdwqe->gen_req.request_payload_len = xmit_len;
10703
10704         /* Word 5 */
10705         bf_set(wqe_type, &cmdwqe->gen_req.wge_ctl, FC_TYPE_CT);
10706         bf_set(wqe_rctl, &cmdwqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
10707         bf_set(wqe_si, &cmdwqe->gen_req.wge_ctl, 1);
10708         bf_set(wqe_la, &cmdwqe->gen_req.wge_ctl, 1);
10709
10710         /* Word 6 */
10711         bf_set(wqe_ctxt_tag, &cmdwqe->gen_req.wqe_com, rpi);
10712
10713         /* Word 7 */
10714         bf_set(wqe_tmo, &cmdwqe->gen_req.wqe_com, tmo);
10715         bf_set(wqe_class, &cmdwqe->gen_req.wqe_com, CLASS3);
10716         bf_set(wqe_cmnd, &cmdwqe->gen_req.wqe_com, CMD_GEN_REQUEST64_CR);
10717         bf_set(wqe_ct, &cmdwqe->gen_req.wqe_com, SLI4_CT_RPI);
10718
10719         /* Word 12 */
10720         cmdwqe->gen_req.max_response_payload_len = total_len - xmit_len;
10721 }
10722
10723 void
10724 lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
10725                       struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo)
10726 {
10727         phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo);
10728 }
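/*
 * Worked example (editorial): for a BPL holding one 16-byte request BDE of
 * type BDE_64 followed by one 256-byte response entry of a different BDE
 * type, the _s4 routine above computes total_len = 272 and xmit_len = 16
 * (the xmit sum stops at the first non-BDE_64 entry), so the WQE is built
 * with request_payload_len = 16 and max_response_payload_len = 256.
 */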
10729
10730 static void
10731 __lpfc_sli_prep_xmit_seq64_s3(struct lpfc_iocbq *cmdiocbq,
10732                               struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
10733                               u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
10734 {
10735         IOCB_t *icmd;
10736
10737         icmd = &cmdiocbq->iocb;
10738         memset(icmd, 0, sizeof(*icmd));
10739
10740         icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10741         icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
10742         icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10743         icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
10744         icmd->un.xseq64.w5.hcsw.Fctl = LA;
10745         if (last_seq)
10746                 icmd->un.xseq64.w5.hcsw.Fctl |= LS;
10747         icmd->un.xseq64.w5.hcsw.Dfctl = 0;
10748         icmd->un.xseq64.w5.hcsw.Rctl = rctl;
10749         icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
10750
10751         icmd->ulpBdeCount = 1;
10752         icmd->ulpLe = 1;
10753         icmd->ulpClass = CLASS3;
10754
10755         switch (cr_cx_cmd) {
10756         case CMD_XMIT_SEQUENCE64_CR:
10757                 icmd->ulpContext = rpi;
10758                 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
10759                 break;
10760         case CMD_XMIT_SEQUENCE64_CX:
10761                 icmd->ulpContext = ox_id;
10762                 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
10763                 break;
10764         default:
10765                 break;
10766         }
10767 }
10768
10769 static void
10770 __lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq,
10771                               struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
10772                               u32 full_size, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
10773 {
10774         union lpfc_wqe128 *wqe;
10775         struct ulp_bde64 *bpl;
10776         struct ulp_bde64_le *bde;
10777
10778         wqe = &cmdiocbq->wqe;
10779         memset(wqe, 0, sizeof(*wqe));
10780
10781         /* Words 0 - 2 */
10782         bpl = (struct ulp_bde64 *)bmp->virt;
10783         if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK)) {
10784                 wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh;
10785                 wqe->xmit_sequence.bde.addrLow = bpl->addrLow;
10786                 wqe->xmit_sequence.bde.tus.w = bpl->tus.w;
10787         } else {
10788                 bde = (struct ulp_bde64_le *)&wqe->xmit_sequence.bde;
10789                 bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
10790                 bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
10791                 bde->type_size = cpu_to_le32(bpl->tus.f.bdeSize);
10792                 bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
10793         }
10794
10795         /* Word 5 */
10796         bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq);
10797         bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 1);
10798         bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
10799         bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, rctl);
10800         bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_CT);
10801
10802         /* Word 6 */
10803         bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, rpi);
10804
10805         bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
10806                CMD_XMIT_SEQUENCE64_WQE);
10807
10808         /* Word 7 */
10809         bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
10810
10811         /* Word 9 */
10812         bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id);
10813
10814         /* Word 12 */
10815         if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK))
10816                 wqe->xmit_sequence.xmit_len = full_size;
10817         else
10818                 wqe->xmit_sequence.xmit_len =
10819                         wqe->xmit_sequence.bde.tus.f.bdeSize;
10820 }
10821
10822 void
10823 lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
10824                          struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
10825                          u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
10826 {
10827         phba->__lpfc_sli_prep_xmit_seq64(cmdiocbq, bmp, rpi, ox_id, num_entry,
10828                                          rctl, last_seq, cr_cx_cmd);
10829 }
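/*
 * Example (editorial sketch): a CT response path would prepare the
 * sequence against the received OX_ID and then issue it; the rctl and
 * last_seq values below are illustrative.
 *
 *	lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, rpi, ox_id, 1,
 *				 FC_RCTL_DD_SOL_CTL, 1,
 *				 CMD_XMIT_SEQUENCE64_CX);
 */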
10830
10831 static void
10832 __lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
10833                              u16 iotag, u8 ulp_class, u16 cqid, bool ia)
10834 {
10835         IOCB_t *icmd = NULL;
10836
10837         icmd = &cmdiocbq->iocb;
10838         memset(icmd, 0, sizeof(*icmd));
10839
10840         /* Word 5 */
10841         icmd->un.acxri.abortContextTag = ulp_context;
10842         icmd->un.acxri.abortIoTag = iotag;
10843
10844         if (ia) {
10845                 /* Word 7 */
10846                 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
10847         } else {
10848                 /* Word 3 */
10849                 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
10850
10851                 /* Word 7 */
10852                 icmd->ulpClass = ulp_class;
10853                 icmd->ulpCommand = CMD_ABORT_XRI_CN;
10854         }
10855
10856         /* Word 7 */
10857         icmd->ulpLe = 1;
10858 }
10859
10860 static void
10861 __lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
10862                              u16 iotag, u8 ulp_class, u16 cqid, bool ia)
10863 {
10864         union lpfc_wqe128 *wqe;
10865
10866         wqe = &cmdiocbq->wqe;
10867         memset(wqe, 0, sizeof(*wqe));
10868
10869         /* Word 3 */
10870         bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
10871         if (ia)
10872                 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
10873         else
10874                 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
10875
10876         /* Word 7 */
10877         bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_WQE);
10878
10879         /* Word 8 */
10880         wqe->abort_cmd.wqe_com.abort_tag = ulp_context;
10881
10882         /* Word 9 */
10883         bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag);
10884
10885         /* Word 10 */
10886         bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
10887
10888         /* Word 11 */
10889         bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid);
10890         bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
10891 }
10892
10893 void
10894 lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
10895                         u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
10896                         bool ia)
10897 {
10898         phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class,
10899                                         cqid, ia);
10900 }
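/*
 * Example (editorial sketch): an abort issuer would fill a spare iocbq
 * through the wrapper above and send it on the ring of the command being
 * aborted; ia selects a CLOSE_XRI-style abort (no ABTS on the wire) when
 * the exchange is already gone, e.g. after link loss.
 *
 *	lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag,
 *				CLASS3, cqid, ia);
 *	abtsiocbq->vport = vport;
 *	rc = lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbq, 0);
 */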
10901
10902 /**
10903  * lpfc_sli_api_table_setup - Set up sli api function jump table
10904  * @phba: The hba struct for which this call is being executed.
10905  * @dev_grp: The HBA PCI-Device group number.
10906  *
10907  * This routine sets up the SLI interface API function jump table in @phba
10908  * struct.
10909  * Returns: 0 - success, -ENODEV - failure.
10910  **/
10911 int
10912 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10913 {
10915         switch (dev_grp) {
10916         case LPFC_PCI_DEV_LP:
10917                 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10918                 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10919                 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
10920                 phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s3;
10921                 phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s3;
10922                 phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s3;
10923                 phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s3;
10924                 break;
10925         case LPFC_PCI_DEV_OC:
10926                 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10927                 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10928                 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
10929                 phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s4;
10930                 phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s4;
10931                 phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s4;
10932                 phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s4;
10933                 break;
10934         default:
10935                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10936                                 "1419 Invalid HBA PCI-device group: 0x%x\n",
10937                                 dev_grp);
10938                 return -ENODEV;
10939         }
10940         return 0;
10941 }
10942
10943 /**
10944  * lpfc_sli4_calc_ring - Calculates which ring to use
10945  * @phba: Pointer to HBA context object.
10946  * @piocb: Pointer to command iocb.
10947  *
10948  * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
10949  * hba_wqidx, thus we need to calculate the corresponding ring.
10950  * Since ABORTS must go on the same WQ as the command they are
10951  * aborting, we use the command's hba_wqidx.
10952  */
10953 struct lpfc_sli_ring *
10954 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10955 {
10956         struct lpfc_io_buf *lpfc_cmd;
10957
10958         if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10959                 if (unlikely(!phba->sli4_hba.hdwq))
10960                         return NULL;
10961                 /*
10962                  * For an abort iocb, hba_wqidx should already
10963                  * be set up based on what work queue we used.
10964                  */
10965                 if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
10966                         lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10967                         piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10968                 }
10969                 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
10970         } else {
10971                 if (unlikely(!phba->sli4_hba.els_wq))
10972                         return NULL;
10973                 piocb->hba_wqidx = 0;
10974                 return phba->sli4_hba.els_wq->pring;
10975         }
10976 }
10977
10978 /**
10979  * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
10980  * @phba: Pointer to HBA context object.
10981  * @ring_number: Ring number
10982  * @piocb: Pointer to command iocb.
10983  * @flag: Flag indicating if this command can be put into txq.
10984  *
10985  * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
10986  * function. This function takes the appropriate lock (the ring_lock
10987  * for SLI4, the hbalock for SLI2/3), calls __lpfc_sli_issue_iocb and
10988  * returns the error returned by __lpfc_sli_issue_iocb. This wrapper
10989  * is used by functions which do not already hold the lock.
10990  **/
10991 int
10992 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10993                     struct lpfc_iocbq *piocb, uint32_t flag)
10994 {
10995         struct lpfc_sli_ring *pring;
10996         struct lpfc_queue *eq;
10997         unsigned long iflags;
10998         int rc;
10999
11000         if (phba->sli_rev == LPFC_SLI_REV4) {
11001                 lpfc_sli_prep_wqe(phba, piocb);
11002
11003                 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
11004
11005                 pring = lpfc_sli4_calc_ring(phba, piocb);
11006                 if (unlikely(pring == NULL))
11007                         return IOCB_ERROR;
11008
11009                 spin_lock_irqsave(&pring->ring_lock, iflags);
11010                 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11011                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11012
11013                 lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
11014         } else {
11015                 /* For now, SLI2/3 will still use hbalock */
11016                 spin_lock_irqsave(&phba->hbalock, iflags);
11017                 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11018                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11019         }
11020         return rc;
11021 }
11022
11023 /**
11024  * lpfc_extra_ring_setup - Extra ring setup function
11025  * @phba: Pointer to HBA context object.
11026  *
11027  * This function is called while the driver attaches to the
11028  * HBA to set up the extra ring. The extra ring is used
11029  * only when the driver needs to support target mode
11030  * or IP over FC functionality.
11031  *
11032  * This function is called with no lock held. SLI3 only.
11033  **/
11034 static int
11035 lpfc_extra_ring_setup(struct lpfc_hba *phba)
11036 {
11037         struct lpfc_sli *psli;
11038         struct lpfc_sli_ring *pring;
11039
11040         psli = &phba->sli;
11041
11042         /* Adjust cmd/rsp ring iocb entries more evenly */
11043
11044         /* Take some away from the FCP ring */
11045         pring = &psli->sli3_ring[LPFC_FCP_RING];
11046         pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11047         pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11048         pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11049         pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11050
11051         /* and give them to the extra ring */
11052         pring = &psli->sli3_ring[LPFC_EXTRA_RING];
11053
11054         pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11055         pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11056         pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11057         pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11058
11059         /* Setup default profile for this ring */
11060         pring->iotag_max = 4096;
11061         pring->num_mask = 1;
11062         pring->prt[0].profile = 0;      /* Mask 0 */
11063         pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
11064         pring->prt[0].type = phba->cfg_multi_ring_type;
11065         pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
11066         return 0;
11067 }
11068
11069 static void
11070 lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
11071                              struct lpfc_nodelist *ndlp)
11072 {
11073         unsigned long iflags;
11074         struct lpfc_work_evt  *evtp = &ndlp->recovery_evt;
11075
11076         spin_lock_irqsave(&phba->hbalock, iflags);
11077         if (!list_empty(&evtp->evt_listp)) {
11078                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11079                 return;
11080         }
11081
11082         /* Take a node reference; it is held until the queued work is done. */
11083         evtp->evt_arg1  = lpfc_nlp_get(ndlp);
11084         if (!evtp->evt_arg1) {
11085                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11086                 return;
11087         }
11088         evtp->evt = LPFC_EVT_RECOVER_PORT;
11089         list_add_tail(&evtp->evt_listp, &phba->work_list);
11090         spin_unlock_irqrestore(&phba->hbalock, iflags);
11091
11092         lpfc_worker_wake_up(phba);
11093 }
11094
11095 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
11096  * @phba: Pointer to HBA context object.
11097  * @iocbq: Pointer to iocb object.
11098  *
11099  * The async_event handler calls this routine when it receives
11100  * an ASYNC_STATUS_CN event from the port.  The port generates
11101  * this event when an Abort Sequence request to an rport fails
11102  * twice in succession.  The abort could be originated by the
11103  * driver or by the port.  The ABTS could have been for an ELS
11104  * or FCP IO.  The port only generates this event when an ABTS
11105  * fails to complete after one retry.
11106  */
11107 static void
11108 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
11109                           struct lpfc_iocbq *iocbq)
11110 {
11111         struct lpfc_nodelist *ndlp = NULL;
11112         uint16_t rpi = 0, vpi = 0;
11113         struct lpfc_vport *vport = NULL;
11114
11115         /* The rpi in the ulpContext is vport-sensitive. */
11116         vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
11117         rpi = iocbq->iocb.ulpContext;
11118
11119         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11120                         "3092 Port generated ABTS async event "
11121                         "on vpi %d rpi %d status 0x%x\n",
11122                         vpi, rpi, iocbq->iocb.ulpStatus);
11123
11124         vport = lpfc_find_vport_by_vpid(phba, vpi);
11125         if (!vport)
11126                 goto err_exit;
11127         ndlp = lpfc_findnode_rpi(vport, rpi);
11128         if (!ndlp)
11129                 goto err_exit;
11130
11131         if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
11132                 lpfc_sli_abts_recover_port(vport, ndlp);
11133         return;
11134
11135  err_exit:
11136         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11137                         "3095 Event Context not found, no "
11138                         "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
11139                         vpi, rpi, iocbq->iocb.ulpStatus,
11140                         iocbq->iocb.ulpContext);
11141 }
11142
11143 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
11144  * @phba: pointer to HBA context object.
11145  * @ndlp: nodelist pointer for the impacted rport.
11146  * @axri: pointer to the wcqe containing the failed exchange.
11147  *
11148  * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
11149  * port.  The port generates this event when an abort exchange request to an
11150  * rport fails twice in succession with no reply.  The abort could be originated
11151  * by the driver or by the port.  The ABTS could have been for an ELS or FCP IO.
11152  */
11153 void
11154 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
11155                            struct lpfc_nodelist *ndlp,
11156                            struct sli4_wcqe_xri_aborted *axri)
11157 {
11158         uint32_t ext_status = 0;
11159
11160         if (!ndlp) {
11161                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11162                                 "3115 Node Context not found, driver "
11163                                 "ignoring abts err event\n");
11164                 return;
11165         }
11166
11167         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11168                         "3116 Port generated FCP XRI ABORT event on "
11169                         "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
11170                         ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
11171                         bf_get(lpfc_wcqe_xa_xri, axri),
11172                         bf_get(lpfc_wcqe_xa_status, axri),
11173                         axri->parameter);
11174
11175         /*
11176          * Catch the ABTS protocol failure case.  Older OCe FW releases returned
11177          * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
11178          * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
11179          */
11180         ext_status = axri->parameter & IOERR_PARAM_MASK;
11181         if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
11182             ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
11183                 lpfc_sli_post_recovery_event(phba, ndlp);
11184 }
11185
11186 /**
11187  * lpfc_sli_async_event_handler - ASYNC iocb handler function
11188  * @phba: Pointer to HBA context object.
11189  * @pring: Pointer to driver SLI ring object.
11190  * @iocbq: Pointer to iocb object.
11191  *
11192  * This function is called by the slow ring event handler
11193  * function when there is an ASYNC event iocb in the ring.
11194  * This function is called with no lock held.
11195  * Currently this function handles temperature related and ABTS
11196  * error ASYNC events. For temperature events it decodes the sensor
11197  * event message and posts events for the management applications.
11198  **/
11199 static void
11200 lpfc_sli_async_event_handler(struct lpfc_hba *phba,
11201         struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
11202 {
11203         IOCB_t *icmd;
11204         uint16_t evt_code;
11205         struct temp_event temp_event_data;
11206         struct Scsi_Host *shost;
11207         uint32_t *iocb_w;
11208
11209         icmd = &iocbq->iocb;
11210         evt_code = icmd->un.asyncstat.evt_code;
11211
11212         switch (evt_code) {
11213         case ASYNC_TEMP_WARN:
11214         case ASYNC_TEMP_SAFE:
11215                 temp_event_data.data = (uint32_t) icmd->ulpContext;
11216                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
11217                 if (evt_code == ASYNC_TEMP_WARN) {
11218                         temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
11219                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11220                                 "0347 Adapter is very hot, please take "
11221                                 "corrective action. temperature : %d Celsius\n",
11222                                 (uint32_t) icmd->ulpContext);
11223                 } else {
11224                         temp_event_data.event_code = LPFC_NORMAL_TEMP;
11225                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11226                                 "0340 Adapter temperature is OK now. "
11227                                 "temperature : %d Celsius\n",
11228                                 (uint32_t) icmd->ulpContext);
11229                 }
11230
11231                 /* Send temperature change event to applications */
11232                 shost = lpfc_shost_from_vport(phba->pport);
11233                 fc_host_post_vendor_event(shost, fc_get_event_number(),
11234                         sizeof(temp_event_data), (char *) &temp_event_data,
11235                         LPFC_NL_VENDOR_ID);
11236                 break;
11237         case ASYNC_STATUS_CN:
11238                 lpfc_sli_abts_err_handler(phba, iocbq);
11239                 break;
11240         default:
11241                 iocb_w = (uint32_t *) icmd;
11242                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11243                         "0346 Ring %d handler: unexpected ASYNC_STATUS"
11244                         " evt_code 0x%x\n"
11245                         "W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
11246                         "W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
11247                         "W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
11248                         "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
11249                         pring->ringno, icmd->un.asyncstat.evt_code,
11250                         iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
11251                         iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
11252                         iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
11253                         iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
11254
11255                 break;
11256         }
11257 }
11258
11259
11260 /**
11261  * lpfc_sli4_setup - SLI ring setup function
11262  * @phba: Pointer to HBA context object.
11263  *
11264  * lpfc_sli4_setup sets up the ELS ring of the SLI4 interface
11265  * with its unsolicited receive event masks. This function is
11266  * called while the driver attaches to the HBA and before the
11267  * interrupts are enabled. So there is no need for locking.
11268  *
11269  * This function always returns 0.
11270  **/
11271 int
11272 lpfc_sli4_setup(struct lpfc_hba *phba)
11273 {
11274         struct lpfc_sli_ring *pring;
11275
11276         pring = phba->sli4_hba.els_wq->pring;
11277         pring->num_mask = LPFC_MAX_RING_MASK;
11278         pring->prt[0].profile = 0;      /* Mask 0 */
11279         pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11280         pring->prt[0].type = FC_TYPE_ELS;
11281         pring->prt[0].lpfc_sli_rcv_unsol_event =
11282             lpfc_els_unsol_event;
11283         pring->prt[1].profile = 0;      /* Mask 1 */
11284         pring->prt[1].rctl = FC_RCTL_ELS_REP;
11285         pring->prt[1].type = FC_TYPE_ELS;
11286         pring->prt[1].lpfc_sli_rcv_unsol_event =
11287             lpfc_els_unsol_event;
11288         pring->prt[2].profile = 0;      /* Mask 2 */
11289         /* NameServer Inquiry */
11290         pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11291         /* NameServer */
11292         pring->prt[2].type = FC_TYPE_CT;
11293         pring->prt[2].lpfc_sli_rcv_unsol_event =
11294             lpfc_ct_unsol_event;
11295         pring->prt[3].profile = 0;      /* Mask 3 */
11296         /* NameServer response */
11297         pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11298         /* NameServer */
11299         pring->prt[3].type = FC_TYPE_CT;
11300         pring->prt[3].lpfc_sli_rcv_unsol_event =
11301             lpfc_ct_unsol_event;
11302         return 0;
11303 }
11304
11305 /**
11306  * lpfc_sli_setup - SLI ring setup function
11307  * @phba: Pointer to HBA context object.
11308  *
11309  * lpfc_sli_setup sets up the rings of the SLI interface with
11310  * the number of iocbs per ring and the iotags. This function is
11311  * called while the driver attaches to the HBA and before the
11312  * interrupts are enabled. So there is no need for locking.
11313  *
11314  * This function always returns 0. SLI3 only.
11315  **/
11316 int
11317 lpfc_sli_setup(struct lpfc_hba *phba)
11318 {
11319         int i, totiocbsize = 0;
11320         struct lpfc_sli *psli = &phba->sli;
11321         struct lpfc_sli_ring *pring;
11322
11323         psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
11324         psli->sli_flag = 0;
11325
11326         psli->iocbq_lookup = NULL;
11327         psli->iocbq_lookup_len = 0;
11328         psli->last_iotag = 0;
11329
11330         for (i = 0; i < psli->num_rings; i++) {
11331                 pring = &psli->sli3_ring[i];
11332                 switch (i) {
11333                 case LPFC_FCP_RING:     /* ring 0 - FCP */
11334                         /* numCiocb and numRiocb are used in config_port */
11335                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
11336                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
11337                         pring->sli.sli3.numCiocb +=
11338                                 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11339                         pring->sli.sli3.numRiocb +=
11340                                 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11341                         pring->sli.sli3.numCiocb +=
11342                                 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11343                         pring->sli.sli3.numRiocb +=
11344                                 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11345                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11346                                                         SLI3_IOCB_CMD_SIZE :
11347                                                         SLI2_IOCB_CMD_SIZE;
11348                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11349                                                         SLI3_IOCB_RSP_SIZE :
11350                                                         SLI2_IOCB_RSP_SIZE;
11351                         pring->iotag_ctr = 0;
11352                         pring->iotag_max =
11353                             (phba->cfg_hba_queue_depth * 2);
11354                         pring->fast_iotag = pring->iotag_max;
11355                         pring->num_mask = 0;
11356                         break;
11357                 case LPFC_EXTRA_RING:   /* ring 1 - EXTRA */
11358                         /* numCiocb and numRiocb are used in config_port */
11359                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
11360                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
11361                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11362                                                         SLI3_IOCB_CMD_SIZE :
11363                                                         SLI2_IOCB_CMD_SIZE;
11364                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11365                                                         SLI3_IOCB_RSP_SIZE :
11366                                                         SLI2_IOCB_RSP_SIZE;
11367                         pring->iotag_max = phba->cfg_hba_queue_depth;
11368                         pring->num_mask = 0;
11369                         break;
11370                 case LPFC_ELS_RING:     /* ring 2 - ELS / CT */
11371                         /* numCiocb and numRiocb are used in config_port */
11372                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
11373                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
11374                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11375                                                         SLI3_IOCB_CMD_SIZE :
11376                                                         SLI2_IOCB_CMD_SIZE;
11377                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11378                                                         SLI3_IOCB_RSP_SIZE :
11379                                                         SLI2_IOCB_RSP_SIZE;
11380                         pring->fast_iotag = 0;
11381                         pring->iotag_ctr = 0;
11382                         pring->iotag_max = 4096;
11383                         pring->lpfc_sli_rcv_async_status =
11384                                 lpfc_sli_async_event_handler;
11385                         pring->num_mask = LPFC_MAX_RING_MASK;
11386                         pring->prt[0].profile = 0;      /* Mask 0 */
11387                         pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11388                         pring->prt[0].type = FC_TYPE_ELS;
11389                         pring->prt[0].lpfc_sli_rcv_unsol_event =
11390                             lpfc_els_unsol_event;
11391                         pring->prt[1].profile = 0;      /* Mask 1 */
11392                         pring->prt[1].rctl = FC_RCTL_ELS_REP;
11393                         pring->prt[1].type = FC_TYPE_ELS;
11394                         pring->prt[1].lpfc_sli_rcv_unsol_event =
11395                             lpfc_els_unsol_event;
11396                         pring->prt[2].profile = 0;      /* Mask 2 */
11397                         /* NameServer Inquiry */
11398                         pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11399                         /* NameServer */
11400                         pring->prt[2].type = FC_TYPE_CT;
11401                         pring->prt[2].lpfc_sli_rcv_unsol_event =
11402                             lpfc_ct_unsol_event;
11403                         pring->prt[3].profile = 0;      /* Mask 3 */
11404                         /* NameServer response */
11405                         pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11406                         /* NameServer */
11407                         pring->prt[3].type = FC_TYPE_CT;
11408                         pring->prt[3].lpfc_sli_rcv_unsol_event =
11409                             lpfc_ct_unsol_event;
11410                         break;
11411                 }
11412                 totiocbsize += (pring->sli.sli3.numCiocb *
11413                         pring->sli.sli3.sizeCiocb) +
11414                         (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
11415         }
11416         if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
11417                 /* Too many cmd / rsp ring entries in SLI2 SLIM */
11418                 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
11419                        "SLI2 SLIM Data: x%x x%lx\n",
11420                        phba->brd_no, totiocbsize,
11421                        (unsigned long) MAX_SLIM_IOCB_SIZE);
11422         }
11423         if (phba->cfg_multi_ring_support == 2)
11424                 lpfc_extra_ring_setup(phba);
11425
11426         return 0;
11427 }
11428
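/*
 * Illustrative sketch (not part of the driver): how the prt[] ring mask
 * table built in lpfc_sli_setup() is consumed. An unsolicited frame's
 * R_CTL/TYPE pair is matched against each registered mask and the
 * matching handler (e.g. lpfc_els_unsol_event) is invoked. The helper
 * name below is hypothetical.
 */
#if 0	/* example only */
static void lpfc_example_route_unsol(struct lpfc_hba *phba,
				     struct lpfc_sli_ring *pring,
				     u8 rctl, u8 type,
				     struct lpfc_iocbq *saveq)
{
	int i;

	for (i = 0; i < pring->num_mask; i++) {
		if (pring->prt[i].rctl == rctl &&
		    pring->prt[i].type == type) {
			(pring->prt[i].lpfc_sli_rcv_unsol_event)
				(phba, pring, saveq);
			break;
		}
	}
}
#endif
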
11429 /**
11430  * lpfc_sli4_queue_init - Queue initialization function
11431  * @phba: Pointer to HBA context object.
11432  *
11433  * lpfc_sli4_queue_init sets up the mailbox queues and the iocb queues
11434  * for each work queue ring, initializing the list heads and ring lock
11435  * of each. This function is called during the initialization of the
11436  * SLI interface of an HBA.
11437  * This function is called with no lock held and does not return a
11438  * value.
11439  **/
11440 void
11441 lpfc_sli4_queue_init(struct lpfc_hba *phba)
11442 {
11443         struct lpfc_sli *psli;
11444         struct lpfc_sli_ring *pring;
11445         int i;
11446
11447         psli = &phba->sli;
11448         spin_lock_irq(&phba->hbalock);
11449         INIT_LIST_HEAD(&psli->mboxq);
11450         INIT_LIST_HEAD(&psli->mboxq_cmpl);
11451         /* Initialize list headers for txq and txcmplq as doubly linked lists */
11452         for (i = 0; i < phba->cfg_hdw_queue; i++) {
11453                 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
11454                 pring->flag = 0;
11455                 pring->ringno = LPFC_FCP_RING;
11456                 pring->txcmplq_cnt = 0;
11457                 INIT_LIST_HEAD(&pring->txq);
11458                 INIT_LIST_HEAD(&pring->txcmplq);
11459                 INIT_LIST_HEAD(&pring->iocb_continueq);
11460                 spin_lock_init(&pring->ring_lock);
11461         }
11462         pring = phba->sli4_hba.els_wq->pring;
11463         pring->flag = 0;
11464         pring->ringno = LPFC_ELS_RING;
11465         pring->txcmplq_cnt = 0;
11466         INIT_LIST_HEAD(&pring->txq);
11467         INIT_LIST_HEAD(&pring->txcmplq);
11468         INIT_LIST_HEAD(&pring->iocb_continueq);
11469         spin_lock_init(&pring->ring_lock);
11470
11471         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11472                 pring = phba->sli4_hba.nvmels_wq->pring;
11473                 pring->flag = 0;
11474                 pring->ringno = LPFC_ELS_RING;
11475                 pring->txcmplq_cnt = 0;
11476                 INIT_LIST_HEAD(&pring->txq);
11477                 INIT_LIST_HEAD(&pring->txcmplq);
11478                 INIT_LIST_HEAD(&pring->iocb_continueq);
11479                 spin_lock_init(&pring->ring_lock);
11480         }
11481
11482         spin_unlock_irq(&phba->hbalock);
11483 }
11484
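/*
 * A minimal usage sketch (an assumption, not taken from the driver):
 * once lpfc_sli4_queue_init() has initialized a hardware queue's pring,
 * any manipulation of its txq must be done under that pring's
 * ring_lock. The helper name is hypothetical.
 */
#if 0	/* example only */
static void lpfc_example_splice_txq(struct lpfc_hba *phba, int idx)
{
	struct lpfc_sli_ring *pring =
		phba->sli4_hba.hdwq[idx].io_wq->pring;
	unsigned long iflags;
	LIST_HEAD(tmp);

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_splice_init(&pring->txq, &tmp);
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
	/* ... cancel or reissue everything now parked on @tmp ... */
}
#endif
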
11485 /**
11486  * lpfc_sli_queue_init - Queue initialization function
11487  * @phba: Pointer to HBA context object.
11488  *
11489  * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
11490  * ring. This function also initializes ring indices of each ring.
11491  * This function is called during the initialization of the SLI
11492  * interface of an HBA.
11493  * This function is called with no lock held and does not return a
11494  * value.
11495  **/
11496 void
11497 lpfc_sli_queue_init(struct lpfc_hba *phba)
11498 {
11499         struct lpfc_sli *psli;
11500         struct lpfc_sli_ring *pring;
11501         int i;
11502
11503         psli = &phba->sli;
11504         spin_lock_irq(&phba->hbalock);
11505         INIT_LIST_HEAD(&psli->mboxq);
11506         INIT_LIST_HEAD(&psli->mboxq_cmpl);
11507         /* Initialize list headers for txq and txcmplq as doubly linked lists */
11508         for (i = 0; i < psli->num_rings; i++) {
11509                 pring = &psli->sli3_ring[i];
11510                 pring->ringno = i;
11511                 pring->sli.sli3.next_cmdidx  = 0;
11512                 pring->sli.sli3.local_getidx = 0;
11513                 pring->sli.sli3.cmdidx = 0;
11514                 INIT_LIST_HEAD(&pring->iocb_continueq);
11515                 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
11516                 INIT_LIST_HEAD(&pring->postbufq);
11517                 pring->flag = 0;
11518                 INIT_LIST_HEAD(&pring->txq);
11519                 INIT_LIST_HEAD(&pring->txcmplq);
11520                 spin_lock_init(&pring->ring_lock);
11521         }
11522         spin_unlock_irq(&phba->hbalock);
11523 }
11524
11525 /**
11526  * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
11527  * @phba: Pointer to HBA context object.
11528  *
11529  * This routine flushes the mailbox command subsystem. It will unconditionally
11530  * flush all the mailbox commands in the three possible stages in the mailbox
11531  * command sub-system: pending mailbox command queue; the outstanding mailbox
11532  * command; and the completed mailbox command queue. It is the caller's
11533  * to make sure that the driver is in the proper state to flush the mailbox
11534  * command sub-system. Namely, the posting of mailbox commands into the
11535  * pending mailbox command queue from the various clients must be stopped;
11536  * either the HBA is in a state where it will never work on the outstanding
11537  * mailbox command (such as in EEH or ERATT conditions) or the outstanding
11538  * mailbox command has been completed.
11539  **/
11540 static void
11541 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
11542 {
11543         LIST_HEAD(completions);
11544         struct lpfc_sli *psli = &phba->sli;
11545         LPFC_MBOXQ_t *pmb;
11546         unsigned long iflag;
11547
11548         /* Disable softirqs, including timers from obtaining phba->hbalock */
11549         local_bh_disable();
11550
11551         /* Flush all the mailbox commands in the mbox system */
11552         spin_lock_irqsave(&phba->hbalock, iflag);
11553
11554         /* The pending mailbox command queue */
11555         list_splice_init(&phba->sli.mboxq, &completions);
11556         /* The outstanding active mailbox command */
11557         if (psli->mbox_active) {
11558                 list_add_tail(&psli->mbox_active->list, &completions);
11559                 psli->mbox_active = NULL;
11560                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11561         }
11562         /* The completed mailbox command queue */
11563         list_splice_init(&phba->sli.mboxq_cmpl, &completions);
11564         spin_unlock_irqrestore(&phba->hbalock, iflag);
11565
11566         /* Enable softirqs again, done with phba->hbalock */
11567         local_bh_enable();
11568
11569         /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
11570         while (!list_empty(&completions)) {
11571                 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
11572                 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
11573                 if (pmb->mbox_cmpl)
11574                         pmb->mbox_cmpl(phba, pmb);
11575         }
11576 }
11577
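/*
 * A hedged sketch of the contract established above: any mbox_cmpl
 * handler may run with MBX_NOT_FINISHED when its command was flushed
 * rather than executed by the port. The handler name is hypothetical;
 * the mempool free mirrors the pattern used elsewhere in the driver.
 */
#if 0	/* example only */
static void lpfc_example_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	if (pmb->u.mb.mbxStatus == MBX_NOT_FINISHED) {
		/* Flushed by lpfc_sli_mbox_sys_flush(); nothing ran */
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}
	/* ... normal completion processing ... */
	mempool_free(pmb, phba->mbox_mem_pool);
}
#endif
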
11578 /**
11579  * lpfc_sli_host_down - Vport cleanup function
11580  * @vport: Pointer to virtual port object.
11581  *
11582  * lpfc_sli_host_down is called to clean up the resources
11583  * associated with a vport before destroying virtual
11584  * port data structures.
11585  * This function does the following operations:
11586  * - Free discovery resources associated with this virtual
11587  *   port.
11588  * - Free iocbs associated with this virtual port in
11589  *   the txq.
11590  * - Send abort for all iocb commands associated with this
11591  *   vport in txcmplq.
11592  *
11593  * This function is called with no lock held and always returns 1.
11594  **/
11595 int
11596 lpfc_sli_host_down(struct lpfc_vport *vport)
11597 {
11598         LIST_HEAD(completions);
11599         struct lpfc_hba *phba = vport->phba;
11600         struct lpfc_sli *psli = &phba->sli;
11601         struct lpfc_queue *qp = NULL;
11602         struct lpfc_sli_ring *pring;
11603         struct lpfc_iocbq *iocb, *next_iocb;
11604         int i;
11605         unsigned long flags = 0;
11606         uint16_t prev_pring_flag;
11607
11608         lpfc_cleanup_discovery_resources(vport);
11609
11610         spin_lock_irqsave(&phba->hbalock, flags);
11611
11612         /*
11613          * Error everything on the txq since these iocbs
11614          * have not been given to the FW yet.
11615          * Also issue ABTS for everything on the txcmplq
11616          */
11617         if (phba->sli_rev != LPFC_SLI_REV4) {
11618                 for (i = 0; i < psli->num_rings; i++) {
11619                         pring = &psli->sli3_ring[i];
11620                         prev_pring_flag = pring->flag;
11621                         /* Only slow rings */
11622                         if (pring->ringno == LPFC_ELS_RING) {
11623                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11624                                 /* Set the lpfc data pending flag */
11625                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
11626                         }
11627                         list_for_each_entry_safe(iocb, next_iocb,
11628                                                  &pring->txq, list) {
11629                                 if (iocb->vport != vport)
11630                                         continue;
11631                                 list_move_tail(&iocb->list, &completions);
11632                         }
11633                         list_for_each_entry_safe(iocb, next_iocb,
11634                                                  &pring->txcmplq, list) {
11635                                 if (iocb->vport != vport)
11636                                         continue;
11637                                 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11638                                                            NULL);
11639                         }
11640                         pring->flag = prev_pring_flag;
11641                 }
11642         } else {
11643                 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11644                         pring = qp->pring;
11645                         if (!pring)
11646                                 continue;
11647                         if (pring == phba->sli4_hba.els_wq->pring) {
11648                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11649                                 /* Set the lpfc data pending flag */
11650                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
11651                         }
11652                         prev_pring_flag = pring->flag;
11653                         spin_lock(&pring->ring_lock);
11654                         list_for_each_entry_safe(iocb, next_iocb,
11655                                                  &pring->txq, list) {
11656                                 if (iocb->vport != vport)
11657                                         continue;
11658                                 list_move_tail(&iocb->list, &completions);
11659                         }
11660                         spin_unlock(&pring->ring_lock);
11661                         list_for_each_entry_safe(iocb, next_iocb,
11662                                                  &pring->txcmplq, list) {
11663                                 if (iocb->vport != vport)
11664                                         continue;
11665                                 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11666                                                            NULL);
11667                         }
11668                         pring->flag = prev_pring_flag;
11669                 }
11670         }
11671         spin_unlock_irqrestore(&phba->hbalock, flags);
11672
11673         /* Make sure HBA is alive */
11674         lpfc_issue_hb_tmo(phba);
11675
11676         /* Cancel all the IOCBs from the completions list */
11677         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11678                               IOERR_SLI_DOWN);
11679         return 1;
11680 }
11681
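/*
 * One plausible teardown ordering (an assumption for illustration, not
 * lifted from the vport delete path): quiesce the vport's I/O with
 * lpfc_sli_host_down() before freeing its data structures.
 */
#if 0	/* example only */
	lpfc_sli_host_down(vport);	/* flush txq, ABTS the txcmplq */
	lpfc_stop_vport_timers(vport);	/* then stop per-vport timers */
	/* ... now it is safe to free the vport's structures ... */
#endif
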
11682 /**
11683  * lpfc_sli_hba_down - Resource cleanup function for the HBA
11684  * @phba: Pointer to HBA context object.
11685  *
11686  * This function cleans up all iocb, buffers, mailbox commands
11687  * while shutting down the HBA. This function is called with no
11688  * lock held and always returns 1.
11689  * This function does the following to cleanup driver resources:
11690  * - Free discovery resources for each virtual port
11691  * - Cleanup any pending fabric iocbs
11692  * - Iterate through the iocb txq and free each entry
11693  *   in the list.
11694  * - Free up any buffer posted to the HBA
11695  * - Free mailbox commands in the mailbox queue.
11696  **/
11697 int
11698 lpfc_sli_hba_down(struct lpfc_hba *phba)
11699 {
11700         LIST_HEAD(completions);
11701         struct lpfc_sli *psli = &phba->sli;
11702         struct lpfc_queue *qp = NULL;
11703         struct lpfc_sli_ring *pring;
11704         struct lpfc_dmabuf *buf_ptr;
11705         unsigned long flags = 0;
11706         int i;
11707
11708         /* Shutdown the mailbox command sub-system */
11709         lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
11710
11711         lpfc_hba_down_prep(phba);
11712
11713         /* Disable softirqs, including timers from obtaining phba->hbalock */
11714         local_bh_disable();
11715
11716         lpfc_fabric_abort_hba(phba);
11717
11718         spin_lock_irqsave(&phba->hbalock, flags);
11719
11720         /*
11721          * Error everything on the txq since these iocbs
11722          * have not been given to the FW yet.
11723          */
11724         if (phba->sli_rev != LPFC_SLI_REV4) {
11725                 for (i = 0; i < psli->num_rings; i++) {
11726                         pring = &psli->sli3_ring[i];
11727                         /* Only slow rings */
11728                         if (pring->ringno == LPFC_ELS_RING) {
11729                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11730                                 /* Set the lpfc data pending flag */
11731                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
11732                         }
11733                         list_splice_init(&pring->txq, &completions);
11734                 }
11735         } else {
11736                 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11737                         pring = qp->pring;
11738                         if (!pring)
11739                                 continue;
11740                         spin_lock(&pring->ring_lock);
11741                         list_splice_init(&pring->txq, &completions);
11742                         spin_unlock(&pring->ring_lock);
11743                         if (pring == phba->sli4_hba.els_wq->pring) {
11744                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11745                                 /* Set the lpfc data pending flag */
11746                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
11747                         }
11748                 }
11749         }
11750         spin_unlock_irqrestore(&phba->hbalock, flags);
11751
11752         /* Cancel all the IOCBs from the completions list */
11753         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11754                               IOERR_SLI_DOWN);
11755
11756         spin_lock_irqsave(&phba->hbalock, flags);
11757         list_splice_init(&phba->elsbuf, &completions);
11758         phba->elsbuf_cnt = 0;
11759         phba->elsbuf_prev_cnt = 0;
11760         spin_unlock_irqrestore(&phba->hbalock, flags);
11761
11762         while (!list_empty(&completions)) {
11763                 list_remove_head(&completions, buf_ptr,
11764                         struct lpfc_dmabuf, list);
11765                 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
11766                 kfree(buf_ptr);
11767         }
11768
11769         /* Enable softirqs again, done with phba->hbalock */
11770         local_bh_enable();
11771
11772         /* Return any active mbox cmds */
11773         del_timer_sync(&psli->mbox_tmo);
11774
11775         spin_lock_irqsave(&phba->pport->work_port_lock, flags);
11776         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
11777         spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
11778
11779         return 1;
11780 }
11781
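/*
 * A minimal sketch (handler name hypothetical): iocbs flushed by
 * lpfc_sli_hba_down() complete with IOSTAT_LOCAL_REJECT and
 * IOERR_SLI_DOWN, which lets a cmd_cmpl handler tell a teardown flush
 * apart from a genuine port failure.
 */
#if 0	/* example only */
static void lpfc_example_cmd_cmpl(struct lpfc_hba *phba,
				  struct lpfc_iocbq *cmdiocb,
				  struct lpfc_iocbq *rspiocb)
{
	if (get_job_ulpstatus(phba, rspiocb) == IOSTAT_LOCAL_REJECT &&
	    (get_job_word4(phba, rspiocb) & IOERR_PARAM_MASK) ==
	    IOERR_SLI_DOWN) {
		/* Flushed during HBA teardown, not failed by the port */
	}
}
#endif
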
11782 /**
11783  * lpfc_sli_pcimem_bcopy - SLI memory copy function
11784  * @srcp: Source memory pointer.
11785  * @destp: Destination memory pointer.
11786  * @cnt: Number of bytes to copy (must be a multiple of four).
11787  *
11788  * This function is used for copying data between driver memory
11789  * and the SLI memory. This function also changes the endianness
11790  * of each word if native endianness is different from SLI
11791  * endianness. This function can be called with or without a
11792  * lock held.
11793  **/
11794 void
11795 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
11796 {
11797         uint32_t *src = srcp;
11798         uint32_t *dest = destp;
11799         uint32_t ldata;
11800         int i;
11801
11802         for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
11803                 ldata = *src;
11804                 ldata = le32_to_cpu(ldata);
11805                 *dest = ldata;
11806                 src++;
11807                 dest++;
11808         }
11809 }
11810
11811
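/*
 * Usage sketch: @cnt is a byte count, so copying a whole mailbox out of
 * little-endian SLIM looks like the following (illustrative only).
 */
#if 0	/* example only */
	MAILBOX_t mb;

	lpfc_sli_pcimem_bcopy(phba->mbox, &mb, sizeof(MAILBOX_t));
#endif
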
11812 /**
11813  * lpfc_sli_bemem_bcopy - SLI memory copy function
11814  * @srcp: Source memory pointer.
11815  * @destp: Destination memory pointer.
11816  * @cnt: Number of bytes to copy (must be a multiple of four).
11817  *
11818  * This function is used for copying data from a data structure in
11819  * big endian representation to local endianness.
11820  * This function can be called with or without a lock held.
11821  **/
11822 void
11823 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
11824 {
11825         uint32_t *src = srcp;
11826         uint32_t *dest = destp;
11827         uint32_t ldata;
11828         int i;
11829
11830         for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
11831                 ldata = *src;
11832                 ldata = be32_to_cpu(ldata);
11833                 *dest = ldata;
11834                 src++;
11835                 dest++;
11836         }
11837 }
11838
11839 /**
11840  * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
11841  * @phba: Pointer to HBA context object.
11842  * @pring: Pointer to driver SLI ring object.
11843  * @mp: Pointer to driver buffer object.
11844  *
11845  * This function is called with no lock held.
11846  * It always returns zero after adding the buffer to the postbufq
11847  * buffer list.
11848  **/
11849 int
11850 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11851                          struct lpfc_dmabuf *mp)
11852 {
11853         /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
11854            later */
11855         spin_lock_irq(&phba->hbalock);
11856         list_add_tail(&mp->list, &pring->postbufq);
11857         pring->postbufq_cnt++;
11858         spin_unlock_irq(&phba->hbalock);
11859         return 0;
11860 }
11861
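/*
 * A minimal sketch of the post/claim pairing (allocation style is an
 * assumption): a DMA buffer parked on postbufq here is later claimed by
 * its DMA address via lpfc_sli_ringpostbuf_get().
 */
#if 0	/* example only */
	struct lpfc_dmabuf *mp = kmalloc(sizeof(*mp), GFP_KERNEL);

	if (mp) {
		mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp->phys);
		if (mp->virt)
			lpfc_sli_ringpostbuf_put(phba, pring, mp);
	}
	/* ... later, from the unsolicited event handler: */
	mp = lpfc_sli_ringpostbuf_get(phba, pring, phys);
#endif
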
11862 /**
11863  * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
11864  * @phba: Pointer to HBA context object.
11865  *
11866  * When HBQ is enabled, buffers are searched based on tags. This function
11867  * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
11868  * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
11869  * does not conflict with tags of buffer posted for unsolicited events.
11870  * The function returns the allocated tag. The function is called with
11871  * no locks held.
11872  **/
11873 uint32_t
11874 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
11875 {
11876         spin_lock_irq(&phba->hbalock);
11877         phba->buffer_tag_count++;
11878         /*
11879          * Always set the QUE_BUFTAG_BIT to distinguish this tag
11880          * from a tag assigned by HBQ.
11881          */
11882         phba->buffer_tag_count |= QUE_BUFTAG_BIT;
11883         spin_unlock_irq(&phba->hbalock);
11884         return phba->buffer_tag_count;
11885 }
11886
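/*
 * Usage sketch: a buffer posted with CMD_QUE_XRI64_CX carries this tag
 * so the CMD_IOCB_RET_XRI64_CX completion can find it again.
 */
#if 0	/* example only */
	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
	/* ... post the buffer, then on the RET_XRI64_CX completion: */
	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, mp->buffer_tag);
#endif
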
11887 /**
11888  * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
11889  * @phba: Pointer to HBA context object.
11890  * @pring: Pointer to driver SLI ring object.
11891  * @tag: Buffer tag.
11892  *
11893  * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
11894  * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
11895  * iocb is posted to the response ring with the tag of the buffer.
11896  * This function searches the pring->postbufq list using the tag
11897  * to find buffer associated with CMD_IOCB_RET_XRI64_CX
11898  * iocb. If the buffer is found then lpfc_dmabuf object of the
11899  * buffer is returned to the caller else NULL is returned.
11900  * This function is called with no lock held.
11901  **/
11902 struct lpfc_dmabuf *
11903 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11904                         uint32_t tag)
11905 {
11906         struct lpfc_dmabuf *mp, *next_mp;
11907         struct list_head *slp = &pring->postbufq;
11908
11909         /* Search postbufq, from the beginning, looking for a match on tag */
11910         spin_lock_irq(&phba->hbalock);
11911         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11912                 if (mp->buffer_tag == tag) {
11913                         list_del_init(&mp->list);
11914                         pring->postbufq_cnt--;
11915                         spin_unlock_irq(&phba->hbalock);
11916                         return mp;
11917                 }
11918         }
11919
11920         spin_unlock_irq(&phba->hbalock);
11921         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11922                         "0402 Cannot find virtual addr for buffer tag on "
11923                         "ring %d Data x%lx x%px x%px x%x\n",
11924                         pring->ringno, (unsigned long) tag,
11925                         slp->next, slp->prev, pring->postbufq_cnt);
11926
11927         return NULL;
11928 }
11929
11930 /**
11931  * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
11932  * @phba: Pointer to HBA context object.
11933  * @pring: Pointer to driver SLI ring object.
11934  * @phys: DMA address of the buffer.
11935  *
11936  * This function searches the buffer list using the dma_address
11937  * of unsolicited event to find the driver's lpfc_dmabuf object
11938  * corresponding to the dma_address. The function returns the
11939  * lpfc_dmabuf object if a buffer is found else it returns NULL.
11940  * This function is called by the ct and els unsolicited event
11941  * handlers to get the buffer associated with the unsolicited
11942  * event.
11943  *
11944  * This function is called with no lock held.
11945  **/
11946 struct lpfc_dmabuf *
11947 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11948                          dma_addr_t phys)
11949 {
11950         struct lpfc_dmabuf *mp, *next_mp;
11951         struct list_head *slp = &pring->postbufq;
11952
11953         /* Search postbufq, from the beginning, looking for a match on phys */
11954         spin_lock_irq(&phba->hbalock);
11955         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11956                 if (mp->phys == phys) {
11957                         list_del_init(&mp->list);
11958                         pring->postbufq_cnt--;
11959                         spin_unlock_irq(&phba->hbalock);
11960                         return mp;
11961                 }
11962         }
11963
11964         spin_unlock_irq(&phba->hbalock);
11965         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11966                         "0410 Cannot find virtual addr for mapped buf on "
11967                         "ring %d Data x%llx x%px x%px x%x\n",
11968                         pring->ringno, (unsigned long long)phys,
11969                         slp->next, slp->prev, pring->postbufq_cnt);
11970         return NULL;
11971 }
11972
11973 /**
11974  * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
11975  * @phba: Pointer to HBA context object.
11976  * @cmdiocb: Pointer to driver command iocb object.
11977  * @rspiocb: Pointer to driver response iocb object.
11978  *
11979  * This function is the completion handler for the abort iocbs for
11980  * ELS commands. This function is called from the ELS ring event
11981  * handler with no lock held. This function frees memory resources
11982  * associated with the abort iocb.
11983  **/
11984 static void
11985 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11986                         struct lpfc_iocbq *rspiocb)
11987 {
11988         u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
11989         u32 ulp_word4 = get_job_word4(phba, rspiocb);
11990         u8 cmnd = get_job_cmnd(phba, cmdiocb);
11991
11992         if (ulp_status) {
11993                 /*
11994                  * Assume that the port already completed and returned, or
11995                  * will return the iocb. Just log the message.
11996                  */
11997                 if (phba->sli_rev < LPFC_SLI_REV4) {
11998                         if (cmnd == CMD_ABORT_XRI_CX &&
11999                             ulp_status == IOSTAT_LOCAL_REJECT &&
12000                             ulp_word4 == IOERR_ABORT_REQUESTED) {
12001                                 goto release_iocb;
12002                         }
12003                 }
12004
12005                 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
12006                                 "0327 Cannot abort els iocb x%px "
12007                                 "with io cmd xri %x abort tag : x%x, "
12008                                 "abort status %x abort code %x\n",
12009                                 cmdiocb, get_job_abtsiotag(phba, cmdiocb),
12010                                 (phba->sli_rev == LPFC_SLI_REV4) ?
12011                                 get_wqe_reqtag(cmdiocb) :
12012                                 cmdiocb->iocb.un.acxri.abortContextTag,
12013                                 ulp_status, ulp_word4);
12014
12015         }
12016 release_iocb:
12017         lpfc_sli_release_iocbq(phba, cmdiocb);
12018         return;
12019 }
12020
12021 /**
12022  * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
12023  * @phba: Pointer to HBA context object.
12024  * @cmdiocb: Pointer to driver command iocb object.
12025  * @rspiocb: Pointer to driver response iocb object.
12026  *
12027  * The function is called from SLI ring event handler with no
12028  * lock held. This function is the completion handler for ELS commands
12029  * which are aborted. The function frees memory resources used for
12030  * the aborted ELS commands.
12031  **/
12032 void
12033 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12034                      struct lpfc_iocbq *rspiocb)
12035 {
12036         struct lpfc_nodelist *ndlp = NULL;
12037         IOCB_t *irsp;
12038         u32 ulp_command, ulp_status, ulp_word4, iotag;
12039
12040         ulp_command = get_job_cmnd(phba, cmdiocb);
12041         ulp_status = get_job_ulpstatus(phba, rspiocb);
12042         ulp_word4 = get_job_word4(phba, rspiocb);
12043
12044         if (phba->sli_rev == LPFC_SLI_REV4) {
12045                 iotag = get_wqe_reqtag(cmdiocb);
12046         } else {
12047                 irsp = &rspiocb->iocb;
12048                 iotag = irsp->ulpIoTag;
12049         }
12050
12051         /* ELS cmd tag <ulpIoTag> completes */
12052         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
12053                         "0139 Ignoring ELS cmd code x%x completion Data: "
12054                         "x%x x%x x%x\n",
12055                         ulp_command, ulp_status, ulp_word4, iotag);
12056
12057         /*
12058          * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
12059          * if exchange is busy.
12060          */
12061         if (ulp_command == CMD_GEN_REQUEST64_CR) {
12062                 ndlp = cmdiocb->context_un.ndlp;
12063                 lpfc_ct_free_iocb(phba, cmdiocb);
12064         } else {
12065                 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
12066                 lpfc_els_free_iocb(phba, cmdiocb);
12067         }
12068
12069         lpfc_nlp_put(ndlp);
12070 }
12071
12072 /**
12073  * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
12074  * @phba: Pointer to HBA context object.
12075  * @pring: Pointer to driver SLI ring object.
12076  * @cmdiocb: Pointer to driver command iocb object.
12077  * @cmpl: completion function.
12078  *
12079  * This function issues an abort iocb for the provided command iocb. In case
12080  * of unloading, the abort iocb will not be issued to commands on the ELS
12081  * ring. Instead, the callback function shall be changed to those commands
12082  * ring. Instead, the completion callback of such commands is changed
12083  * so that nothing happens when they finish. This function is called
12084  * with hbalock held and no ring_lock held (SLI4). It returns
12085  * IOCB_ABORTING when the command iocb is itself an abort request.
12086  **/
12087 int
12088 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12089                            struct lpfc_iocbq *cmdiocb, void *cmpl)
12090 {
12091         struct lpfc_vport *vport = cmdiocb->vport;
12092         struct lpfc_iocbq *abtsiocbp;
12093         int retval = IOCB_ERROR;
12094         unsigned long iflags;
12095         struct lpfc_nodelist *ndlp = NULL;
12096         u32 ulp_command = get_job_cmnd(phba, cmdiocb);
12097         u16 ulp_context, iotag;
12098         bool ia;
12099
12100         /*
12101          * There are certain command types we don't want to abort.  And we
12102          * don't want to abort commands that are already in the process of
12103          * being aborted.
12104          */
12105         if (ulp_command == CMD_ABORT_XRI_WQE ||
12106             ulp_command == CMD_ABORT_XRI_CN ||
12107             ulp_command == CMD_CLOSE_XRI_CN ||
12108             cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED)
12109                 return IOCB_ABORTING;
12110
12111         if (!pring) {
12112                 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
12113                         cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
12114                 else
12115                         cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
12116                 return retval;
12117         }
12118
12119         /*
12120          * If we're unloading, don't abort iocb on the ELS ring, but change
12121          * the callback so that nothing happens when it finishes.
12122          */
12123         if ((vport->load_flag & FC_UNLOADING) &&
12124             pring->ringno == LPFC_ELS_RING) {
12125                 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
12126                         cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
12127                 else
12128                         cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
12129                 return retval;
12130         }
12131
12132         /* issue ABTS for this IOCB based on iotag */
12133         abtsiocbp = __lpfc_sli_get_iocbq(phba);
12134         if (abtsiocbp == NULL)
12135                 return IOCB_NORESOURCE;
12136
12137         /* This signals the response to set the correct status
12138          * before calling the completion handler
12139          */
12140         cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
12141
12142         if (phba->sli_rev == LPFC_SLI_REV4) {
12143                 ulp_context = cmdiocb->sli4_xritag;
12144                 iotag = abtsiocbp->iotag;
12145         } else {
12146                 iotag = cmdiocb->iocb.ulpIoTag;
12147                 if (pring->ringno == LPFC_ELS_RING) {
12148                         ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
12149                         ulp_context = ndlp->nlp_rpi;
12150                 } else {
12151                         ulp_context = cmdiocb->iocb.ulpContext;
12152                 }
12153         }
12154
12155         if (phba->link_state < LPFC_LINK_UP ||
12156             (phba->sli_rev == LPFC_SLI_REV4 &&
12157              phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN))
12158                 ia = true;
12159         else
12160                 ia = false;
12161
12162         lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag,
12163                                 cmdiocb->iocb.ulpClass,
12164                                 LPFC_WQE_CQ_ID_DEFAULT, ia);
12165
12166         abtsiocbp->vport = vport;
12167
12168         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
12169         abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
12170         if (cmdiocb->cmd_flag & LPFC_IO_FCP)
12171                 abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
12172
12173         if (cmdiocb->cmd_flag & LPFC_IO_FOF)
12174                 abtsiocbp->cmd_flag |= LPFC_IO_FOF;
12175
12176         if (cmpl)
12177                 abtsiocbp->cmd_cmpl = cmpl;
12178         else
12179                 abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl;
12180         abtsiocbp->vport = vport;
12181
12182         if (phba->sli_rev == LPFC_SLI_REV4) {
12183                 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
12184                 if (unlikely(pring == NULL))
12185                         goto abort_iotag_exit;
12186                 /* Note: both hbalock and ring_lock need to be held here */
12187                 spin_lock_irqsave(&pring->ring_lock, iflags);
12188                 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12189                         abtsiocbp, 0);
12190                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
12191         } else {
12192                 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12193                         abtsiocbp, 0);
12194         }
12195
12196 abort_iotag_exit:
12197
12198         lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
12199                          "0339 Abort IO XRI x%x, Original iotag x%x, "
12200                          "abort tag x%x Cmdjob : x%px Abortjob : x%px "
12201                          "retval x%x\n",
12202                          ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ?
12203                          cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp,
12204                          retval);
12205         if (retval) {
12206                 cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
12207                 __lpfc_sli_release_iocbq(phba, abtsiocbp);
12208         }
12209
12210         /*
12211          * Caller to this routine should check for IOCB_ERROR
12212          * and handle it properly.  This routine no longer removes
12213          * iocb off txcmplq and call compl in case of IOCB_ERROR.
12214          */
12215         return retval;
12216 }
12217
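/*
 * Caller-side sketch (mirrors the usage in lpfc_sli_host_down() above):
 * the routine expects hbalock to be held, and a NULL @cmpl selects the
 * default lpfc_sli_abort_els_cmpl completion.
 */
#if 0	/* example only */
	unsigned long iflags;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflags);
	rc = lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (rc != IOCB_SUCCESS && rc != IOCB_ABORTING)
		/* the abort was not issued; handle the error */ ;
#endif
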
12218 /**
12219  * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
12220  * @phba: pointer to lpfc HBA data structure.
12221  *
12222  * This routine will abort all pending and outstanding iocbs to an HBA.
12223  **/
12224 void
12225 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
12226 {
12227         struct lpfc_sli *psli = &phba->sli;
12228         struct lpfc_sli_ring *pring;
12229         struct lpfc_queue *qp = NULL;
12230         int i;
12231
12232         if (phba->sli_rev != LPFC_SLI_REV4) {
12233                 for (i = 0; i < psli->num_rings; i++) {
12234                         pring = &psli->sli3_ring[i];
12235                         lpfc_sli_abort_iocb_ring(phba, pring);
12236                 }
12237                 return;
12238         }
12239         list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
12240                 pring = qp->pring;
12241                 if (!pring)
12242                         continue;
12243                 lpfc_sli_abort_iocb_ring(phba, pring);
12244         }
12245 }
12246
12247 /**
12248  * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts
12249  * @iocbq: Pointer to iocb object.
12250  * @vport: Pointer to driver virtual port object.
12251  *
12252  * This function acts as an iocb filter for functions which abort FCP iocbs.
12253  *
12254  * Return values
12255  * -ENODEV, if a null iocb or vport ptr is encountered
12256  * -EINVAL, if the iocb is not an FCP I/O, is not on the txcmplq, is
12257  *          already marked as driver-aborted, or is an abort iocb itself
12258  * 0, passes criteria for aborting the FCP I/O iocb
12259  **/
12260 static int
12261 lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
12262                                      struct lpfc_vport *vport)
12263 {
12264         u8 ulp_command;
12265
12266         /* No null ptr vports */
12267         if (!iocbq || iocbq->vport != vport)
12268                 return -ENODEV;
12269
12270         /* iocb must be for FCP IO, already exists on the TX cmpl queue,
12271          * can't be premarked as driver aborted, nor be an ABORT iocb itself
12272          */
12273         ulp_command = get_job_cmnd(vport->phba, iocbq);
12274         if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12275             !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) ||
12276             (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12277             (ulp_command == CMD_ABORT_XRI_CN ||
12278              ulp_command == CMD_CLOSE_XRI_CN ||
12279              ulp_command == CMD_ABORT_XRI_WQE))
12280                 return -EINVAL;
12281
12282         return 0;
12283 }
12284
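/*
 * Ordering sketch: abort paths run this coarse filter first, then the
 * lun/target/host filter (see lpfc_sli_abort_iocb() below for the real
 * loop this is modeled on).
 */
#if 0	/* example only */
	if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
		continue;	/* not a live, abortable FCP iocb */
	if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
				       LPFC_CTX_TGT))
		continue;	/* not under this target */
	/* @iocbq passed both filters; safe to build the ABTS */
#endif
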
12285 /**
12286  * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target
12287  * @iocbq: Pointer to driver iocb object.
12288  * @vport: Pointer to driver virtual port object.
12289  * @tgt_id: SCSI ID of the target.
12290  * @lun_id: LUN ID of the scsi device.
12291  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
12292  *
12293  * This function acts as an iocb filter for validating a lun/SCSI target/SCSI
12294  * host.
12295  *
12296  * It will return
12297  * 0 if the filtering criteria are met for the given iocb and will return
12298  * 1 if the filtering criteria are not met.
12299  * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
12300  * given iocb is for the SCSI device specified by vport, tgt_id and
12301  * lun_id parameter.
12302  * If ctx_cmd == LPFC_CTX_TGT,  the function returns 0 only if the
12303  * given iocb is for the SCSI target specified by vport and tgt_id
12304  * parameters.
12305  * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
12306  * given iocb is for the SCSI host associated with the given vport.
12307  * This function is called with no locks held.
12308  **/
12309 static int
12310 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
12311                            uint16_t tgt_id, uint64_t lun_id,
12312                            lpfc_ctx_cmd ctx_cmd)
12313 {
12314         struct lpfc_io_buf *lpfc_cmd;
12315         int rc = 1;
12316
12317         lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12318
12319         if (lpfc_cmd->pCmd == NULL)
12320                 return rc;
12321
12322         switch (ctx_cmd) {
12323         case LPFC_CTX_LUN:
12324                 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12325                     (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
12326                     (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
12327                         rc = 0;
12328                 break;
12329         case LPFC_CTX_TGT:
12330                 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12331                     (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
12332                         rc = 0;
12333                 break;
12334         case LPFC_CTX_HOST:
12335                 rc = 0;
12336                 break;
12337         default:
12338                 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
12339                         __func__, ctx_cmd);
12340                 break;
12341         }
12342
12343         return rc;
12344 }
12345
12346 /**
12347  * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
12348  * @vport: Pointer to virtual port.
12349  * @tgt_id: SCSI ID of the target.
12350  * @lun_id: LUN ID of the scsi device.
12351  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12352  *
12353  * This function returns the number of FCP commands pending for the vport.
12354  * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
12355  * commands pending on the vport associated with SCSI device specified
12356  * by tgt_id and lun_id parameters.
12357  * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
12358  * commands pending on the vport associated with SCSI target specified
12359  * by tgt_id parameter.
12360  * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
12361  * commands pending on the vport.
12362  * This function returns the number of iocbs which satisfy the filter.
12363  * This function is called without any lock held.
12364  **/
12365 int
12366 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
12367                   lpfc_ctx_cmd ctx_cmd)
12368 {
12369         struct lpfc_hba *phba = vport->phba;
12370         struct lpfc_iocbq *iocbq;
12371         int sum, i;
12372         unsigned long iflags;
12373         u8 ulp_command;
12374
12375         spin_lock_irqsave(&phba->hbalock, iflags);
12376         for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
12377                 iocbq = phba->sli.iocbq_lookup[i];
12378
12379                 if (!iocbq || iocbq->vport != vport)
12380                         continue;
12381                 if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12382                     !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ))
12383                         continue;
12384
12385                 /* Include counting outstanding aborts */
12386                 ulp_command = get_job_cmnd(phba, iocbq);
12387                 if (ulp_command == CMD_ABORT_XRI_CN ||
12388                     ulp_command == CMD_CLOSE_XRI_CN ||
12389                     ulp_command == CMD_ABORT_XRI_WQE) {
12390                         sum++;
12391                         continue;
12392                 }
12393
12394                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12395                                                ctx_cmd) == 0)
12396                         sum++;
12397         }
12398         spin_unlock_irqrestore(&phba->hbalock, iflags);
12399
12400         return sum;
12401 }
12402
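/*
 * A hedged sketch (the poll budget is an assumed value): counting
 * pending FCP I/O for a target until it drains, e.g. after a reset.
 */
#if 0	/* example only */
	int wait = 50;	/* ~5 seconds at 100 ms per poll */
	int cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_TGT);

	while (cnt && wait--) {
		msleep(100);
		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_TGT);
	}
#endif
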
12403 /**
12404  * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
12405  * @phba: Pointer to HBA context object
12406  * @cmdiocb: Pointer to command iocb object.
12407  * @rspiocb: Pointer to response iocb object.
12408  *
12409  * This function is called when an aborted FCP iocb completes. This
12410  * function is called by the ring event handler with no lock held.
12411  * This function frees the iocb.
12412  **/
12413 void
12414 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12415                         struct lpfc_iocbq *rspiocb)
12416 {
12417         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12418                         "3096 ABORT_XRI_CX completing on rpi x%x "
12419                         "original iotag x%x, abort cmd iotag x%x "
12420                         "status 0x%x, reason 0x%x\n",
12421                         (phba->sli_rev == LPFC_SLI_REV4) ?
12422                         cmdiocb->sli4_xritag :
12423                         cmdiocb->iocb.un.acxri.abortContextTag,
12424                         get_job_abtsiotag(phba, cmdiocb),
12425                         cmdiocb->iotag, get_job_ulpstatus(phba, rspiocb),
12426                         get_job_word4(phba, rspiocb));
12427         lpfc_sli_release_iocbq(phba, cmdiocb);
12428         return;
12429 }
12430
12431 /**
12432  * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
12433  * @vport: Pointer to virtual port.
12434  * @tgt_id: SCSI ID of the target.
12435  * @lun_id: LUN ID of the scsi device.
12436  * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12437  *
12438  * This function sends an abort command for every SCSI command
12439  * associated with the given virtual port pending on the ring
12440  * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
12441  * lpfc_sli_validate_fcp_iocb function.  The ordering for validation before
12442  * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
12443  * followed by lpfc_sli_validate_fcp_iocb.
12444  *
12445  * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
12446  * FCP iocbs associated with the lun specified by the tgt_id and
12447  * lun_id parameters.
12448  * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
12449  * FCP iocbs associated with SCSI target specified by tgt_id parameter.
12450  * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
12451  * FCP iocbs associated with the virtual port.
12452  * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING], for SLI4
12453  * lpfc_sli4_calc_ring is used.
12454  * This function returns the number of iocbs it failed to abort.
12455  * This function is called with no locks held.
12456  **/
12457 int
12458 lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
12459                     lpfc_ctx_cmd abort_cmd)
12460 {
12461         struct lpfc_hba *phba = vport->phba;
12462         struct lpfc_sli_ring *pring = NULL;
12463         struct lpfc_iocbq *iocbq;
12464         int errcnt = 0, ret_val = 0;
12465         unsigned long iflags;
12466         int i;
12467
12468         /* all I/Os are in process of being flushed */
12469         if (phba->hba_flag & HBA_IOQ_FLUSH)
12470                 return errcnt;
12471
12472         for (i = 1; i <= phba->sli.last_iotag; i++) {
12473                 iocbq = phba->sli.iocbq_lookup[i];
12474
12475                 if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12476                         continue;
12477
12478                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12479                                                abort_cmd) != 0)
12480                         continue;
12481
12482                 spin_lock_irqsave(&phba->hbalock, iflags);
12483                 if (phba->sli_rev == LPFC_SLI_REV3) {
12484                         pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12485                 } else if (phba->sli_rev == LPFC_SLI_REV4) {
12486                         pring = lpfc_sli4_calc_ring(phba, iocbq);
12487                 }
12488                 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
12489                                                      lpfc_sli_abort_fcp_cmpl);
12490                 spin_unlock_irqrestore(&phba->hbalock, iflags);
12491                 if (ret_val != IOCB_SUCCESS)
12492                         errcnt++;
12493         }
12494
12495         return errcnt;
12496 }
12497
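/*
 * A minimal sketch of an EH-style caller (assumed flow, not lifted from
 * the SCSI error handlers): issue aborts for a LUN, then use
 * lpfc_sli_sum_iocb() to confirm nothing remains outstanding.
 */
#if 0	/* example only */
	lpfc_sli_abort_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
	if (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN))
		/* I/O still outstanding; escalate the recovery */ ;
#endif
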
12498 /**
12499  * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
12500  * @vport: Pointer to virtual port.
12501  * @pring: Pointer to driver SLI ring object.
12502  * @tgt_id: SCSI ID of the target.
12503  * @lun_id: LUN ID of the scsi device.
12504  * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12505  *
12506  * This function sends an abort command for every SCSI command
12507  * associated with the given virtual port pending on the ring
12508  * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
12509  * lpfc_sli_validate_fcp_iocb function.  The ordering for validation before
12510  * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
12511  * followed by lpfc_sli_validate_fcp_iocb.
12512  *
12513  * When cmd == LPFC_CTX_LUN, the function sends abort only to the
12514  * FCP iocbs associated with the lun specified by the tgt_id and
12515  * lun_id parameters.
12516  * When cmd == LPFC_CTX_TGT, the function sends abort only to the
12517  * FCP iocbs associated with the SCSI target specified by tgt_id.
12518  * When cmd == LPFC_CTX_HOST, the function sends abort to all
12519  * FCP iocbs associated with the virtual port.
12520  * This function returns the number of iocbs it aborted.
12521  * This function is called with no locks held right after a taskmgmt
12522  * command is sent.
12523  **/
12524 int
12525 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
12526                         uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
12527 {
12528         struct lpfc_hba *phba = vport->phba;
12529         struct lpfc_io_buf *lpfc_cmd;
12530         struct lpfc_iocbq *abtsiocbq;
12531         struct lpfc_nodelist *ndlp = NULL;
12532         struct lpfc_iocbq *iocbq;
12533         int sum, i, ret_val;
12534         unsigned long iflags;
12535         struct lpfc_sli_ring *pring_s4 = NULL;
12536         u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT;
12537         bool ia;
12538
12539         spin_lock_irqsave(&phba->hbalock, iflags);
12540
12541         /* all I/Os are in process of being flushed */
12542         if (phba->hba_flag & HBA_IOQ_FLUSH) {
12543                 spin_unlock_irqrestore(&phba->hbalock, iflags);
12544                 return 0;
12545         }
12546         sum = 0;
12547
12548         for (i = 1; i <= phba->sli.last_iotag; i++) {
12549                 iocbq = phba->sli.iocbq_lookup[i];
12550
12551                 if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12552                         continue;
12553
12554                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12555                                                cmd) != 0)
12556                         continue;
12557
12558                 /* Guard against IO completion being called at same time */
12559                 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12560                 spin_lock(&lpfc_cmd->buf_lock);
12561
12562                 if (!lpfc_cmd->pCmd) {
12563                         spin_unlock(&lpfc_cmd->buf_lock);
12564                         continue;
12565                 }
12566
12567                 if (phba->sli_rev == LPFC_SLI_REV4) {
12568                         pring_s4 =
12569                             phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
12570                         if (!pring_s4) {
12571                                 spin_unlock(&lpfc_cmd->buf_lock);
12572                                 continue;
12573                         }
12574                         /* Note: both hbalock and ring_lock must be held here */
12575                         spin_lock(&pring_s4->ring_lock);
12576                 }
12577
12578                 /*
12579                  * If the iocbq is already being aborted, don't take a second
12580                  * action, but do count it.
12581                  */
12582                 if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12583                     !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
12584                         if (phba->sli_rev == LPFC_SLI_REV4)
12585                                 spin_unlock(&pring_s4->ring_lock);
12586                         spin_unlock(&lpfc_cmd->buf_lock);
12587                         continue;
12588                 }
12589
12590                 /* issue ABTS for this IOCB based on iotag */
12591                 abtsiocbq = __lpfc_sli_get_iocbq(phba);
12592                 if (!abtsiocbq) {
12593                         if (phba->sli_rev == LPFC_SLI_REV4)
12594                                 spin_unlock(&pring_s4->ring_lock);
12595                         spin_unlock(&lpfc_cmd->buf_lock);
12596                         continue;
12597                 }
12598
12599                 if (phba->sli_rev == LPFC_SLI_REV4) {
12600                         iotag = abtsiocbq->iotag;
12601                         ulp_context = iocbq->sli4_xritag;
12602                         cqid = lpfc_cmd->hdwq->io_cq_map;
12603                 } else {
12604                         iotag = iocbq->iocb.ulpIoTag;
12605                         if (pring->ringno == LPFC_ELS_RING) {
12606                                 ndlp = (struct lpfc_nodelist *)(iocbq->context1);
12607                                 ulp_context = ndlp->nlp_rpi;
12608                         } else {
12609                                 ulp_context = iocbq->iocb.ulpContext;
12610                         }
12611                 }
12612
12613                 ndlp = lpfc_cmd->rdata->pnode;
12614
12615                 if (lpfc_is_link_up(phba) &&
12616                     (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
12617                         ia = false;
12618                 else
12619                         ia = true;
12620
12621                 lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag,
12622                                         iocbq->iocb.ulpClass, cqid,
12623                                         ia);
12624
12625                 abtsiocbq->vport = vport;
12626
12627                 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
12628                 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
12629                 if (iocbq->cmd_flag & LPFC_IO_FCP)
12630                         abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
12631                 if (iocbq->cmd_flag & LPFC_IO_FOF)
12632                         abtsiocbq->cmd_flag |= LPFC_IO_FOF;
12633
12634                 /* Setup callback routine and issue the command. */
12635                 abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl;
12636
12637                 /*
12638                  * Indicate the IO is being aborted by the driver and set
12639                  * the caller's flag into the aborted IO.
12640                  */
12641                 iocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
12642
12643                 if (phba->sli_rev == LPFC_SLI_REV4) {
12644                         ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
12645                                                         abtsiocbq, 0);
12646                         spin_unlock(&pring_s4->ring_lock);
12647                 } else {
12648                         ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
12649                                                         abtsiocbq, 0);
12650                 }
12651
12652                 spin_unlock(&lpfc_cmd->buf_lock);
12653
12654                 if (ret_val == IOCB_ERROR)
12655                         __lpfc_sli_release_iocbq(phba, abtsiocbq);
12656                 else
12657                         sum++;
12658         }
12659         spin_unlock_irqrestore(&phba->hbalock, iflags);
12660         return sum;
12661 }
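
/*
 * Editorial sketch (not compiled into the driver): the per-IO locking
 * pattern used by the abort loop above, reduced to its skeleton. The
 * lpfc_example_* name is hypothetical; the lock ordering it shows, the
 * hbalock, then the io_buf buf_lock, then the SLI4 ring_lock, mirrors
 * the loop above and must be preserved to avoid racing IO completion.
 */
#if 0
static void lpfc_example_abort_walk(struct lpfc_hba *phba)
{
	unsigned long iflags;
	int i;

	spin_lock_irqsave(&phba->hbalock, iflags);	/* 1: freeze lookup */
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		struct lpfc_iocbq *iocbq = phba->sli.iocbq_lookup[i];
		struct lpfc_io_buf *buf;

		if (!iocbq)
			continue;
		buf = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
		spin_lock(&buf->buf_lock);		/* 2: fence completion */
		if (phba->sli_rev == LPFC_SLI_REV4) {
			struct lpfc_sli_ring *pring =
			    phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;

			spin_lock(&pring->ring_lock);	/* 3: protect the WQ */
			/* build and issue the ABTS here */
			spin_unlock(&pring->ring_lock);
		}
		spin_unlock(&buf->buf_lock);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
#endif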
12662
12663 /**
12664  * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
12665  * @phba: Pointer to HBA context object.
12666  * @cmdiocbq: Pointer to command iocb.
12667  * @rspiocbq: Pointer to response iocb.
12668  *
12669  * This function is the completion handler for iocbs issued using
12670  * lpfc_sli_issue_iocb_wait function. This function is called by the
12671  * ring event handler function without any lock held. This function
12672  * can be called from both worker thread context and interrupt
12673  * context. This function can also be called from any other thread
12674  * that cleans up the SLI layer objects.
12675  * This function copies the contents of the response iocb to the
12676  * response iocb memory object provided by the caller of
12677  * lpfc_sli_issue_iocb_wait and then wakes up the thread that
12678  * sleeps waiting for the iocb completion.
12679  **/
12680 static void
12681 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
12682                         struct lpfc_iocbq *cmdiocbq,
12683                         struct lpfc_iocbq *rspiocbq)
12684 {
12685         wait_queue_head_t *pdone_q;
12686         unsigned long iflags;
12687         struct lpfc_io_buf *lpfc_cmd;
12688         size_t offset = offsetof(struct lpfc_iocbq, wqe);
12689
12690         spin_lock_irqsave(&phba->hbalock, iflags);
12691         if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) {
12692
12693                 /*
12694                  * A time out has occurred for the iocb.  If a time out
12695                  * completion handler has been supplied, call it.  Otherwise,
12696                  * just free the iocbq.
12697                  */
12698
12699                 spin_unlock_irqrestore(&phba->hbalock, iflags);
12700                 cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
12701                 cmdiocbq->wait_cmd_cmpl = NULL;
12702                 if (cmdiocbq->cmd_cmpl)
12703                         (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, NULL);
12704                 else
12705                         lpfc_sli_release_iocbq(phba, cmdiocbq);
12706                 return;
12707         }
12708
12709         /* Copy the contents of the local rspiocb into the caller's buffer. */
12710         cmdiocbq->cmd_flag |= LPFC_IO_WAKE;
12711         if (cmdiocbq->context2 && rspiocbq)
12712                 memcpy((char *)cmdiocbq->context2 + offset,
12713                        (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset);
12714
12715         /* Set the exchange busy flag for task management commands */
12716         if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
12717                 !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
12718                 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
12719                         cur_iocbq);
12720                 if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
12721                         lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
12722                 else
12723                         lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
12724         }
12725
12726         pdone_q = cmdiocbq->context_un.wait_queue;
12727         if (pdone_q)
12728                 wake_up(pdone_q);
12729         spin_unlock_irqrestore(&phba->hbalock, iflags);
12730         return;
12731 }
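
/*
 * Editorial sketch (not compiled into the driver): the waiter/completer
 * handshake above, reduced to the two flags involved. Whichever side
 * takes phba->hbalock first wins: if the waiter already timed out and
 * set LPFC_IO_WAKE_TMO, the completer owns the cleanup; otherwise the
 * completer sets LPFC_IO_WAKE and the waiter knows the response was
 * copied before it was woken.
 */
#if 0
spin_lock_irqsave(&phba->hbalock, iflags);
if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) {
	/* waiter gave up; run (or substitute) the completion here */
	spin_unlock_irqrestore(&phba->hbalock, iflags);
} else {
	cmdiocbq->cmd_flag |= LPFC_IO_WAKE;	/* response already copied */
	wake_up(cmdiocbq->context_un.wait_queue);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
#endif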
12732
12733 /**
12734  * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
12735  * @phba: Pointer to HBA context object.
12736  * @piocbq: Pointer to command iocb.
12737  * @flag: Flag to test.
12738  *
12739  * This routine grabs the hbalock and then tests the cmd_flag to
12740  * see if the passed in flag is set.
12741  * Returns:
12742  * 1 if flag is set.
12743  * 0 if flag is not set.
12744  **/
12745 static int
12746 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
12747                  struct lpfc_iocbq *piocbq, uint32_t flag)
12748 {
12749         unsigned long iflags;
12750         int ret;
12751
12752         spin_lock_irqsave(&phba->hbalock, iflags);
12753         ret = piocbq->cmd_flag & flag;
12754         spin_unlock_irqrestore(&phba->hbalock, iflags);
12755         return ret;
12756
12757 }
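
/*
 * Editorial note: lpfc_chk_iocb_flg() exists so that a wait_event
 * condition samples cmd_flag under phba->hbalock rather than reading it
 * unlocked. A sketch of the idiom (hypothetical caller, not compiled):
 */
#if 0
timeleft = wait_event_timeout(done_q,
			      lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
			      msecs_to_jiffies(timeout * 1000));
#endif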
12758
12759 /**
12760  * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
12761  * @phba: Pointer to HBA context object.
12762  * @ring_number: Ring number
12763  * @piocb: Pointer to command iocb.
12764  * @prspiocbq: Pointer to response iocb.
12765  * @timeout: Timeout in number of seconds.
12766  *
12767  * This function issues the iocb to firmware and waits for the
12768  * iocb to complete. The cmd_cmpl field of the iocb shall be used
12769  * to handle iocbs which time out. If the field is NULL, the
12770  * function shall free the iocbq structure.  If more clean up is
12771  * needed, the caller is expected to provide a completion function
12772  * that will provide the needed clean up.  If the iocb command is
12773  * not completed within timeout seconds, the function will either
12774  * free the iocbq structure (if cmd_cmpl == NULL) or execute the
12775  * completion function set in the cmd_cmpl field and then return
12776  * a status of IOCB_TIMEDOUT.  The caller should not free the iocb
12777  * resources if this function returns IOCB_TIMEDOUT.
12778  * The function waits for the iocb completion using a
12779  * non-interruptible wait.
12780  * This function will sleep while waiting for iocb completion.
12781  * So, this function should not be called from any context which
12782  * does not allow sleeping. Due to the same reason, this function
12783  * cannot be called with interrupt disabled.
12784  * This function assumes that the iocb completions occur while
12785  * this function sleeps. So, this function cannot be called from
12786  * the thread which processes iocb completions for this ring.
12787  * This function clears the cmd_flag of the iocb object before
12788  * issuing the iocb and the iocb completion handler sets this
12789  * flag and wakes this thread when the iocb completes.
12790  * The contents of the response iocb will be copied to prspiocbq
12791  * by the completion handler when the command completes.
12792  * This function returns IOCB_SUCCESS when success.
12793  * This function is called with no lock held.
12794  **/
12795 int
12796 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
12797                          uint32_t ring_number,
12798                          struct lpfc_iocbq *piocb,
12799                          struct lpfc_iocbq *prspiocbq,
12800                          uint32_t timeout)
12801 {
12802         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
12803         long timeleft, timeout_req = 0;
12804         int retval = IOCB_SUCCESS;
12805         uint32_t creg_val;
12806         struct lpfc_iocbq *iocb;
12807         int txq_cnt = 0;
12808         int txcmplq_cnt = 0;
12809         struct lpfc_sli_ring *pring;
12810         unsigned long iflags;
12811         bool iocb_completed = true;
12812
12813         if (phba->sli_rev >= LPFC_SLI_REV4) {
12814                 lpfc_sli_prep_wqe(phba, piocb);
12815
12816                 pring = lpfc_sli4_calc_ring(phba, piocb);
12817         } else
12818                 pring = &phba->sli.sli3_ring[ring_number];
12819         /*
12820          * If the caller has provided a response iocbq buffer, then context2
12821          * must be NULL; otherwise it is an error.
12822          */
12823         if (prspiocbq) {
12824                 if (piocb->context2)
12825                         return IOCB_ERROR;
12826                 piocb->context2 = prspiocbq;
12827         }
12828
12829         piocb->wait_cmd_cmpl = piocb->cmd_cmpl;
12830         piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait;
12831         piocb->context_un.wait_queue = &done_q;
12832         piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
12833
12834         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12835                 if (lpfc_readl(phba->HCregaddr, &creg_val))
12836                         return IOCB_ERROR;
12837                 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
12838                 writel(creg_val, phba->HCregaddr);
12839                 readl(phba->HCregaddr); /* flush */
12840         }
12841
12842         retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
12843                                      SLI_IOCB_RET_IOCB);
12844         if (retval == IOCB_SUCCESS) {
12845                 timeout_req = msecs_to_jiffies(timeout * 1000);
12846                 timeleft = wait_event_timeout(done_q,
12847                                 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
12848                                 timeout_req);
12849                 spin_lock_irqsave(&phba->hbalock, iflags);
12850                 if (!(piocb->cmd_flag & LPFC_IO_WAKE)) {
12851
12852                         /*
12853                          * IOCB timed out.  Inform the wake iocb wait
12854                          * completion function and set local status
12855                          */
12856
12857                         iocb_completed = false;
12858                         piocb->cmd_flag |= LPFC_IO_WAKE_TMO;
12859                 }
12860                 spin_unlock_irqrestore(&phba->hbalock, iflags);
12861                 if (iocb_completed) {
12862                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12863                                         "0331 IOCB wake signaled\n");
12864                         /* Note: we are not indicating if the IOCB has a success
12865                          * status or not - that's for the caller to check.
12866                          * IOCB_SUCCESS means just that the command was sent and
12867                          * completed. Not that it completed successfully.
12868                          */
12869                 } else if (timeleft == 0) {
12870                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12871                                         "0338 IOCB wait timeout error - no "
12872                                         "wake response Data x%x\n", timeout);
12873                         retval = IOCB_TIMEDOUT;
12874                 } else {
12875                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12876                                         "0330 IOCB wake NOT set, "
12877                                         "Data x%x x%lx\n",
12878                                         timeout, timeleft);
12879                         retval = IOCB_TIMEDOUT;
12880                 }
12881         } else if (retval == IOCB_BUSY) {
12882                 if (phba->cfg_log_verbose & LOG_SLI) {
12883                         list_for_each_entry(iocb, &pring->txq, list) {
12884                                 txq_cnt++;
12885                         }
12886                         list_for_each_entry(iocb, &pring->txcmplq, list) {
12887                                 txcmplq_cnt++;
12888                         }
12889                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12890                                 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
12891                                 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
12892                 }
12893                 return retval;
12894         } else {
12895                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12896                                 "0332 IOCB wait issue failed, Data x%x\n",
12897                                 retval);
12898                 retval = IOCB_ERROR;
12899         }
12900
12901         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12902                 if (lpfc_readl(phba->HCregaddr, &creg_val))
12903                         return IOCB_ERROR;
12904                 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
12905                 writel(creg_val, phba->HCregaddr);
12906                 readl(phba->HCregaddr); /* flush */
12907         }
12908
12909         if (prspiocbq)
12910                 piocb->context2 = NULL;
12911
12912         piocb->context_un.wait_queue = NULL;
12913         piocb->cmd_cmpl = NULL;
12914         return retval;
12915 }
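
/*
 * Editorial sketch (not compiled into the driver): a hypothetical
 * synchronous caller of lpfc_sli_issue_iocb_wait(). The contract shown
 * is the IOCB_TIMEDOUT case: the command iocb must NOT be freed by the
 * caller, because cleanup is deferred to the late completion. The
 * lpfc_example_* name and the 30 second timeout are illustrative only.
 */
#if 0
static int lpfc_example_sync_cmd(struct lpfc_hba *phba,
				 struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_iocbq *rspiocb;
	int rc;

	rspiocb = lpfc_sli_get_iocbq(phba);
	if (!rspiocb)
		return -ENOMEM;

	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocb,
				      rspiocb, 30);
	if (rc == IOCB_SUCCESS) {
		/* the response was copied into rspiocb; inspect it here */
	}
	if (rc != IOCB_TIMEDOUT)
		lpfc_sli_release_iocbq(phba, cmdiocb);
	lpfc_sli_release_iocbq(phba, rspiocb);
	return (rc == IOCB_SUCCESS) ? 0 : -EIO;
}
#endif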
12916
12917 /**
12918  * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
12919  * @phba: Pointer to HBA context object.
12920  * @pmboxq: Pointer to driver mailbox object.
12921  * @timeout: Timeout in number of seconds.
12922  *
12923  * This function issues the mailbox to firmware and waits for the
12924  * mailbox command to complete. If the mailbox command is not
12925  * completed within timeout seconds, it returns MBX_TIMEOUT.
12926  * The function waits for the mailbox completion using a
12927  * non-interruptible wait. If the wait times out before the mailbox
12928  * command completes, MBX_TIMEOUT is returned to the caller. The
12929  * caller should not free the mailbox resources if this function
12930  * returns MBX_TIMEOUT.
12931  * This function will sleep while waiting for mailbox completion.
12932  * So, this function should not be called from any context which
12933  * does not allow sleeping. Due to the same reason, this function
12934  * cannot be called with interrupt disabled.
12935  * This function assumes that the mailbox completion occurs while
12936  * this function sleeps. So, this function cannot be called from
12937  * the worker thread which processes mailbox completion.
12938  * This function is called in the context of HBA management
12939  * applications.
12940  * This function returns MBX_SUCCESS when successful.
12941  * This function is called with no lock held.
12942  **/
12943 int
12944 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
12945                          uint32_t timeout)
12946 {
12947         struct completion mbox_done;
12948         int retval;
12949         unsigned long flag;
12950
12951         pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
12952         /* setup wake call as mailbox completion callback */
12953         pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
12954
12955         /* setup context3 field to pass completion pointer to wake function */
12956         init_completion(&mbox_done);
12957         pmboxq->context3 = &mbox_done;
12958         /* now issue the command */
12959         retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
12960         if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
12961                 wait_for_completion_timeout(&mbox_done,
12962                                             msecs_to_jiffies(timeout * 1000));
12963
12964                 spin_lock_irqsave(&phba->hbalock, flag);
12965                 pmboxq->context3 = NULL;
12966                 /*
12967                  * if LPFC_MBX_WAKE flag is set the mailbox is completed
12968                  * else do not free the resources.
12969                  */
12970                 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
12971                         retval = MBX_SUCCESS;
12972                 } else {
12973                         retval = MBX_TIMEOUT;
12974                         pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12975                 }
12976                 spin_unlock_irqrestore(&phba->hbalock, flag);
12977         }
12978         return retval;
12979 }
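
/*
 * Editorial sketch (not compiled into the driver): a hypothetical
 * synchronous mailbox caller. The contract shown is the MBX_TIMEOUT
 * case: the mailbox stays owned by the driver (mbox_cmpl was pointed at
 * lpfc_sli_def_mbox_cmpl above), so the caller must not free it. The
 * lpfc_example_* name is illustrative; lpfc_read_rev() stands in for
 * any single-step mailbox command.
 */
#if 0
static int lpfc_example_sync_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;

	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
	if (rc == MBX_SUCCESS) {
		/* consume the completed mailbox (pmb->u.mb) here */
	}
	if (rc != MBX_TIMEOUT)
		mempool_free(pmb, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
#endif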
12980
12981 /**
12982  * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
12983  * @phba: Pointer to HBA context.
12984  * @mbx_action: Mailbox shutdown options.
12985  *
12986  * This function is called to shutdown the driver's mailbox sub-system.
12987  * It first marks the mailbox sub-system as being in a blocked state to
12988  * prevent asynchronous mailbox commands from being issued off the pending
12989  * mailbox command queue. If the mailbox command sub-system shutdown is due to
12990  * HBA error conditions such as EEH or ERATT, this routine shall invoke
12991  * the mailbox sub-system flush routine to forcefully bring down the
12992  * mailbox sub-system. Otherwise, if it is due to normal condition (such
12993  * as with offline or HBA function reset), this routine will wait for the
12994  * outstanding mailbox command to complete before invoking the mailbox
12995  * sub-system flush routine to gracefully bring down mailbox sub-system.
12996  **/
12997 void
12998 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
12999 {
13000         struct lpfc_sli *psli = &phba->sli;
13001         unsigned long timeout;
13002
13003         if (mbx_action == LPFC_MBX_NO_WAIT) {
13004                 /* delay 100ms for port state */
13005                 msleep(100);
13006                 lpfc_sli_mbox_sys_flush(phba);
13007                 return;
13008         }
13009         timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
13010
13011         /* Disable softirqs, including timers from obtaining phba->hbalock */
13012         local_bh_disable();
13013
13014         spin_lock_irq(&phba->hbalock);
13015         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13016
13017         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
13018                 /* Determine how long we might wait for the active mailbox
13019                  * command to be gracefully completed by firmware.
13020                  */
13021                 if (phba->sli.mbox_active)
13022                         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
13023                                                 phba->sli.mbox_active) *
13024                                                 1000) + jiffies;
13025                 spin_unlock_irq(&phba->hbalock);
13026
13027                 /* Enable softirqs again, done with phba->hbalock */
13028                 local_bh_enable();
13029
13030                 while (phba->sli.mbox_active) {
13031                         /* Check active mailbox complete status every 2ms */
13032                         msleep(2);
13033                         if (time_after(jiffies, timeout))
13034                                 /* Timed out; let the mailbox flush routine
13035                                  * forcefully release the active mailbox command
13036                                  */
13037                                 break;
13038                 }
13039         } else {
13040                 spin_unlock_irq(&phba->hbalock);
13041
13042                 /* Enable softirqs again, done with phba->hbalock */
13043                 local_bh_enable();
13044         }
13045
13046         lpfc_sli_mbox_sys_flush(phba);
13047 }
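
/*
 * Editorial sketch (not compiled into the driver): the graceful-shutdown
 * wait above as a standalone idiom; poll the active mailbox every 2ms
 * until it completes or the command-specific timeout lapses.
 */
#if 0
timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, phba->sli.mbox_active)
			   * 1000) + jiffies;
while (phba->sli.mbox_active && time_before(jiffies, timeout))
	msleep(2);
#endif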
13048
13049 /**
13050  * lpfc_sli_eratt_read - read sli-3 error attention events
13051  * @phba: Pointer to HBA context.
13052  *
13053  * This function is called to read the SLI3 device error attention registers
13054  * for possible error attention events. The caller must hold the hostlock
13055  * with spin_lock_irq().
13056  *
13057  * This function returns 1 when there is Error Attention in the Host Attention
13058  * Register and returns 0 otherwise.
13059  **/
13060 static int
13061 lpfc_sli_eratt_read(struct lpfc_hba *phba)
13062 {
13063         uint32_t ha_copy;
13064
13065         /* Read chip Host Attention (HA) register */
13066         if (lpfc_readl(phba->HAregaddr, &ha_copy))
13067                 goto unplug_err;
13068
13069         if (ha_copy & HA_ERATT) {
13070                 /* Read host status register to retrieve error event */
13071                 if (lpfc_sli_read_hs(phba))
13072                         goto unplug_err;
13073
13074                 /* Check if a deferred error condition is active */
13075                 if ((HS_FFER1 & phba->work_hs) &&
13076                     ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13077                       HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
13078                         phba->hba_flag |= DEFER_ERATT;
13079                         /* Clear all interrupt enable conditions */
13080                         writel(0, phba->HCregaddr);
13081                         readl(phba->HCregaddr);
13082                 }
13083
13084                 /* Set the driver HA work bitmap */
13085                 phba->work_ha |= HA_ERATT;
13086                 /* Indicate polling handles this ERATT */
13087                 phba->hba_flag |= HBA_ERATT_HANDLED;
13088                 return 1;
13089         }
13090         return 0;
13091
13092 unplug_err:
13093         /* Set the driver HS work bitmap */
13094         phba->work_hs |= UNPLUG_ERR;
13095         /* Set the driver HA work bitmap */
13096         phba->work_ha |= HA_ERATT;
13097         /* Indicate polling handles this ERATT */
13098         phba->hba_flag |= HBA_ERATT_HANDLED;
13099         return 1;
13100 }
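
/*
 * Editorial sketch (not compiled into the driver): the deferred-error
 * test above, pulled out as a hypothetical predicate. A deferred error
 * is HS_FFER1 together with at least one of HS_FFER2..HS_FFER8 in the
 * host status word.
 */
#if 0
static bool lpfc_example_hs_deferred_err(uint32_t work_hs)
{
	return (work_hs & HS_FFER1) &&
	       (work_hs & (HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
			   HS_FFER6 | HS_FFER7 | HS_FFER8));
}
#endif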
13101
13102 /**
13103  * lpfc_sli4_eratt_read - read sli-4 error attention events
13104  * @phba: Pointer to HBA context.
13105  *
13106  * This function is called to read the SLI4 device error attention registers
13107  * for possible error attention events. The caller must hold the hbalock
13108  * with spin_lock_irq().
13109  *
13110  * This function returns 1 when there is Error Attention in the Host Attention
13111  * Register and returns 0 otherwise.
13112  **/
13113 static int
13114 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
13115 {
13116         uint32_t uerr_sta_hi, uerr_sta_lo;
13117         uint32_t if_type, portsmphr;
13118         struct lpfc_register portstat_reg;
13119         u32 logmask;
13120
13121         /*
13122          * For now, use the SLI4 device internal unrecoverable error
13123          * registers for error attention. This can be changed later.
13124          */
13125         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
13126         switch (if_type) {
13127         case LPFC_SLI_INTF_IF_TYPE_0:
13128                 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
13129                         &uerr_sta_lo) ||
13130                         lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
13131                         &uerr_sta_hi)) {
13132                         phba->work_hs |= UNPLUG_ERR;
13133                         phba->work_ha |= HA_ERATT;
13134                         phba->hba_flag |= HBA_ERATT_HANDLED;
13135                         return 1;
13136                 }
13137                 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
13138                     (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
13139                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13140                                         "1423 HBA Unrecoverable error: "
13141                                         "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
13142                                         "ue_mask_lo_reg=0x%x, "
13143                                         "ue_mask_hi_reg=0x%x\n",
13144                                         uerr_sta_lo, uerr_sta_hi,
13145                                         phba->sli4_hba.ue_mask_lo,
13146                                         phba->sli4_hba.ue_mask_hi);
13147                         phba->work_status[0] = uerr_sta_lo;
13148                         phba->work_status[1] = uerr_sta_hi;
13149                         phba->work_ha |= HA_ERATT;
13150                         phba->hba_flag |= HBA_ERATT_HANDLED;
13151                         return 1;
13152                 }
13153                 break;
13154         case LPFC_SLI_INTF_IF_TYPE_2:
13155         case LPFC_SLI_INTF_IF_TYPE_6:
13156                 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
13157                         &portstat_reg.word0) ||
13158                         lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
13159                         &portsmphr)){
13160                         phba->work_hs |= UNPLUG_ERR;
13161                         phba->work_ha |= HA_ERATT;
13162                         phba->hba_flag |= HBA_ERATT_HANDLED;
13163                         return 1;
13164                 }
13165                 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
13166                         phba->work_status[0] =
13167                                 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
13168                         phba->work_status[1] =
13169                                 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
13170                         logmask = LOG_TRACE_EVENT;
13171                         if (phba->work_status[0] ==
13172                                 SLIPORT_ERR1_REG_ERR_CODE_2 &&
13173                             phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
13174                                 logmask = LOG_SLI;
13175                         lpfc_printf_log(phba, KERN_ERR, logmask,
13176                                         "2885 Port Status Event: "
13177                                         "port status reg 0x%x, "
13178                                         "port smphr reg 0x%x, "
13179                                         "error 1=0x%x, error 2=0x%x\n",
13180                                         portstat_reg.word0,
13181                                         portsmphr,
13182                                         phba->work_status[0],
13183                                         phba->work_status[1]);
13184                         phba->work_ha |= HA_ERATT;
13185                         phba->hba_flag |= HBA_ERATT_HANDLED;
13186                         return 1;
13187                 }
13188                 break;
13189         case LPFC_SLI_INTF_IF_TYPE_1:
13190         default:
13191                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13192                                 "2886 HBA Error Attention on unsupported "
13193                                 "if type %d.", if_type);
13194                 return 1;
13195         }
13196
13197         return 0;
13198 }
13199
13200 /**
13201  * lpfc_sli_check_eratt - check error attention events
13202  * @phba: Pointer to HBA context.
13203  *
13204  * This function is called from timer soft interrupt context to check HBA's
13205  * error attention register bit for error attention events.
13206  *
13207  * This function returns 1 when there is Error Attention in the Host Attention
13208  * Register and returns 0 otherwise.
13209  **/
13210 int
13211 lpfc_sli_check_eratt(struct lpfc_hba *phba)
13212 {
13213         uint32_t ha_copy;
13214
13215         /* If somebody is waiting to handle an eratt, don't process it
13216          * here. The brdkill function will do this.
13217          */
13218         if (phba->link_flag & LS_IGNORE_ERATT)
13219                 return 0;
13220
13221         /* Check if interrupt handler handles this ERATT */
13222         spin_lock_irq(&phba->hbalock);
13223         if (phba->hba_flag & HBA_ERATT_HANDLED) {
13224                 /* Interrupt handler has handled ERATT */
13225                 spin_unlock_irq(&phba->hbalock);
13226                 return 0;
13227         }
13228
13229         /*
13230          * If there is deferred error attention, do not check for error
13231          * attention
13232          */
13233         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13234                 spin_unlock_irq(&phba->hbalock);
13235                 return 0;
13236         }
13237
13238         /* If PCI channel is offline, don't process it */
13239         if (unlikely(pci_channel_offline(phba->pcidev))) {
13240                 spin_unlock_irq(&phba->hbalock);
13241                 return 0;
13242         }
13243
13244         switch (phba->sli_rev) {
13245         case LPFC_SLI_REV2:
13246         case LPFC_SLI_REV3:
13247                 /* Read chip Host Attention (HA) register */
13248                 ha_copy = lpfc_sli_eratt_read(phba);
13249                 break;
13250         case LPFC_SLI_REV4:
13251                 /* Read device Unrecoverable Error (UERR) registers */
13252                 ha_copy = lpfc_sli4_eratt_read(phba);
13253                 break;
13254         default:
13255                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13256                                 "0299 Invalid SLI revision (%d)\n",
13257                                 phba->sli_rev);
13258                 ha_copy = 0;
13259                 break;
13260         }
13261         spin_unlock_irq(&phba->hbalock);
13262
13263         return ha_copy;
13264 }
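
/*
 * Editorial sketch (not compiled into the driver): the intended use of
 * lpfc_sli_check_eratt() from timer (softirq) context, modeled on the
 * driver's ERATT polling; the timer field name is assumed here.
 */
#if 0
static void lpfc_example_eratt_poll(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, eratt_poll);

	if (lpfc_sli_check_eratt(phba))
		/* phba->work_ha now carries HA_ERATT; kick the worker */
		lpfc_worker_wake_up(phba);
}
#endif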
13265
13266 /**
13267  * lpfc_intr_state_check - Check device state for interrupt handling
13268  * @phba: Pointer to HBA context.
13269  *
13270  * This inline routine checks whether a device or its PCI slot is in a state
13271  * in which the interrupt should be handled.
13272  *
13273  * This function returns 0 if the device or the PCI slot is in a state in
13274  * which the interrupt should be handled, otherwise -EIO.
13275  */
13276 static inline int
13277 lpfc_intr_state_check(struct lpfc_hba *phba)
13278 {
13279         /* If the pci channel is offline, ignore all the interrupts */
13280         if (unlikely(pci_channel_offline(phba->pcidev)))
13281                 return -EIO;
13282
13283         /* Update device level interrupt statistics */
13284         phba->sli.slistat.sli_intr++;
13285
13286         /* Ignore all interrupts during initialization. */
13287         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
13288                 return -EIO;
13289
13290         return 0;
13291 }
13292
13293 /**
13294  * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
13295  * @irq: Interrupt number.
13296  * @dev_id: The device context pointer.
13297  *
13298  * This function is directly called from the PCI layer as an interrupt
13299  * service routine when a device with the SLI-3 interface spec is enabled with
13300  * MSI-X multi-message interrupt mode and there are slow-path events in
13301  * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
13302  * interrupt mode, this function is called as part of the device-level
13303  * interrupt handler. When the PCI slot is in error recovery or the HBA
13304  * is undergoing initialization, the interrupt handler will not process
13305  * the interrupt. The link attention and ELS ring attention events are
13306  * handled by the worker thread. The interrupt handler signals the worker
13307  * thread and returns for these events. This function is called without
13308  * any lock held. It gets the hbalock to access and update SLI data
13309  * structures.
13310  *
13311  * This function returns IRQ_HANDLED when interrupt is handled else it
13312  * returns IRQ_NONE.
13313  **/
13314 irqreturn_t
13315 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
13316 {
13317         struct lpfc_hba  *phba;
13318         uint32_t ha_copy, hc_copy;
13319         uint32_t work_ha_copy;
13320         unsigned long status;
13321         unsigned long iflag;
13322         uint32_t control;
13323
13324         MAILBOX_t *mbox, *pmbox;
13325         struct lpfc_vport *vport;
13326         struct lpfc_nodelist *ndlp;
13327         struct lpfc_dmabuf *mp;
13328         LPFC_MBOXQ_t *pmb;
13329         int rc;
13330
13331         /*
13332          * Get the driver's phba structure from the dev_id and
13333          * assume the HBA is not interrupting.
13334          */
13335         phba = (struct lpfc_hba *)dev_id;
13336
13337         if (unlikely(!phba))
13338                 return IRQ_NONE;
13339
13340         /*
13341          * Extra care is needed when this function is invoked as an
13342          * individual interrupt handler in MSI-X multi-message interrupt mode
13343          */
13344         if (phba->intr_type == MSIX) {
13345                 /* Check device state for handling interrupt */
13346                 if (lpfc_intr_state_check(phba))
13347                         return IRQ_NONE;
13348                 /* Need to read HA REG for slow-path events */
13349                 spin_lock_irqsave(&phba->hbalock, iflag);
13350                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13351                         goto unplug_error;
13352                 /* If somebody is waiting to handle an eratt don't process it
13353                  * here. The brdkill function will do this.
13354                  */
13355                 if (phba->link_flag & LS_IGNORE_ERATT)
13356                         ha_copy &= ~HA_ERATT;
13357                 /* Check the need for handling ERATT in interrupt handler */
13358                 if (ha_copy & HA_ERATT) {
13359                         if (phba->hba_flag & HBA_ERATT_HANDLED)
13360                                 /* ERATT polling has handled ERATT */
13361                                 ha_copy &= ~HA_ERATT;
13362                         else
13363                                 /* Indicate interrupt handler handles ERATT */
13364                                 phba->hba_flag |= HBA_ERATT_HANDLED;
13365                 }
13366
13367                 /*
13368                  * If there is deferred error attention, do not check for any
13369                  * interrupt.
13370                  */
13371                 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13372                         spin_unlock_irqrestore(&phba->hbalock, iflag);
13373                         return IRQ_NONE;
13374                 }
13375
13376                 /* Clear up only attention source related to slow-path */
13377                 if (lpfc_readl(phba->HCregaddr, &hc_copy))
13378                         goto unplug_error;
13379
13380                 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
13381                         HC_LAINT_ENA | HC_ERINT_ENA),
13382                         phba->HCregaddr);
13383                 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
13384                         phba->HAregaddr);
13385                 writel(hc_copy, phba->HCregaddr);
13386                 readl(phba->HAregaddr); /* flush */
13387                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13388         } else
13389                 ha_copy = phba->ha_copy;
13390
13391         work_ha_copy = ha_copy & phba->work_ha_mask;
13392
13393         if (work_ha_copy) {
13394                 if (work_ha_copy & HA_LATT) {
13395                         if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
13396                                 /*
13397                                  * Turn off Link Attention interrupts
13398                                  * until CLEAR_LA done
13399                                  */
13400                                 spin_lock_irqsave(&phba->hbalock, iflag);
13401                                 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
13402                                 if (lpfc_readl(phba->HCregaddr, &control))
13403                                         goto unplug_error;
13404                                 control &= ~HC_LAINT_ENA;
13405                                 writel(control, phba->HCregaddr);
13406                                 readl(phba->HCregaddr); /* flush */
13407                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13408                         }
13409                         else
13410                                 work_ha_copy &= ~HA_LATT;
13411                 }
13412
13413                 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
13414                         /*
13415                          * Turn off Slow Rings interrupts, LPFC_ELS_RING is
13416                          * the only slow ring.
13417                          */
13418                         status = (work_ha_copy &
13419                                 (HA_RXMASK  << (4*LPFC_ELS_RING)));
13420                         status >>= (4*LPFC_ELS_RING);
13421                         if (status & HA_RXMASK) {
13422                                 spin_lock_irqsave(&phba->hbalock, iflag);
13423                                 if (lpfc_readl(phba->HCregaddr, &control))
13424                                         goto unplug_error;
13425
13426                                 lpfc_debugfs_slow_ring_trc(phba,
13427                                 "ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
13428                                 control, status,
13429                                 (uint32_t)phba->sli.slistat.sli_intr);
13430
13431                                 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
13432                                         lpfc_debugfs_slow_ring_trc(phba,
13433                                                 "ISR Disable ring:"
13434                                                 "pwork:x%x hawork:x%x wait:x%x",
13435                                                 phba->work_ha, work_ha_copy,
13436                                                 (uint32_t)((unsigned long)
13437                                                 &phba->work_waitq));
13438
13439                                         control &=
13440                                             ~(HC_R0INT_ENA << LPFC_ELS_RING);
13441                                         writel(control, phba->HCregaddr);
13442                                         readl(phba->HCregaddr); /* flush */
13443                                 }
13444                                 else {
13445                                         lpfc_debugfs_slow_ring_trc(phba,
13446                                                 "ISR slow ring:   pwork:"
13447                                                 "x%x hawork:x%x wait:x%x",
13448                                                 phba->work_ha, work_ha_copy,
13449                                                 (uint32_t)((unsigned long)
13450                                                 &phba->work_waitq));
13451                                 }
13452                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13453                         }
13454                 }
13455                 spin_lock_irqsave(&phba->hbalock, iflag);
13456                 if (work_ha_copy & HA_ERATT) {
13457                         if (lpfc_sli_read_hs(phba))
13458                                 goto unplug_error;
13459                         /*
13460                          * Check if a deferred error condition
13461                          * is active
13462                          */
13463                         if ((HS_FFER1 & phba->work_hs) &&
13464                                 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13465                                   HS_FFER6 | HS_FFER7 | HS_FFER8) &
13466                                   phba->work_hs)) {
13467                                 phba->hba_flag |= DEFER_ERATT;
13468                                 /* Clear all interrupt enable conditions */
13469                                 writel(0, phba->HCregaddr);
13470                                 readl(phba->HCregaddr);
13471                         }
13472                 }
13473
13474                 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
13475                         pmb = phba->sli.mbox_active;
13476                         pmbox = &pmb->u.mb;
13477                         mbox = phba->mbox;
13478                         vport = pmb->vport;
13479
13480                         /* First check out the status word */
13481                         lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
13482                         if (pmbox->mbxOwner != OWN_HOST) {
13483                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13484                                 /*
13485                                  * Stray Mailbox Interrupt, mbxCommand <cmd>
13486                                  * mbxStatus <status>
13487                                  */
13488                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13489                                                 "(%d):0304 Stray Mailbox "
13490                                                 "Interrupt mbxCommand x%x "
13491                                                 "mbxStatus x%x\n",
13492                                                 (vport ? vport->vpi : 0),
13493                                                 pmbox->mbxCommand,
13494                                                 pmbox->mbxStatus);
13495                                 /* clear mailbox attention bit */
13496                                 work_ha_copy &= ~HA_MBATT;
13497                         } else {
13498                                 phba->sli.mbox_active = NULL;
13499                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13500                                 phba->last_completion_time = jiffies;
13501                                 del_timer(&phba->sli.mbox_tmo);
13502                                 if (pmb->mbox_cmpl) {
13503                                         lpfc_sli_pcimem_bcopy(mbox, pmbox,
13504                                                         MAILBOX_CMD_SIZE);
13505                                         if (pmb->out_ext_byte_len &&
13506                                                 pmb->ctx_buf)
13507                                                 lpfc_sli_pcimem_bcopy(
13508                                                 phba->mbox_ext,
13509                                                 pmb->ctx_buf,
13510                                                 pmb->out_ext_byte_len);
13511                                 }
13512                                 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13513                                         pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13514
13515                                         lpfc_debugfs_disc_trc(vport,
13516                                                 LPFC_DISC_TRC_MBOX_VPORT,
13517                                                 "MBOX dflt rpi: : "
13518                                                 "status:x%x rpi:x%x",
13519                                                 (uint32_t)pmbox->mbxStatus,
13520                                                 pmbox->un.varWords[0], 0);
13521
13522                                         if (!pmbox->mbxStatus) {
13523                                                 mp = (struct lpfc_dmabuf *)
13524                                                         (pmb->ctx_buf);
13525                                                 ndlp = (struct lpfc_nodelist *)
13526                                                         pmb->ctx_ndlp;
13527
13528                                                 /* Reg_LOGIN of dflt RPI was
13529                                                  * successful. Now let's get
13530                                                  * rid of the RPI using the
13531                                                  * same mbox buffer.
13532                                                  */
13533                                                 lpfc_unreg_login(phba,
13534                                                         vport->vpi,
13535                                                         pmbox->un.varWords[0],
13536                                                         pmb);
13537                                                 pmb->mbox_cmpl =
13538                                                         lpfc_mbx_cmpl_dflt_rpi;
13539                                                 pmb->ctx_buf = mp;
13540                                                 pmb->ctx_ndlp = ndlp;
13541                                                 pmb->vport = vport;
13542                                                 rc = lpfc_sli_issue_mbox(phba,
13543                                                                 pmb,
13544                                                                 MBX_NOWAIT);
13545                                                 if (rc != MBX_BUSY)
13546                                                         lpfc_printf_log(phba,
13547                                                         KERN_ERR,
13548                                                         LOG_TRACE_EVENT,
13549                                                         "0350 rc should have"
13550                                                         "been MBX_BUSY\n");
13551                                                 if (rc != MBX_NOT_FINISHED)
13552                                                         goto send_current_mbox;
13553                                         }
13554                                 }
13555                                 spin_lock_irqsave(
13556                                                 &phba->pport->work_port_lock,
13557                                                 iflag);
13558                                 phba->pport->work_port_events &=
13559                                         ~WORKER_MBOX_TMO;
13560                                 spin_unlock_irqrestore(
13561                                                 &phba->pport->work_port_lock,
13562                                                 iflag);
13563
13564                                 /* Do NOT queue MBX_HEARTBEAT to the worker
13565                                  * thread for processing.
13566                                  */
13567                                 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
13568                                         /* Process mbox now */
13569                                         phba->sli.mbox_active = NULL;
13570                                         phba->sli.sli_flag &=
13571                                                 ~LPFC_SLI_MBOX_ACTIVE;
13572                                         if (pmb->mbox_cmpl)
13573                                                 pmb->mbox_cmpl(phba, pmb);
13574                                 } else {
13575                                         /* Queue to worker thread to process */
13576                                         lpfc_mbox_cmpl_put(phba, pmb);
13577                                 }
13578                         }
13579                 } else
13580                         spin_unlock_irqrestore(&phba->hbalock, iflag);
13581
13582                 if ((work_ha_copy & HA_MBATT) &&
13583                     (phba->sli.mbox_active == NULL)) {
13584 send_current_mbox:
13585                         /* Process next mailbox command if there is one */
13586                         do {
13587                                 rc = lpfc_sli_issue_mbox(phba, NULL,
13588                                                          MBX_NOWAIT);
13589                         } while (rc == MBX_NOT_FINISHED);
13590                         if (rc != MBX_SUCCESS)
13591                                 lpfc_printf_log(phba, KERN_ERR,
13592                                                 LOG_TRACE_EVENT,
13593                                                 "0349 rc should be "
13594                                                 "MBX_SUCCESS\n");
13595                 }
13596
13597                 spin_lock_irqsave(&phba->hbalock, iflag);
13598                 phba->work_ha |= work_ha_copy;
13599                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13600                 lpfc_worker_wake_up(phba);
13601         }
13602         return IRQ_HANDLED;
13603 unplug_error:
13604         spin_unlock_irqrestore(&phba->hbalock, iflag);
13605         return IRQ_HANDLED;
13606
13607 } /* lpfc_sli_sp_intr_handler */
13608
13609 /**
13610  * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
13611  * @irq: Interrupt number.
13612  * @dev_id: The device context pointer.
13613  *
13614  * This function is directly called from the PCI layer as an interrupt
13615  * service routine when a device with the SLI-3 interface spec is enabled with
13616  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
13617  * ring event in the HBA. However, when the device is enabled with either
13618  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13619  * device-level interrupt handler. When the PCI slot is in error recovery
13620  * or the HBA is undergoing initialization, the interrupt handler will not
13621  * process the interrupt. The SCSI FCP fast-path ring events are handled in
13622  * the interrupt context. This function is called without any lock held.
13623  * It gets the hbalock to access and update SLI data structures.
13624  *
13625  * This function returns IRQ_HANDLED when interrupt is handled else it
13626  * returns IRQ_NONE.
13627  **/
13628 irqreturn_t
13629 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
13630 {
13631         struct lpfc_hba  *phba;
13632         uint32_t ha_copy;
13633         unsigned long status;
13634         unsigned long iflag;
13635         struct lpfc_sli_ring *pring;
13636
13637         /* Get the driver's phba structure from the dev_id and
13638          * assume the HBA is not interrupting.
13639          */
13640         phba = (struct lpfc_hba *) dev_id;
13641
13642         if (unlikely(!phba))
13643                 return IRQ_NONE;
13644
13645         /*
13646          * Extra care is needed when this function is invoked as an
13647          * individual interrupt handler in MSI-X multi-message interrupt mode
13648          */
13649         if (phba->intr_type == MSIX) {
13650                 /* Check device state for handling interrupt */
13651                 if (lpfc_intr_state_check(phba))
13652                         return IRQ_NONE;
13653                 /* Need to read HA REG for FCP ring and other ring events */
13654                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13655                         return IRQ_HANDLED;
13656                 /* Clear up only attention source related to fast-path */
13657                 spin_lock_irqsave(&phba->hbalock, iflag);
13658                 /*
13659                  * If there is deferred error attention, do not check for
13660                  * any interrupt.
13661                  */
13662                 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13663                         spin_unlock_irqrestore(&phba->hbalock, iflag);
13664                         return IRQ_NONE;
13665                 }
13666                 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
13667                         phba->HAregaddr);
13668                 readl(phba->HAregaddr); /* flush */
13669                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13670         } else
13671                 ha_copy = phba->ha_copy;
13672
13673         /*
13674          * Process all events on FCP ring. Take the optimized path for FCP IO.
13675          */
13676         ha_copy &= ~(phba->work_ha_mask);
13677
13678         status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13679         status >>= (4*LPFC_FCP_RING);
13680         pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
13681         if (status & HA_RXMASK)
13682                 lpfc_sli_handle_fast_ring_event(phba, pring, status);
13683
13684         if (phba->cfg_multi_ring_support == 2) {
13685                 /*
13686                  * Process all events on extra ring. Take the optimized path
13687                  * for extra ring IO.
13688                  */
13689                 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13690                 status >>= (4*LPFC_EXTRA_RING);
13691                 if (status & HA_RXMASK) {
13692                         lpfc_sli_handle_fast_ring_event(phba,
13693                                         &phba->sli.sli3_ring[LPFC_EXTRA_RING],
13694                                         status);
13695                 }
13696         }
13697         return IRQ_HANDLED;
13698 }  /* lpfc_sli_fp_intr_handler */
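
/*
 * Editorial sketch (not compiled into the driver): the HA register packs
 * four attention bits per ring, so the per-ring status extraction above
 * is a shift and a mask. A hypothetical helper making that explicit:
 */
#if 0
static uint32_t lpfc_example_ring_status(uint32_t ha_copy, int ring)
{
	/* 4 attention bits per ring; HA_RXMASK selects the RX ones */
	return (ha_copy >> (4 * ring)) & HA_RXMASK;
}
#endif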
13699
13700 /**
13701  * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
13702  * @irq: Interrupt number.
13703  * @dev_id: The device context pointer.
13704  *
13705  * This function is the HBA device-level interrupt handler to device with
13706  * SLI-3 interface spec, called from the PCI layer when either MSI or
13707  * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
13708  * requires driver attention. This function invokes the slow-path interrupt
13709  * attention handling function and fast-path interrupt attention handling
13710  * function in turn to process the relevant HBA attention events. This
13711  * function is called without any lock held. It gets the hbalock to access
13712  * and update SLI data structures.
13713  *
13714  * This function returns IRQ_HANDLED when interrupt is handled, else it
13715  * returns IRQ_NONE.
13716  **/
13717 irqreturn_t
13718 lpfc_sli_intr_handler(int irq, void *dev_id)
13719 {
13720         struct lpfc_hba  *phba;
13721         irqreturn_t sp_irq_rc, fp_irq_rc;
13722         unsigned long status1, status2;
13723         uint32_t hc_copy;
13724
13725         /*
13726          * Get the driver's phba structure from the dev_id and
13727          * assume the HBA is not interrupting.
13728          */
13729         phba = (struct lpfc_hba *) dev_id;
13730
13731         if (unlikely(!phba))
13732                 return IRQ_NONE;
13733
13734         /* Check device state for handling interrupt */
13735         if (lpfc_intr_state_check(phba))
13736                 return IRQ_NONE;
13737
13738         spin_lock(&phba->hbalock);
13739         if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
13740                 spin_unlock(&phba->hbalock);
13741                 return IRQ_HANDLED;
13742         }
13743
13744         if (unlikely(!phba->ha_copy)) {
13745                 spin_unlock(&phba->hbalock);
13746                 return IRQ_NONE;
13747         } else if (phba->ha_copy & HA_ERATT) {
13748                 if (phba->hba_flag & HBA_ERATT_HANDLED)
13749                         /* ERATT polling has handled ERATT */
13750                         phba->ha_copy &= ~HA_ERATT;
13751                 else
13752                         /* Indicate interrupt handler handles ERATT */
13753                         phba->hba_flag |= HBA_ERATT_HANDLED;
13754         }
13755
13756         /*
13757          * If there is deferred error attention, do not check for any interrupt.
13758          */
13759         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13760                 spin_unlock(&phba->hbalock);
13761                 return IRQ_NONE;
13762         }
13763
13764         /* Clear attention sources except link and error attentions */
13765         if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
13766                 spin_unlock(&phba->hbalock);
13767                 return IRQ_HANDLED;
13768         }
13769         writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
13770                 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
13771                 phba->HCregaddr);
13772         writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
13773         writel(hc_copy, phba->HCregaddr);
13774         readl(phba->HAregaddr); /* flush */
13775         spin_unlock(&phba->hbalock);
13776
13777         /*
13778          * Invoke slow-path host attention interrupt handling as appropriate.
13779          */
13780
13781         /* status of events with mailbox and link attention */
13782         status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
13783
13784         /* status of events with ELS ring */
13785         status2 = (phba->ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
13786         status2 >>= (4*LPFC_ELS_RING);
13787
13788         if (status1 || (status2 & HA_RXMASK))
13789                 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
13790         else
13791                 sp_irq_rc = IRQ_NONE;
13792
13793         /*
13794          * Invoke fast-path host attention interrupt handling as appropriate.
13795          */
13796
13797         /* status of events with FCP ring */
13798         status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13799         status1 >>= (4*LPFC_FCP_RING);
13800
13801         /* status of events with extra ring */
13802         if (phba->cfg_multi_ring_support == 2) {
13803                 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13804                 status2 >>= (4*LPFC_EXTRA_RING);
13805         } else
13806                 status2 = 0;
13807
13808         if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
13809                 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
13810         else
13811                 fp_irq_rc = IRQ_NONE;
13812
13813         /* Return device-level interrupt handling status */
13814         return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
13815 }  /* lpfc_sli_intr_handler */
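
/*
 * Illustrative sketch (an assumption, not part of this driver): a
 * device-level handler such as lpfc_sli_intr_handler() is typically
 * registered with request_irq() during attach, passing the phba pointer
 * as the dev_id that the handler later casts back. The helper name
 * below is hypothetical.
 *
 *	static int example_setup_intx(struct lpfc_hba *phba,
 *				      struct pci_dev *pdev)
 *	{
 *		return request_irq(pdev->irq, lpfc_sli_intr_handler,
 *				   IRQF_SHARED, LPFC_DRIVER_NAME, phba);
 *	}
 */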
13816
13817 /**
13818  * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
13819  * @phba: pointer to lpfc hba data structure.
13820  *
13821  * This routine is invoked by the worker thread to process all the pending
13822  * SLI4 ELS XRI abort events.
13823  **/
13824 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
13825 {
13826         struct lpfc_cq_event *cq_event;
13827         unsigned long iflags;
13828
13829         /* First, declare the els xri abort event has been handled */
13830         spin_lock_irqsave(&phba->hbalock, iflags);
13831         phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
13832         spin_unlock_irqrestore(&phba->hbalock, iflags);
13833
13834         /* Now, handle all the els xri abort events */
13835         spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
13836         while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
13837                 /* Get the first event from the head of the event queue */
13838                 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
13839                                  cq_event, struct lpfc_cq_event, list);
13840                 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
13841                                        iflags);
13842                 /* Notify aborted XRI for ELS work queue */
13843                 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
13844
13845                 /* Free the event processed back to the free pool */
13846                 lpfc_sli4_cq_event_release(phba, cq_event);
13847                 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
13848                                   iflags);
13849         }
13850         spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
13851 }
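
/*
 * For reference: the list_remove_head() drain above is the common
 * "pop under lock, process unlocked" idiom. A minimal equivalent of the
 * pop step, sketched with standard <linux/list.h> helpers (an
 * assumption, not lpfc code):
 *
 *	cq_event = list_first_entry_or_null(
 *			&phba->sli4_hba.sp_els_xri_aborted_work_queue,
 *			struct lpfc_cq_event, list);
 *	if (cq_event)
 *		list_del_init(&cq_event->list);
 */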
13852
13853 /**
13854  * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe
13855  * @phba: Pointer to HBA context object.
13856  * @irspiocbq: Pointer to the response IOCBQ carrying the ELS WCQE.
13857  *
13858  * This routine handles an ELS work-queue completion event and constructs
13859  * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
13860  * discovery engine to handle.
13861  *
13862  * Return: Pointer to the response IOCBQ on success, NULL otherwise.
13863  **/
13864 static struct lpfc_iocbq *
13865 lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
13866                                   struct lpfc_iocbq *irspiocbq)
13867 {
13868         struct lpfc_sli_ring *pring;
13869         struct lpfc_iocbq *cmdiocbq;
13870         struct lpfc_wcqe_complete *wcqe;
13871         unsigned long iflags;
13872
13873         pring = lpfc_phba_elsring(phba);
13874         if (unlikely(!pring))
13875                 return NULL;
13876
13877         wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
13878         spin_lock_irqsave(&pring->ring_lock, iflags);
13879         pring->stats.iocb_event++;
13880         /* Look up the ELS command IOCB and create pseudo response IOCB */
13881         cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13882                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13883         if (unlikely(!cmdiocbq)) {
13884                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13885                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13886                                 "0386 ELS complete with no corresponding "
13887                                 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13888                                 wcqe->word0, wcqe->total_data_placed,
13889                                 wcqe->parameter, wcqe->word3);
13890                 lpfc_sli_release_iocbq(phba, irspiocbq);
13891                 return NULL;
13892         }
13893
13894         memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128));
13895         memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe));
13896
13897         /* Put the iocb back on the txcmplq */
13898         lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13899         spin_unlock_irqrestore(&pring->ring_lock, iflags);
13900
13901         if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
13902                 spin_lock_irqsave(&phba->hbalock, iflags);
13903                 cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
13904                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13905         }
13906
13907         return irspiocbq;
13908 }
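
/*
 * Note: the XB (exchange busy) bit tested above indicates the firmware
 * still owns the exchange; marking the command LPFC_EXCHANGE_BUSY
 * defers reuse of its XRI resources until the corresponding
 * XRI_ABORTED CQE is received.
 */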
13909
13910 inline struct lpfc_cq_event *
13911 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13912 {
13913         struct lpfc_cq_event *cq_event;
13914
13915         /* Allocate a new internal CQ_EVENT entry */
13916         cq_event = lpfc_sli4_cq_event_alloc(phba);
13917         if (!cq_event) {
13918                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13919                                 "0602 Failed to alloc CQ_EVENT entry\n");
13920                 return NULL;
13921         }
13922
13923         /* Move the CQE into the event */
13924         memcpy(&cq_event->cqe, entry, size);
13925         return cq_event;
13926 }
13927
13928 /**
13929  * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
13930  * @phba: Pointer to HBA context object.
13931  * @mcqe: Pointer to mailbox completion queue entry.
13932  *
13933  * This routine processes a mailbox completion queue entry that carries an
13934  * asynchronous event.
13935  *
13936  * Return: true if work posted to worker thread, otherwise false.
13937  **/
13938 static bool
13939 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13940 {
13941         struct lpfc_cq_event *cq_event;
13942         unsigned long iflags;
13943
13944         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13945                         "0392 Async Event: word0:x%x, word1:x%x, "
13946                         "word2:x%x, word3:x%x\n", mcqe->word0,
13947                         mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13948
13949         cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13950         if (!cq_event)
13951                 return false;
13952
13953         spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
13954         list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13955         spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
13956
13957         /* Set the async event flag */
13958         spin_lock_irqsave(&phba->hbalock, iflags);
13959         phba->hba_flag |= ASYNC_EVENT;
13960         spin_unlock_irqrestore(&phba->hbalock, iflags);
13961
13962         return true;
13963 }
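
/*
 * The ASYNC_EVENT flag set above is consumed by the worker thread,
 * which drains sp_asynce_work_queue under asynce_list_lock using the
 * same pop-then-process pattern as
 * lpfc_sli4_els_xri_abort_event_proc() earlier in this file.
 */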
13964
13965 /**
13966  * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
13967  * @phba: Pointer to HBA context object.
13968  * @mcqe: Pointer to mailbox completion queue entry.
13969  *
13970  * This routine processes a mailbox completion queue entry that carries a
13971  * mailbox completion event.
13972  *
13973  * Return: true if work posted to worker thread, otherwise false.
13974  **/
13975 static bool
13976 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13977 {
13978         uint32_t mcqe_status;
13979         MAILBOX_t *mbox, *pmbox;
13980         struct lpfc_mqe *mqe;
13981         struct lpfc_vport *vport;
13982         struct lpfc_nodelist *ndlp;
13983         struct lpfc_dmabuf *mp;
13984         unsigned long iflags;
13985         LPFC_MBOXQ_t *pmb;
13986         bool workposted = false;
13987         int rc;
13988
13989         /* Not a mailbox-completion MCQE; bail out, but honor the consumed flag */
13990         if (!bf_get(lpfc_trailer_completed, mcqe))
13991                 goto out_no_mqe_complete;
13992
13993         /* Get the reference to the active mbox command */
13994         spin_lock_irqsave(&phba->hbalock, iflags);
13995         pmb = phba->sli.mbox_active;
13996         if (unlikely(!pmb)) {
13997                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13998                                 "1832 No pending MBOX command to handle\n");
13999                 spin_unlock_irqrestore(&phba->hbalock, iflags);
14000                 goto out_no_mqe_complete;
14001         }
14002         spin_unlock_irqrestore(&phba->hbalock, iflags);
14003         mqe = &pmb->u.mqe;
14004         pmbox = (MAILBOX_t *)&pmb->u.mqe;
14005         mbox = phba->mbox;
14006         vport = pmb->vport;
14007
14008         /* Reset heartbeat timer */
14009         phba->last_completion_time = jiffies;
14010         del_timer(&phba->sli.mbox_tmo);
14011
14012         /* Move mbox data to caller's mailbox region, do endian swapping */
14013         if (pmb->mbox_cmpl && mbox)
14014                 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
14015
14016         /*
14017          * For mcqe errors, conditionally move a modified error code to
14018          * the mbox so that the error will not be missed.
14019          */
14020         mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
14021         if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
14022                 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
14023                         bf_set(lpfc_mqe_status, mqe,
14024                                (LPFC_MBX_ERROR_RANGE | mcqe_status));
14025         }
14026         if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
14027                 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
14028                 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
14029                                       "MBOX dflt rpi: status:x%x rpi:x%x",
14030                                       mcqe_status,
14031                                       pmbox->un.varWords[0], 0);
14032                 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
14033                         mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
14034                         ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
14035
14036                         /* Reg_LOGIN of dflt RPI was successful. Mark the
14037                          * node as having an UNREG_LOGIN in progress to stop
14038                          * an unsolicited PLOGI from the same NPortId from
14039                          * starting another mailbox transaction.
14040                          */
14041                         spin_lock_irqsave(&ndlp->lock, iflags);
14042                         ndlp->nlp_flag |= NLP_UNREG_INP;
14043                         spin_unlock_irqrestore(&ndlp->lock, iflags);
14044                         lpfc_unreg_login(phba, vport->vpi,
14045                                          pmbox->un.varWords[0], pmb);
14046                         pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
14047                         pmb->ctx_buf = mp;
14048
14049                         /* No reference taken here.  This is a default
14050                          * RPI reg/immediate unreg cycle. The reference was
14051                          * taken in the reg rpi path and is released when
14052                          * this mailbox completes.
14053                          */
14054                         pmb->ctx_ndlp = ndlp;
14055                         pmb->vport = vport;
14056                         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
14057                         if (rc != MBX_BUSY)
14058                                 lpfc_printf_log(phba, KERN_ERR,
14059                                                 LOG_TRACE_EVENT,
14060                                                 "0385 rc should "
14061                                                 "have been MBX_BUSY\n");
14062                         if (rc != MBX_NOT_FINISHED)
14063                                 goto send_current_mbox;
14064                 }
14065         }
14066         spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
14067         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
14068         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
14069
14070         /* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. */
14071         if (pmbox->mbxCommand == MBX_HEARTBEAT) {
14072                 spin_lock_irqsave(&phba->hbalock, iflags);
14073                 /* Release the mailbox command posting token */
14074                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14075                 phba->sli.mbox_active = NULL;
14076                 if (bf_get(lpfc_trailer_consumed, mcqe))
14077                         lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14078                 spin_unlock_irqrestore(&phba->hbalock, iflags);
14079
14080                 /* Post the next mbox command, if there is one */
14081                 lpfc_sli4_post_async_mbox(phba);
14082
14083                 /* Process cmpl now */
14084                 if (pmb->mbox_cmpl)
14085                         pmb->mbox_cmpl(phba, pmb);
14086                 return false;
14087         }
14088
14089         /* There is mailbox completion work to queue to the worker thread */
14090         spin_lock_irqsave(&phba->hbalock, iflags);
14091         __lpfc_mbox_cmpl_put(phba, pmb);
14092         phba->work_ha |= HA_MBATT;
14093         spin_unlock_irqrestore(&phba->hbalock, iflags);
14094         workposted = true;
14095
14096 send_current_mbox:
14097         spin_lock_irqsave(&phba->hbalock, iflags);
14098         /* Release the mailbox command posting token */
14099         phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14100         /* Clearing the active mailbox pointer must be in sync with the flag clear */
14101         phba->sli.mbox_active = NULL;
14102         if (bf_get(lpfc_trailer_consumed, mcqe))
14103                 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14104         spin_unlock_irqrestore(&phba->hbalock, iflags);
14105         /* Wake up worker thread to post the next pending mailbox command */
14106         lpfc_worker_wake_up(phba);
14107         return workposted;
14108
14109 out_no_mqe_complete:
14110         spin_lock_irqsave(&phba->hbalock, iflags);
14111         if (bf_get(lpfc_trailer_consumed, mcqe))
14112                 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14113         spin_unlock_irqrestore(&phba->hbalock, iflags);
14114         return false;
14115 }
14116
14117 /**
14118  * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
14119  * @phba: Pointer to HBA context object.
14120  * @cq: Pointer to associated CQ
14121  * @cqe: Pointer to mailbox completion queue entry.
14122  *
14123  * This routine processes a mailbox completion queue entry; it invokes the
14124  * proper mailbox completion or asynchronous event handling routine
14125  * according to the MCQE's async bit.
14126  *
14127  * Return: true if work posted to worker thread, otherwise false.
14128  **/
14129 static bool
14130 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14131                          struct lpfc_cqe *cqe)
14132 {
14133         struct lpfc_mcqe mcqe;
14134         bool workposted;
14135
14136         cq->CQ_mbox++;
14137
14138         /* Copy the mailbox MCQE and convert endian order as needed */
14139         lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
14140
14141         /* Invoke the proper event handling routine */
14142         if (!bf_get(lpfc_trailer_async, &mcqe))
14143                 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
14144         else
14145                 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
14146         return workposted;
14147 }
14148
14149 /**
14150  * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
14151  * @phba: Pointer to HBA context object.
14152  * @cq: Pointer to associated CQ
14153  * @wcqe: Pointer to work-queue completion queue entry.
14154  *
14155  * This routine handles an ELS work-queue completion event.
14156  *
14157  * Return: true if work posted to worker thread, otherwise false.
14158  **/
14159 static bool
14160 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14161                              struct lpfc_wcqe_complete *wcqe)
14162 {
14163         struct lpfc_iocbq *irspiocbq;
14164         unsigned long iflags;
14165         struct lpfc_sli_ring *pring = cq->pring;
14166         int txq_cnt = 0;
14167         int txcmplq_cnt = 0;
14168
14169         /* Check for response status */
14170         if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14171                 /* Log the error status */
14172                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14173                                 "0357 ELS CQE error: status=x%x: "
14174                                 "CQE: %08x %08x %08x %08x\n",
14175                                 bf_get(lpfc_wcqe_c_status, wcqe),
14176                                 wcqe->word0, wcqe->total_data_placed,
14177                                 wcqe->parameter, wcqe->word3);
14178         }
14179
14180         /* Get an irspiocbq for later ELS response processing use */
14181         irspiocbq = lpfc_sli_get_iocbq(phba);
14182         if (!irspiocbq) {
14183                 if (!list_empty(&pring->txq))
14184                         txq_cnt++;
14185                 if (!list_empty(&pring->txcmplq))
14186                         txcmplq_cnt++;
14187                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14188                         "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
14189                         "els_txcmplq_cnt=%d\n",
14190                         txq_cnt, phba->iocb_cnt,
14191                         txcmplq_cnt);
14192                 return false;
14193         }
14194
14195         /* Save off the slow-path queue event for work thread to process */
14196         memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
14197         spin_lock_irqsave(&phba->hbalock, iflags);
14198         list_add_tail(&irspiocbq->cq_event.list,
14199                       &phba->sli4_hba.sp_queue_event);
14200         phba->hba_flag |= HBA_SP_QUEUE_EVT;
14201         spin_unlock_irqrestore(&phba->hbalock, iflags);
14202
14203         return true;
14204 }
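
/*
 * The irspiocbq queued above (with the WCQE stashed in
 * cq_event.cqe.wcqe_cmpl) is later picked up by the worker thread and
 * fed to lpfc_sli4_els_preprocess_rspiocbq() to build the pseudo
 * response IOCB for the discovery engine.
 */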
14205
14206 /**
14207  * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
14208  * @phba: Pointer to HBA context object.
14209  * @wcqe: Pointer to work-queue completion queue entry.
14210  *
14211  * This routine handles a slow-path WQ entry consumed event by invoking the
14212  * proper WQ release routine on the slow-path WQ.
14213  **/
14214 static void
14215 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
14216                              struct lpfc_wcqe_release *wcqe)
14217 {
14218         /* sanity check on queue memory */
14219         if (unlikely(!phba->sli4_hba.els_wq))
14220                 return;
14221         /* Check for the slow-path ELS work queue */
14222         if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
14223                 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
14224                                      bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14225         else
14226                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14227                                 "2579 Slow-path wqe consume event carries "
14228                                 "mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
14229                                 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
14230                                 phba->sli4_hba.els_wq->queue_id);
14231 }
14232
14233 /**
14234  * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
14235  * @phba: Pointer to HBA context object.
14236  * @cq: Pointer to a WQ completion queue.
14237  * @wcqe: Pointer to work-queue completion queue entry.
14238  *
14239  * This routine handles an XRI abort event.
14240  *
14241  * Return: true if work posted to worker thread, otherwise false.
14242  **/
14243 static bool
14244 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
14245                                    struct lpfc_queue *cq,
14246                                    struct sli4_wcqe_xri_aborted *wcqe)
14247 {
14248         bool workposted = false;
14249         struct lpfc_cq_event *cq_event;
14250         unsigned long iflags;
14251
14252         switch (cq->subtype) {
14253         case LPFC_IO:
14254                 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
14255                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14256                         /* Notify aborted XRI for NVME work queue */
14257                         if (phba->nvmet_support)
14258                                 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
14259                 }
14260                 workposted = false;
14261                 break;
14262         case LPFC_NVME_LS: /* NVME LS uses ELS resources */
14263         case LPFC_ELS:
14264                 cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
14265                 if (!cq_event) {
14266                         workposted = false;
14267                         break;
14268                 }
14269                 cq_event->hdwq = cq->hdwq;
14270                 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
14271                                   iflags);
14272                 list_add_tail(&cq_event->list,
14273                               &phba->sli4_hba.sp_els_xri_aborted_work_queue);
14274                 /* Set the els xri abort event flag */
14275                 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
14276                 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
14277                                        iflags);
14278                 workposted = true;
14279                 break;
14280         default:
14281                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14282                                 "0603 Invalid CQ subtype %d: "
14283                                 "%08x %08x %08x %08x\n",
14284                                 cq->subtype, wcqe->word0, wcqe->parameter,
14285                                 wcqe->word2, wcqe->word3);
14286                 workposted = false;
14287                 break;
14288         }
14289         return workposted;
14290 }
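
/*
 * Note the split above: LPFC_IO aborts are handled inline in interrupt
 * context, while ELS/NVME-LS aborts are queued and deferred to the
 * worker thread via ELS_XRI_ABORT_EVENT, which
 * lpfc_sli4_els_xri_abort_event_proc() earlier in this file consumes.
 */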
14291
14292 #define FC_RCTL_MDS_DIAGS       0xF4
14293
14294 /**
14295  * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
14296  * @phba: Pointer to HBA context object.
14297  * @rcqe: Pointer to receive-queue completion queue entry.
14298  *
14299  * This routine processes a receive-queue completion queue entry.
14300  *
14301  * Return: true if work posted to worker thread, otherwise false.
14302  **/
14303 static bool
14304 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
14305 {
14306         bool workposted = false;
14307         struct fc_frame_header *fc_hdr;
14308         struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
14309         struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
14310         struct lpfc_nvmet_tgtport *tgtp;
14311         struct hbq_dmabuf *dma_buf;
14312         uint32_t status, rq_id;
14313         unsigned long iflags;
14314
14315         /* sanity check on queue memory */
14316         if (unlikely(!hrq) || unlikely(!drq))
14317                 return workposted;
14318
14319         if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14320                 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14321         else
14322                 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14323         if (rq_id != hrq->queue_id)
14324                 goto out;
14325
14326         status = bf_get(lpfc_rcqe_status, rcqe);
14327         switch (status) {
14328         case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14329                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14330                                 "2537 Receive Frame Truncated!!\n");
14331                 fallthrough;
14332         case FC_STATUS_RQ_SUCCESS:
14333                 spin_lock_irqsave(&phba->hbalock, iflags);
14334                 lpfc_sli4_rq_release(hrq, drq);
14335                 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
14336                 if (!dma_buf) {
14337                         hrq->RQ_no_buf_found++;
14338                         spin_unlock_irqrestore(&phba->hbalock, iflags);
14339                         goto out;
14340                 }
14341                 hrq->RQ_rcv_buf++;
14342                 hrq->RQ_buf_posted--;
14343                 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
14344
14345                 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14346
14347                 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
14348                     fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
14349                         spin_unlock_irqrestore(&phba->hbalock, iflags);
14350                         /* Handle MDS Loopback frames */
14351                         if (!(phba->pport->load_flag & FC_UNLOADING))
14352                                 lpfc_sli4_handle_mds_loopback(phba->pport,
14353                                                               dma_buf);
14354                         else
14355                                 lpfc_in_buf_free(phba, &dma_buf->dbuf);
14356                         break;
14357                 }
14358
14359                 /* save off the frame for the work thread to process */
14360                 list_add_tail(&dma_buf->cq_event.list,
14361                               &phba->sli4_hba.sp_queue_event);
14362                 /* Frame received */
14363                 phba->hba_flag |= HBA_SP_QUEUE_EVT;
14364                 spin_unlock_irqrestore(&phba->hbalock, iflags);
14365                 workposted = true;
14366                 break;
14367         case FC_STATUS_INSUFF_BUF_FRM_DISC:
14368                 if (phba->nvmet_support) {
14369                         tgtp = phba->targetport->private;
14370                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14371                                         "6402 RQE Error x%x, posted %d err_cnt "
14372                                         "%d: %x %x %x\n",
14373                                         status, hrq->RQ_buf_posted,
14374                                         hrq->RQ_no_posted_buf,
14375                                         atomic_read(&tgtp->rcv_fcp_cmd_in),
14376                                         atomic_read(&tgtp->rcv_fcp_cmd_out),
14377                                         atomic_read(&tgtp->xmt_fcp_release));
14378                 }
14379                 fallthrough;
14380
14381         case FC_STATUS_INSUFF_BUF_NEED_BUF:
14382                 hrq->RQ_no_posted_buf++;
14383                 /* Post more buffers if possible */
14384                 spin_lock_irqsave(&phba->hbalock, iflags);
14385                 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
14386                 spin_unlock_irqrestore(&phba->hbalock, iflags);
14387                 workposted = true;
14388                 break;
14389         }
14390 out:
14391         return workposted;
14392 }
14393
14394 /**
14395  * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
14396  * @phba: Pointer to HBA context object.
14397  * @cq: Pointer to the completion queue.
14398  * @cqe: Pointer to a completion queue entry.
14399  *
14400  * This routine processes a slow-path work-queue or receive-queue completion
14401  * queue entry.
14402  *
14403  * Return: true if work posted to worker thread, otherwise false.
14404  **/
14405 static bool
14406 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14407                          struct lpfc_cqe *cqe)
14408 {
14409         struct lpfc_cqe cqevt;
14410         bool workposted = false;
14411
14412         /* Copy the work queue CQE and convert endian order if needed */
14413         lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
14414
14415         /* Check and process for different type of WCQE and dispatch */
14416         switch (bf_get(lpfc_cqe_code, &cqevt)) {
14417         case CQE_CODE_COMPL_WQE:
14418                 /* Process the WQ/RQ complete event */
14419                 phba->last_completion_time = jiffies;
14420                 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
14421                                 (struct lpfc_wcqe_complete *)&cqevt);
14422                 break;
14423         case CQE_CODE_RELEASE_WQE:
14424                 /* Process the WQ release event */
14425                 lpfc_sli4_sp_handle_rel_wcqe(phba,
14426                                 (struct lpfc_wcqe_release *)&cqevt);
14427                 break;
14428         case CQE_CODE_XRI_ABORTED:
14429                 /* Process the WQ XRI abort event */
14430                 phba->last_completion_time = jiffies;
14431                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14432                                 (struct sli4_wcqe_xri_aborted *)&cqevt);
14433                 break;
14434         case CQE_CODE_RECEIVE:
14435         case CQE_CODE_RECEIVE_V1:
14436                 /* Process the RQ event */
14437                 phba->last_completion_time = jiffies;
14438                 workposted = lpfc_sli4_sp_handle_rcqe(phba,
14439                                 (struct lpfc_rcqe *)&cqevt);
14440                 break;
14441         default:
14442                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14443                                 "0388 Not a valid WCQE code: x%x\n",
14444                                 bf_get(lpfc_cqe_code, &cqevt));
14445                 break;
14446         }
14447         return workposted;
14448 }
14449
14450 /**
14451  * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
14452  * @phba: Pointer to HBA context object.
14453  * @eqe: Pointer to fast-path event queue entry.
14454  * @speq: Pointer to slow-path event queue.
14455  *
14456  * This routine processes an event queue entry from the slow-path event
14457  * queue. It looks up the completion queue referenced by the EQE's
14458  * resource id; if no matching CQ is found, an error is logged and the
14459  * routine returns. Otherwise, it schedules the slow-path work handler
14460  * that will process all the entries on that completion queue and rearm
14461  * it.
14462  *
14463  **/
14464 static void
14465 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
14466         struct lpfc_queue *speq)
14467 {
14468         struct lpfc_queue *cq = NULL, *childq;
14469         uint16_t cqid;
14470         int ret = 0;
14471
14472         /* Get the reference to the corresponding CQ */
14473         cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14474
14475         list_for_each_entry(childq, &speq->child_list, list) {
14476                 if (childq->queue_id == cqid) {
14477                         cq = childq;
14478                         break;
14479                 }
14480         }
14481         if (unlikely(!cq)) {
14482                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
14483                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14484                                         "0365 Slow-path CQ identifier "
14485                                         "(%d) does not exist\n", cqid);
14486                 return;
14487         }
14488
14489         /* Save EQ associated with this CQ */
14490         cq->assoc_qp = speq;
14491
14492         if (is_kdump_kernel())
14493                 ret = queue_work(phba->wq, &cq->spwork);
14494         else
14495                 ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
14496
14497         if (!ret)
14498                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14499                                 "0390 Cannot schedule queue work "
14500                                 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14501                                 cqid, cq->queue_id, raw_smp_processor_id());
14502 }
14503
14504 /**
14505  * __lpfc_sli4_process_cq - Process elements of a CQ
14506  * @phba: Pointer to HBA context object.
14507  * @cq: Pointer to CQ to be processed
14508  * @handler: Routine to process each cqe
14509  * @delay: Pointer to usdelay to set in case of rescheduling of the handler
14510  * @poll_mode: Polling mode we were called from
14511  *
14512  * This routine processes completion queue entries in a CQ. While a valid
14513  * queue element is found, the handler is called. During processing checks
14514  * are made for periodic doorbell writes to let the hardware know of
14515  * element consumption.
14516  *
14517  * If the max limit on cqes to process is hit, or there are no more valid
14518  * entries, the loop stops. If we processed a sufficient number of elements,
14519  * meaning there is sufficient load, rather than rearming and generating
14520  * another interrupt, a cq rescheduling delay will be set. A delay of 0
14521  * indicates no rescheduling.
14522  *
14523  * Returns true if work was posted to the worker thread, false otherwise.
14524  **/
14525 static bool
14526 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
14527         bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
14528                         struct lpfc_cqe *), unsigned long *delay,
14529                         enum lpfc_poll_mode poll_mode)
14530 {
14531         struct lpfc_cqe *cqe;
14532         bool workposted = false;
14533         int count = 0, consumed = 0;
14534         bool arm = true;
14535
14536         /* default - no reschedule */
14537         *delay = 0;
14538
14539         if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
14540                 goto rearm_and_exit;
14541
14542         /* Process all the entries to the CQ */
14543         cq->q_flag = 0;
14544         cqe = lpfc_sli4_cq_get(cq);
14545         while (cqe) {
14546                 workposted |= handler(phba, cq, cqe);
14547                 __lpfc_sli4_consume_cqe(phba, cq, cqe);
14548
14549                 consumed++;
14550                 if (!(++count % cq->max_proc_limit))
14551                         break;
14552
14553                 if (!(count % cq->notify_interval)) {
14554                         phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14555                                                 LPFC_QUEUE_NOARM);
14556                         consumed = 0;
14557                         cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
14558                 }
14559
14560                 if (count == LPFC_NVMET_CQ_NOTIFY)
14561                         cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
14562
14563                 cqe = lpfc_sli4_cq_get(cq);
14564         }
14565         if (count >= phba->cfg_cq_poll_threshold) {
14566                 *delay = 1;
14567                 arm = false;
14568         }
14569
14570         /* Note: complete the irq_poll softirq before rearming CQ */
14571         if (poll_mode == LPFC_IRQ_POLL)
14572                 irq_poll_complete(&cq->iop);
14573
14574         /* Track the max number of CQEs processed in 1 EQ */
14575         if (count > cq->CQ_max_cqe)
14576                 cq->CQ_max_cqe = count;
14577
14578         cq->assoc_qp->EQ_cqe_cnt += count;
14579
14580         /* Catch the no cq entry condition */
14581         if (unlikely(count == 0))
14582                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14583                                 "0369 No entry from completion queue "
14584                                 "qid=%d\n", cq->queue_id);
14585
14586         xchg(&cq->queue_claimed, 0);
14587
14588 rearm_and_exit:
14589         phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14590                         arm ?  LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
14591
14592         return workposted;
14593 }
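
/*
 * Worked example of the doorbell batching above (illustrative values,
 * assuming max_proc_limit is larger): with notify_interval == 16, a
 * burst of 40 CQEs produces two LPFC_QUEUE_NOARM doorbell writes inside
 * the loop (16 consumed entries each, with 'consumed' reset after each
 * write); the final write at rearm_and_exit covers the remaining 8
 * entries and, if the poll threshold was not crossed, rearms the CQ.
 */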
14594
14595 /**
14596  * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
14597  * @cq: pointer to CQ to process
14598  *
14599  * This routine calls the cq processing routine with a handler specific
14600  * to the type of queue bound to it.
14601  *
14602  * The CQ routine returns two values: the first is the calling status,
14603  * which indicates whether work was queued to the background discovery
14604  * thread. If true, the routine should wake up the discovery thread;
14605  * the second is the delay parameter. If non-zero, rather than rearming
14606  * the CQ and taking yet another interrupt, the CQ handler should be
14607  * requeued so that it is processed in a subsequent polling action. The
14608  * value of the delay indicates when to reschedule it.
14609  **/
14610 static void
14611 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
14612 {
14613         struct lpfc_hba *phba = cq->phba;
14614         unsigned long delay;
14615         bool workposted = false;
14616         int ret = 0;
14617
14618         /* Process and rearm the CQ */
14619         switch (cq->type) {
14620         case LPFC_MCQ:
14621                 workposted |= __lpfc_sli4_process_cq(phba, cq,
14622                                                 lpfc_sli4_sp_handle_mcqe,
14623                                                 &delay, LPFC_QUEUE_WORK);
14624                 break;
14625         case LPFC_WCQ:
14626                 if (cq->subtype == LPFC_IO)
14627                         workposted |= __lpfc_sli4_process_cq(phba, cq,
14628                                                 lpfc_sli4_fp_handle_cqe,
14629                                                 &delay, LPFC_QUEUE_WORK);
14630                 else
14631                         workposted |= __lpfc_sli4_process_cq(phba, cq,
14632                                                 lpfc_sli4_sp_handle_cqe,
14633                                                 &delay, LPFC_QUEUE_WORK);
14634                 break;
14635         default:
14636                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14637                                 "0370 Invalid completion queue type (%d)\n",
14638                                 cq->type);
14639                 return;
14640         }
14641
14642         if (delay) {
14643                 if (is_kdump_kernel())
14644                         ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
14645                                                 delay);
14646                 else
14647                         ret = queue_delayed_work_on(cq->chann, phba->wq,
14648                                                 &cq->sched_spwork, delay);
14649                 if (!ret)
14650                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14651                                 "0394 Cannot schedule queue work "
14652                                 "for cqid=%d on CPU %d\n",
14653                                 cq->queue_id, cq->chann);
14654         }
14655
14656         /* wake up worker thread if there are works to be done */
14657         if (workposted)
14658                 lpfc_worker_wake_up(phba);
14659 }
14660
14661 /**
14662  * lpfc_sli4_sp_process_cq - slow-path work handler when started by
14663  *   interrupt
14664  * @work: pointer to work element
14665  *
14666  * Translates the work element to its lpfc_queue and calls the slow-path handler.
14667  **/
14668 static void
14669 lpfc_sli4_sp_process_cq(struct work_struct *work)
14670 {
14671         struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
14672
14673         __lpfc_sli4_sp_process_cq(cq);
14674 }
14675
14676 /**
14677  * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
14678  * @work: pointer to work element
14679  *
14680  * Translates the work element to its lpfc_queue and calls the slow-path handler.
14681  **/
14682 static void
14683 lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
14684 {
14685         struct lpfc_queue *cq = container_of(to_delayed_work(work),
14686                                         struct lpfc_queue, sched_spwork);
14687
14688         __lpfc_sli4_sp_process_cq(cq);
14689 }
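
/*
 * Illustrative sketch (an assumption, not a quote of lpfc setup code):
 * the container_of() translations above rely on the work items having
 * been initialized against these handlers when the queue was created,
 * along the lines of:
 *
 *	INIT_WORK(&cq->spwork, lpfc_sli4_sp_process_cq);
 *	INIT_DELAYED_WORK(&cq->sched_spwork, lpfc_sli4_dly_sp_process_cq);
 */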
14690
14691 /**
14692  * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
14693  * @phba: Pointer to HBA context object.
14694  * @cq: Pointer to associated CQ
14695  * @wcqe: Pointer to work-queue completion queue entry.
14696  *
14697  * This routine processes a fast-path work queue completion entry from the
14698  * fast-path event queue for FCP command response completion.
14699  **/
14700 static void
14701 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14702                              struct lpfc_wcqe_complete *wcqe)
14703 {
14704         struct lpfc_sli_ring *pring = cq->pring;
14705         struct lpfc_iocbq *cmdiocbq;
14706         unsigned long iflags;
14707
14708         /* Check for response status */
14709         if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14710                 /* If resource errors reported from HBA, reduce queue
14711                  * depth of the SCSI device.
14712                  */
14713                 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
14714                      IOSTAT_LOCAL_REJECT)) &&
14715                     ((wcqe->parameter & IOERR_PARAM_MASK) ==
14716                      IOERR_NO_RESOURCES))
14717                         phba->lpfc_rampdown_queue_depth(phba);
14718
14719                 /* Log the cmpl status */
14720                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14721                                 "0373 FCP CQE cmpl: status=x%x: "
14722                                 "CQE: %08x %08x %08x %08x\n",
14723                                 bf_get(lpfc_wcqe_c_status, wcqe),
14724                                 wcqe->word0, wcqe->total_data_placed,
14725                                 wcqe->parameter, wcqe->word3);
14726         }
14727
14728         /* Look up the FCP command IOCB and create pseudo response IOCB */
14729         spin_lock_irqsave(&pring->ring_lock, iflags);
14730         pring->stats.iocb_event++;
14731         cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
14732                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14733         spin_unlock_irqrestore(&pring->ring_lock, iflags);
14734         if (unlikely(!cmdiocbq)) {
14735                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14736                                 "0374 FCP complete with no corresponding "
14737                                 "cmdiocb: iotag (%d)\n",
14738                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14739                 return;
14740         }
14741 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
14742         cmdiocbq->isr_timestamp = cq->isr_timestamp;
14743 #endif
14744         if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
14745                 spin_lock_irqsave(&phba->hbalock, iflags);
14746                 cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
14747                 spin_unlock_irqrestore(&phba->hbalock, iflags);
14748         }
14749
14750         if (cmdiocbq->cmd_cmpl) {
14751                 /* For FCP the flag is cleared in cmd_cmpl */
14752                 if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
14753                     cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) {
14754                         spin_lock_irqsave(&phba->hbalock, iflags);
14755                         cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
14756                         spin_unlock_irqrestore(&phba->hbalock, iflags);
14757                 }
14758
14759                 /* Pass the cmd_iocb and the wcqe to the upper layer */
14760                 memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
14761                        sizeof(struct lpfc_wcqe_complete));
14762                 (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, cmdiocbq);
14763         } else {
14764                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14765                                 "0375 FCP cmdiocb has no callback function "
14766                                 "iotag: (%d)\n",
14767                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14768         }
14769 }
14770
14771 /**
14772  * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
14773  * @phba: Pointer to HBA context object.
14774  * @cq: Pointer to completion queue.
14775  * @wcqe: Pointer to work-queue completion queue entry.
14776  *
14777  * This routine handles a fast-path WQ entry consumed event by invoking the
14778  * proper WQ release routine on the matching fast-path WQ.
14779  **/
14780 static void
14781 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14782                              struct lpfc_wcqe_release *wcqe)
14783 {
14784         struct lpfc_queue *childwq;
14785         bool wqid_matched = false;
14786         uint16_t hba_wqid;
14787
14788         /* Check for fast-path FCP work queue release */
14789         hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
14790         list_for_each_entry(childwq, &cq->child_list, list) {
14791                 if (childwq->queue_id == hba_wqid) {
14792                         lpfc_sli4_wq_release(childwq,
14793                                         bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14794                         if (childwq->q_flag & HBA_NVMET_WQFULL)
14795                                 lpfc_nvmet_wqfull_process(phba, childwq);
14796                         wqid_matched = true;
14797                         break;
14798                 }
14799         }
14800         /* Report warning log message if no match found */
14801         if (!wqid_matched)
14802                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14803                                 "2580 Fast-path wqe consume event carries "
14804                                 "mismatched qid: wcqe-qid=x%x\n", hba_wqid);
14805 }
14806
14807 /**
14808  * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
14809  * @phba: Pointer to HBA context object.
14810  * @cq: Pointer to completion queue.
14811  * @rcqe: Pointer to receive-queue completion queue entry.
14812  *
14813  * This routine processes a receive-queue completion queue entry.
14814  *
14815  * Return: true if work posted to worker thread, otherwise false.
14816  **/
14817 static bool
14818 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14819                             struct lpfc_rcqe *rcqe)
14820 {
14821         bool workposted = false;
14822         struct lpfc_queue *hrq;
14823         struct lpfc_queue *drq;
14824         struct rqb_dmabuf *dma_buf;
14825         struct fc_frame_header *fc_hdr;
14826         struct lpfc_nvmet_tgtport *tgtp;
14827         uint32_t status, rq_id;
14828         unsigned long iflags;
14829         uint32_t fctl, idx;
14830
14831         if ((phba->nvmet_support == 0) ||
14832             (phba->sli4_hba.nvmet_cqset == NULL))
14833                 return workposted;
14834
14835         idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
14836         hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
14837         drq = phba->sli4_hba.nvmet_mrq_data[idx];
14838
14839         /* sanity check on queue memory */
14840         if (unlikely(!hrq) || unlikely(!drq))
14841                 return workposted;
14842
14843         if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14844                 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14845         else
14846                 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14847
14848         /* nvmet_support was validated on entry; just match the RQ id */
14849         if (rq_id != hrq->queue_id)
14850                 return workposted;
14851
14852         status = bf_get(lpfc_rcqe_status, rcqe);
14853         switch (status) {
14854         case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14855                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14856                                 "6126 Receive Frame Truncated!!\n");
14857                 fallthrough;
14858         case FC_STATUS_RQ_SUCCESS:
14859                 spin_lock_irqsave(&phba->hbalock, iflags);
14860                 lpfc_sli4_rq_release(hrq, drq);
14861                 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
14862                 if (!dma_buf) {
14863                         hrq->RQ_no_buf_found++;
14864                         spin_unlock_irqrestore(&phba->hbalock, iflags);
14865                         goto out;
14866                 }
14867                 spin_unlock_irqrestore(&phba->hbalock, iflags);
14868                 hrq->RQ_rcv_buf++;
14869                 hrq->RQ_buf_posted--;
14870                 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14871
14872                 /* Just some basic sanity checks on FCP Command frame */
14873                 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
14874                         fc_hdr->fh_f_ctl[1] << 8 |
14875                         fc_hdr->fh_f_ctl[2]);
14876                 if (((fctl &
14877                     (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
14878                     (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
14879                     (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
14880                         goto drop;
14881
14882                 if (fc_hdr->fh_type == FC_TYPE_FCP) {
14883                         dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
14884                         lpfc_nvmet_unsol_fcp_event(
14885                                 phba, idx, dma_buf, cq->isr_timestamp,
14886                                 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
14887                         return false;
14888                 }
14889 drop:
14890                 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
14891                 break;
14892         case FC_STATUS_INSUFF_BUF_FRM_DISC:
14893                 if (phba->nvmet_support) {
14894                         tgtp = phba->targetport->private;
14895                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14896                                         "6401 RQE Error x%x, posted %d err_cnt "
14897                                         "%d: %x %x %x\n",
14898                                         status, hrq->RQ_buf_posted,
14899                                         hrq->RQ_no_posted_buf,
14900                                         atomic_read(&tgtp->rcv_fcp_cmd_in),
14901                                         atomic_read(&tgtp->rcv_fcp_cmd_out),
14902                                         atomic_read(&tgtp->xmt_fcp_release));
14903                 }
14904                 fallthrough;
14905
14906         case FC_STATUS_INSUFF_BUF_NEED_BUF:
14907                 hrq->RQ_no_posted_buf++;
14908                 /* Post more buffers if possible */
14909                 break;
14910         }
14911 out:
14912         return workposted;
14913 }
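
/*
 * Note on the f_ctl screen above: FC_FC_FIRST_SEQ, FC_FC_END_SEQ and
 * FC_FC_SEQ_INIT together require the frame to open the exchange, end
 * its sequence, and transfer sequence initiative, which (with
 * fh_seq_cnt == 0) is the shape of a well-formed unsolicited FCP_CMND;
 * anything else is dropped.
 */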
14914
14915 /**
14916  * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
14917  * @phba: adapter with cq
14918  * @cq: Pointer to the completion queue.
14919  * @cqe: Pointer to fast-path completion queue entry.
14920  *
14921  * This routine processes a fast-path work queue completion entry from the
14922  * fast-path event queue for FCP command response completion.
14923  *
14924  * Return: true if work posted to worker thread, otherwise false.
14925  **/
14926 static bool
14927 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14928                          struct lpfc_cqe *cqe)
14929 {
14930         struct lpfc_wcqe_release wcqe;
14931         bool workposted = false;
14932
14933         /* Copy the work queue CQE and convert endian order if needed */
14934         lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
14935
14936         /* Check and process for different type of WCQE and dispatch */
14937         switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
14938         case CQE_CODE_COMPL_WQE:
14939         case CQE_CODE_NVME_ERSP:
14940                 cq->CQ_wq++;
14941                 /* Process the WQ complete event */
14942                 phba->last_completion_time = jiffies;
14943                 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
14944                         lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14945                                 (struct lpfc_wcqe_complete *)&wcqe);
14946                 break;
14947         case CQE_CODE_RELEASE_WQE:
14948                 cq->CQ_release_wqe++;
14949                 /* Process the WQ release event */
14950                 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
14951                                 (struct lpfc_wcqe_release *)&wcqe);
14952                 break;
14953         case CQE_CODE_XRI_ABORTED:
14954                 cq->CQ_xri_aborted++;
14955                 /* Process the WQ XRI abort event */
14956                 phba->last_completion_time = jiffies;
14957                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14958                                 (struct sli4_wcqe_xri_aborted *)&wcqe);
14959                 break;
14960         case CQE_CODE_RECEIVE_V1:
14961         case CQE_CODE_RECEIVE:
14962                 phba->last_completion_time = jiffies;
14963                 if (cq->subtype == LPFC_NVMET) {
14964                         workposted = lpfc_sli4_nvmet_handle_rcqe(
14965                                 phba, cq, (struct lpfc_rcqe *)&wcqe);
14966                 }
14967                 break;
14968         default:
14969                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14970                                 "0144 Not a valid CQE code: x%x\n",
14971                                 bf_get(lpfc_wcqe_c_code, &wcqe));
14972                 break;
14973         }
14974         return workposted;
14975 }
14976
14977 /**
14978  * lpfc_sli4_sched_cq_work - Schedules cq work
14979  * @phba: Pointer to HBA context object.
14980  * @cq: Pointer to CQ
14981  * @cqid: CQ ID
14982  *
14983  * This routine checks the poll mode of the CQ corresponding to
14984  * cq->chann, then either schedules irq_poll (softirq) processing or
14985  * queues work to complete the cq work.
14986  *
14987  * The queue_work path is taken if poll_mode is LPFC_QUEUE_WORK or if
14988  * congestion management is active; otherwise the softirq path is taken.
14989  *
14990  **/
14991 static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
14992                                     struct lpfc_queue *cq, uint16_t cqid)
14993 {
14994         int ret = 0;
14995
14996         switch (cq->poll_mode) {
14997         case LPFC_IRQ_POLL:
14998                 /* CGN mgmt is mutually exclusive from softirq processing */
14999                 if (phba->cmf_active_mode == LPFC_CFG_OFF) {
15000                         irq_poll_sched(&cq->iop);
15001                         break;
15002                 }
15003                 fallthrough;
15004         case LPFC_QUEUE_WORK:
15005         default:
15006                 if (is_kdump_kernel())
15007                         ret = queue_work(phba->wq, &cq->irqwork);
15008                 else
15009                         ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
15010                 if (!ret)
15011                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15012                                         "0383 Cannot schedule queue work "
15013                                         "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
15014                                         cqid, cq->queue_id,
15015                                         raw_smp_processor_id());
15016         }
15017 }
15018
15019 /**
15020  * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
15021  * @phba: Pointer to HBA context object.
15022  * @eq: Pointer to the queue structure.
15023  * @eqe: Pointer to fast-path event queue entry.
15024  *
15025  * This routine processes an event queue entry from the fast-path event
15026  * queue. It checks the MajorCode and MinorCode to determine whether this
15027  * is a completion event on a completion queue; if not, an error is logged
15028  * and the routine returns. Otherwise, it looks up the corresponding
15029  * completion queue, processes all the entries on it, rearms the
15030  * completion queue, and then returns.
15031  **/
15032 static void
15033 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
15034                          struct lpfc_eqe *eqe)
15035 {
15036         struct lpfc_queue *cq = NULL;
15037         uint32_t qidx = eq->hdwq;
15038         uint16_t cqid, id;
15039
15040         if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
15041                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15042                                 "0366 Not a valid completion "
15043                                 "event: majorcode=x%x, minorcode=x%x\n",
15044                                 bf_get_le32(lpfc_eqe_major_code, eqe),
15045                                 bf_get_le32(lpfc_eqe_minor_code, eqe));
15046                 return;
15047         }
15048
15049         /* Get the reference to the corresponding CQ */
15050         cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
15051
15052         /* Use the fast lookup method first */
15053         if (cqid <= phba->sli4_hba.cq_max) {
15054                 cq = phba->sli4_hba.cq_lookup[cqid];
15055                 if (cq)
15056                         goto work_cq;
15057         }
15058
15059         /* Next check for NVMET completion */
15060         if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
15061                 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
15062                 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
15063                         /* Process NVMET unsol rcv */
15064                         cq = phba->sli4_hba.nvmet_cqset[cqid - id];
15065                         goto process_cq;
15066                 }
15067         }
15068
15069         if (phba->sli4_hba.nvmels_cq &&
15070             (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
15071                 /* Process NVME unsol rcv */
15072                 cq = phba->sli4_hba.nvmels_cq;
15073         }
15074
15075         /* Otherwise this is a Slow path event */
15076         if (!cq) {
15077                 lpfc_sli4_sp_handle_eqe(phba, eqe,
15078                                         phba->sli4_hba.hdwq[qidx].hba_eq);
15079                 return;
15080         }
15081
15082 process_cq:
15083         if (unlikely(cqid != cq->queue_id)) {
15084                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15085                                 "0368 Mismatched fast-path completion "
15086                                 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
15087                                 cqid, cq->queue_id);
15088                 return;
15089         }
15090
15091 work_cq:
15092 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
15093         if (phba->ktime_on)
15094                 cq->isr_timestamp = ktime_get_ns();
15095         else
15096                 cq->isr_timestamp = 0;
15097 #endif
15098         lpfc_sli4_sched_cq_work(phba, cq, cqid);
15099 }
15100
15101 /**
15102  * __lpfc_sli4_hba_process_cq - Process a fast-path completion queue
15103  * @cq: Pointer to CQ to be processed
15104  * @poll_mode: Enum lpfc_poll_mode to determine poll mode
15105  *
15106  * This routine calls the cq processing routine with the handler for
15107  * fast path CQEs.
15108  *
15109  * The CQ routine returns two values: the first is the calling status,
15110  * which indicates whether work was queued to the background discovery
15111  * thread. If true, the routine should wake up the discovery thread.
15112  * The second is the delay parameter. If non-zero, rather than rearming
15113  * the CQ and taking yet another interrupt, the CQ handler should be
15114  * queued so that it is processed in a subsequent polling action. The
15115  * value of the delay indicates when to reschedule it.
15116  **/
15117 static void
15118 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
15119                            enum lpfc_poll_mode poll_mode)
15120 {
15121         struct lpfc_hba *phba = cq->phba;
15122         unsigned long delay;
15123         bool workposted = false;
15124         int ret = 0;
15125
15126         /* process and rearm the CQ */
15127         workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
15128                                              &delay, poll_mode);
15129
15130         if (delay) {
15131                 if (is_kdump_kernel())
15132                         ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
15133                                                 delay);
15134                 else
15135                         ret = queue_delayed_work_on(cq->chann, phba->wq,
15136                                                 &cq->sched_irqwork, delay);
15137                 if (!ret)
15138                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15139                                         "0367 Cannot schedule queue work "
15140                                         "for cqid=%d on CPU %d\n",
15141                                         cq->queue_id, cq->chann);
15142         }
15143
15144         /* wake up worker thread if there are works to be done */
15145         if (workposted)
15146                 lpfc_worker_wake_up(phba);
15147 }
15148
15149 /**
15150  * lpfc_sli4_hba_process_cq - fast-path work handler when started by
15151  *   interrupt
15152  * @work: pointer to work element
15153  *
15154  * Translates from the work element and calls the fast-path handler.
15155  **/
15156 static void
15157 lpfc_sli4_hba_process_cq(struct work_struct *work)
15158 {
15159         struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
15160
15161         __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
15162 }
15163
15164 /**
15165  * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
15166  * @work: pointer to work element
15167  *
15168  * Translates from the work element and calls the fast-path handler.
15169  **/
15170 static void
15171 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
15172 {
15173         struct lpfc_queue *cq = container_of(to_delayed_work(work),
15174                                         struct lpfc_queue, sched_irqwork);
15175
15176         __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
15177 }
15178
15179 /**
15180  * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
15181  * @irq: Interrupt number.
15182  * @dev_id: The device context pointer.
15183  *
15184  * This function is directly called from the PCI layer as an interrupt
15185  * service routine when a device with the SLI-4 interface spec is enabled
15186  * with MSI-X multi-message interrupt mode and there is a fast-path FCP
15187  * IOCB ring event in the HBA. However, when the device is enabled with
15188  * either MSI or Pin-IRQ interrupt mode, this function is called as part
15189  * of the device-level interrupt handler. When the PCI slot is in error
15190  * recovery or the HBA is undergoing initialization, the interrupt handler
15191  * will not process the interrupt. The SCSI FCP fast-path ring events are
15192  * handled in interrupt context. This function is called without any lock
15193  * held. It gets the hbalock to access and update SLI data structures.
15194  * Note that the FCP EQ to FCP CQ mapping is one-to-one, such that the
15195  * FCP EQ index is equal to the FCP CQ index.
15196  *
15197  * The link attention and ELS ring attention events are handled
15198  * by the worker thread. The interrupt handler signals the worker thread
15199  * and returns for these events. This function is called without any lock
15200  * held. It gets the hbalock to access and update SLI data structures.
15201  *
15202  * This function returns IRQ_HANDLED when the interrupt is handled, else
15203  * it returns IRQ_NONE.
15204  **/
15205 irqreturn_t
15206 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
15207 {
15208         struct lpfc_hba *phba;
15209         struct lpfc_hba_eq_hdl *hba_eq_hdl;
15210         struct lpfc_queue *fpeq;
15211         unsigned long iflag;
15212         int ecount = 0;
15213         int hba_eqidx;
15214         struct lpfc_eq_intr_info *eqi;
15215
15216         /* Get the driver's phba structure from the dev_id */
15217         hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
15218         phba = hba_eq_hdl->phba;
15219         hba_eqidx = hba_eq_hdl->idx;
15220
15221         if (unlikely(!phba))
15222                 return IRQ_NONE;
15223         if (unlikely(!phba->sli4_hba.hdwq))
15224                 return IRQ_NONE;
15225
15226         /* Get to the EQ struct associated with this vector */
15227         fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
15228         if (unlikely(!fpeq))
15229                 return IRQ_NONE;
15230
15231         /* Check device state for handling interrupt */
15232         if (unlikely(lpfc_intr_state_check(phba))) {
15233                 /* Check again for link_state with lock held */
15234                 spin_lock_irqsave(&phba->hbalock, iflag);
15235                 if (phba->link_state < LPFC_LINK_DOWN)
15236                         /* Flush, clear interrupt, and rearm the EQ */
15237                         lpfc_sli4_eqcq_flush(phba, fpeq);
15238                 spin_unlock_irqrestore(&phba->hbalock, iflag);
15239                 return IRQ_NONE;
15240         }
15241
15242         eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
15243         eqi->icnt++;
15244
15245         fpeq->last_cpu = raw_smp_processor_id();
15246
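        /* If this EQ is seeing a high interrupt rate, back off by
         * programming the maximum EQ delay, provided auto_imax is
         * enabled and the port supports the EQ delay register.
         */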
15247         if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
15248             fpeq->q_flag & HBA_EQ_DELAY_CHK &&
15249             phba->cfg_auto_imax &&
15250             fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
15251             phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
15252                 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
15253
15254         /* process and rearm the EQ */
15255         ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
15256
15257         if (unlikely(ecount == 0)) {
15258                 fpeq->EQ_no_entry++;
15259                 if (phba->intr_type == MSIX)
15260                         /* MSI-X treated interrupt served as no EQ share INT */
15261                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15262                                         "0358 MSI-X interrupt with no EQE\n");
15263                 else
15264                         /* Non MSI-X treated on interrupt as EQ share INT */
15265                         return IRQ_NONE;
15266         }
15267
15268         return IRQ_HANDLED;
15269 } /* lpfc_sli4_hba_intr_handler */
15270
15271 /**
15272  * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
15273  * @irq: Interrupt number.
15274  * @dev_id: The device context pointer.
15275  *
15276  * This function is the device-level interrupt handler to device with SLI-4
15277  * interface spec, called from the PCI layer when either MSI or Pin-IRQ
15278  * interrupt mode is enabled and there is an event in the HBA which requires
15279  * driver attention. This function invokes the slow-path interrupt attention
15280  * handling function and fast-path interrupt attention handling function in
15281  * turn to process the relevant HBA attention events. This function is called
15282  * without any lock held. It gets the hbalock to access and update SLI data
15283  * structures.
15284  *
15285  * This function returns IRQ_HANDLED when the interrupt is handled, else
15286  * it returns IRQ_NONE.
15287  **/
15288 irqreturn_t
15289 lpfc_sli4_intr_handler(int irq, void *dev_id)
15290 {
15291         struct lpfc_hba  *phba;
15292         irqreturn_t hba_irq_rc;
15293         bool hba_handled = false;
15294         int qidx;
15295
15296         /* Get the driver's phba structure from the dev_id */
15297         phba = (struct lpfc_hba *)dev_id;
15298
15299         if (unlikely(!phba))
15300                 return IRQ_NONE;
15301
15302         /*
15303          * Invoke fast-path host attention interrupt handling as appropriate.
15304          */
15305         for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
15306                 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
15307                                         &phba->sli4_hba.hba_eq_hdl[qidx]);
15308                 if (hba_irq_rc == IRQ_HANDLED)
15309                         hba_handled = true;
15310         }
15311
15312         return hba_handled ? IRQ_HANDLED : IRQ_NONE;
15313 } /* lpfc_sli4_intr_handler */
15314
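/**
 * lpfc_sli4_poll_hbtimer - Heartbeat timer for EQs in polled mode
 * @t: Pointer to the timer that fired.
 *
 * Walks the HBA's poll_list under RCU protection, polling each EQ on the
 * slowpath, and rearms the timer for another LPFC_POLL_HB interval as
 * long as the poll list is not empty.
 **/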
15315 void lpfc_sli4_poll_hbtimer(struct timer_list *t)
15316 {
15317         struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
15318         struct lpfc_queue *eq;
15319         int i = 0;
15320
15321         rcu_read_lock();
15322
15323         list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
15324                 i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
15325         if (!list_empty(&phba->poll_list))
15326                 mod_timer(&phba->cpuhp_poll_timer,
15327                           jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15328
15329         rcu_read_unlock();
15330 }
15331
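/**
 * lpfc_sli4_poll_eq - Poll an EQ for completions without rearming it
 * @eq: Pointer to the EQ to poll.
 * @path: Caller context (e.g. LPFC_POLL_SLOWPATH).
 *
 * Processes the EQ with LPFC_QUEUE_NOARM when the EQ is in LPFC_EQ_POLL
 * mode. Returns the number of entries processed, or 0 if the EQ is in
 * interrupt mode.
 **/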
15332 inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
15333 {
15334         struct lpfc_hba *phba = eq->phba;
15335         int i = 0;
15336
15337         /*
15338          * Unlocking an irq is one of the entry points to check
15339          * for rescheduling, but we are safe on the I/O submission
15340          * path as the midlayer does a get_cpu to glue us in. Flush
15341          * out the invalidate queue so we can see the updated
15342          * value of the flag.
15343          */
15344         smp_rmb();
15345
15346         if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
15347                 /* We will likely not get the completion for the caller
15348                  * during this iteration, but that's fine.
15349                  * Future I/Os coming in on this eq should be able to
15350                  * pick it up.  As for the case of single I/Os, they
15351                  * will be handled through a sched from the polling
15352                  * timer function, which currently fires every 1 msec.
15353                  */
15354                 i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
15355
15356         return i;
15357 }
15358
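/**
 * lpfc_sli4_add_to_poll_list - Add an EQ to the slowpath poll list
 * @eq: Pointer to the EQ to add.
 *
 * Arms the heartbeat poll timer when the poll list transitions from
 * empty, then publishes the EQ on the list for the RCU readers.
 **/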
15359 static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
15360 {
15361         struct lpfc_hba *phba = eq->phba;
15362
15363         /* kickstart slowpath processing if needed */
15364         if (list_empty(&phba->poll_list))
15365                 mod_timer(&phba->cpuhp_poll_timer,
15366                           jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15367
15368         list_add_rcu(&eq->_poll_list, &phba->poll_list);
15369         synchronize_rcu();
15370 }
15371
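/**
 * lpfc_sli4_remove_from_poll_list - Remove an EQ from the slowpath poll list
 * @eq: Pointer to the EQ to remove.
 *
 * Unpublishes the EQ from the poll list, waits out the RCU readers, and
 * stops the heartbeat poll timer once the list becomes empty.
 **/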
15372 static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
15373 {
15374         struct lpfc_hba *phba = eq->phba;
15375
15376         /* Disable slowpath processing for this eq.  Kick-start the eq
15377          * by re-arming it ASAP.
15378          */
15379         list_del_rcu(&eq->_poll_list);
15380         synchronize_rcu();
15381
15382         if (list_empty(&phba->poll_list))
15383                 del_timer_sync(&phba->cpuhp_poll_timer);
15384 }
15385
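/**
 * lpfc_sli4_cleanup_poll_list - Remove all EQs from the poll list
 * @phba: Pointer to HBA context object.
 *
 * Empties the entire poll list in one pass and waits for the RCU
 * readers to drain.
 **/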
15386 void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
15387 {
15388         struct lpfc_queue *eq, *next;
15389
15390         list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
15391                 list_del(&eq->_poll_list);
15392
15393         INIT_LIST_HEAD(&phba->poll_list);
15394         synchronize_rcu();
15395 }
15396
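/**
 * __lpfc_sli4_switch_eqmode - Switch an EQ between interrupt and polled mode
 * @eq: Pointer to the EQ to switch.
 * @mode: LPFC_EQ_INTERRUPT or LPFC_EQ_POLL.
 **/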
15397 static inline void
15398 __lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
15399 {
15400         if (mode == eq->mode)
15401                 return;
15402         /*
15403          * Currently this function is only called during a hotplug
15404          * event and the cpu on which this function is executing
15405          * is going offline.  By now the hotplug has instructed
15406          * the scheduler to remove this cpu from the cpu active mask.
15407          * So we don't need to worry about being put aside by the
15408          * scheduler for a high-priority process.  Yes, interrupts
15409          * could come, but they are known to retire ASAP.
15410          */
15411
15412         /* Publish the new eq mode to the fastpath */
15413         WRITE_ONCE(eq->mode, mode);
15414         /* flush out the store buffer */
15415         smp_wmb();
15416
15417         /*
15418          * Add this eq to the polling list and start polling. For
15419          * a grace period both the interrupt handler and the poller
15420          * will try to process the eq _but_ that's fine.  We have a
15421          * synchronization mechanism in place (queue_claimed) to
15422          * deal with it.  This is just a draining phase for the
15423          * interrupt handler (not the eq's) as we have guaranteed
15424          * through the barrier that all the CPUs have seen the new
15425          * CQ_POLLED state, which effectively disables the rearming
15426          * of the EQ.  The whole idea is that the eq's die off
15427          * eventually as we are not rearming them anymore.
15428          */
15429         mode ? lpfc_sli4_add_to_poll_list(eq) :
15430                lpfc_sli4_remove_from_poll_list(eq);
15431 }
15432
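/**
 * lpfc_sli4_start_polling - Put an EQ into polled mode
 * @eq: Pointer to the EQ to switch.
 **/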
15433 void lpfc_sli4_start_polling(struct lpfc_queue *eq)
15434 {
15435         __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
15436 }
15437
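/**
 * lpfc_sli4_stop_polling - Return an EQ to interrupt mode and rearm it
 * @eq: Pointer to the EQ to switch.
 **/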
15438 void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
15439 {
15440         struct lpfc_hba *phba = eq->phba;
15441
15442         __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
15443
15444         /* Kick-start the pending I/Os in h/w.
15445          * Once we switch back to interrupt processing on an eq,
15446          * the I/O path completion will only arm the eq when it
15447          * receives a completion.  But since the eq is in a
15448          * disarmed state it never receives a completion.  This
15449          * creates a deadlock scenario.
15450          */
15451         phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
15452 }
15453
15454 /**
15455  * lpfc_sli4_queue_free - free a queue structure and associated memory
15456  * @queue: The queue structure to free.
15457  *
15458  * This function frees a queue structure and the DMAable memory used for
15459  * the host resident queue. This function must be called after destroying the
15460  * queue on the HBA.
15461  **/
15462 void
15463 lpfc_sli4_queue_free(struct lpfc_queue *queue)
15464 {
15465         struct lpfc_dmabuf *dmabuf;
15466
15467         if (!queue)
15468                 return;
15469
15470         if (!list_empty(&queue->wq_list))
15471                 list_del(&queue->wq_list);
15472
15473         while (!list_empty(&queue->page_list)) {
15474                 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
15475                                  list);
15476                 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
15477                                   dmabuf->virt, dmabuf->phys);
15478                 kfree(dmabuf);
15479         }
15480         if (queue->rqbp) {
15481                 lpfc_free_rq_buffer(queue->phba, queue);
15482                 kfree(queue->rqbp);
15483         }
15484
15485         if (!list_empty(&queue->cpu_list))
15486                 list_del(&queue->cpu_list);
15487
15488         kfree(queue);
15489         return;
15490 }
15491
15492 /**
15493  * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
15494  * @phba: The HBA that this queue is being created on.
15495  * @page_size: The size of a queue page
15496  * @entry_size: The size of each queue entry for this queue.
15497  * @entry_count: The number of entries that this queue will handle.
15498  * @cpu: The cpu that will primarily utilize this queue.
15499  *
15500  * This function allocates a queue structure and the DMAable memory used for
15501  * the host resident queue. This function must be called before creating the
15502  * queue on the HBA.
15503  **/
15504 struct lpfc_queue *
15505 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
15506                       uint32_t entry_size, uint32_t entry_count, int cpu)
15507 {
15508         struct lpfc_queue *queue;
15509         struct lpfc_dmabuf *dmabuf;
15510         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15511         uint16_t x, pgcnt;
15512
15513         if (!phba->sli4_hba.pc_sli4_params.supported)
15514                 hw_page_size = page_size;
15515
15516         pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
15517
15518         /* If needed, adjust page count to match the max the adapter supports */
15519         if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
15520                 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
15521
15522         queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
15523                              GFP_KERNEL, cpu_to_node(cpu));
15524         if (!queue)
15525                 return NULL;
15526
15527         INIT_LIST_HEAD(&queue->list);
15528         INIT_LIST_HEAD(&queue->_poll_list);
15529         INIT_LIST_HEAD(&queue->wq_list);
15530         INIT_LIST_HEAD(&queue->wqfull_list);
15531         INIT_LIST_HEAD(&queue->page_list);
15532         INIT_LIST_HEAD(&queue->child_list);
15533         INIT_LIST_HEAD(&queue->cpu_list);
15534
15535         /* Set queue parameters now.  If the system cannot provide memory
15536          * resources, the free routine needs to know what was allocated.
15537          */
15538         queue->page_count = pgcnt;
15539         queue->q_pgs = (void **)&queue[1];
15540         queue->entry_cnt_per_pg = hw_page_size / entry_size;
15541         queue->entry_size = entry_size;
15542         queue->entry_count = entry_count;
15543         queue->page_size = hw_page_size;
15544         queue->phba = phba;
15545
15546         for (x = 0; x < queue->page_count; x++) {
15547                 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
15548                                       dev_to_node(&phba->pcidev->dev));
15549                 if (!dmabuf)
15550                         goto out_fail;
15551                 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
15552                                                   hw_page_size, &dmabuf->phys,
15553                                                   GFP_KERNEL);
15554                 if (!dmabuf->virt) {
15555                         kfree(dmabuf);
15556                         goto out_fail;
15557                 }
15558                 dmabuf->buffer_tag = x;
15559                 list_add_tail(&dmabuf->list, &queue->page_list);
15560                 /* use lpfc_sli4_qe to index a particular entry in this page */
15561                 queue->q_pgs[x] = dmabuf->virt;
15562         }
15563         INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
15564         INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
15565         INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
15566         INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
15567
15568         /* notify_interval will be set during q creation */
15569
15570         return queue;
15571 out_fail:
15572         lpfc_sli4_queue_free(queue);
15573         return NULL;
15574 }
15575
15576 /**
15577  * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
15578  * @phba: HBA structure that indicates port to create a queue on.
15579  * @pci_barset: PCI BAR set flag.
15580  *
15581  * This function returns the host memory address to which the specified
15582  * PCI BAR set was previously iomapped. The returned host memory
15583  * address can be NULL.
15584  */
15585 static void __iomem *
15586 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
15587 {
15588         if (!phba->pcidev)
15589                 return NULL;
15590
15591         switch (pci_barset) {
15592         case WQ_PCI_BAR_0_AND_1:
15593                 return phba->pci_bar0_memmap_p;
15594         case WQ_PCI_BAR_2_AND_3:
15595                 return phba->pci_bar2_memmap_p;
15596         case WQ_PCI_BAR_4_AND_5:
15597                 return phba->pci_bar4_memmap_p;
15598         default:
15599                 break;
15600         }
15601         return NULL;
15602 }
15603
15604 /**
15605  * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
15606  * @phba: HBA structure that EQs are on.
15607  * @startq: The starting EQ index to modify
15608  * @numq: The number of EQs (consecutive indexes) to modify
15609  * @usdelay: amount of delay
15610  *
15611  * This function revises the EQ delay on 1 or more EQs. The EQ delay
15612  * is set either by writing to a register (if supported by the SLI Port)
15613  * or by mailbox command. The mailbox command allows several EQs to be
15614  * updated at once.
15615  *
15616  * The @phba struct is used to send a mailbox command to HBA. The @startq
15617  * is used to get the starting EQ index to change. The @numq value is
15618  * used to specify how many consecutive EQ indexes, starting at EQ index,
15619  * are to be changed. This function is synchronous and waits for any
15620  * mailbox commands to finish before returning.
15621  *
15622  * This function returns no value. If memory cannot be allocated or a
15623  * mailbox command fails, the error is logged and the routine returns.
15624  * Note: on a mailbox failure, some EQs may already have had their
15625  * delay multiplier changed.
15626  **/
15627 void
15628 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
15629                          uint32_t numq, uint32_t usdelay)
15630 {
15631         struct lpfc_mbx_modify_eq_delay *eq_delay;
15632         LPFC_MBOXQ_t *mbox;
15633         struct lpfc_queue *eq;
15634         int cnt = 0, rc, length;
15635         uint32_t shdr_status, shdr_add_status;
15636         uint32_t dmult;
15637         int qidx;
15638         union lpfc_sli4_cfg_shdr *shdr;
15639
15640         if (startq >= phba->cfg_irq_chann)
15641                 return;
15642
15643         if (usdelay > 0xFFFF) {
15644                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
15645                                 "6429 usdelay %d too large. Scaled down to "
15646                                 "0xFFFF.\n", usdelay);
15647                 usdelay = 0xFFFF;
15648         }
15649
15650         /* set values by EQ_DELAY register if supported */
15651         if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
15652                 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15653                         eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15654                         if (!eq)
15655                                 continue;
15656
15657                         lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
15658
15659                         if (++cnt >= numq)
15660                                 break;
15661                 }
15662                 return;
15663         }
15664
15665         /* Otherwise, set values by mailbox cmd */
15666
15667         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15668         if (!mbox) {
15669                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15670                                 "6428 Failed allocating mailbox cmd buffer."
15671                                 " EQ delay was not set.\n");
15672                 return;
15673         }
15674         length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
15675                   sizeof(struct lpfc_sli4_cfg_mhdr));
15676         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15677                          LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
15678                          length, LPFC_SLI4_MBX_EMBED);
15679         eq_delay = &mbox->u.mqe.un.eq_delay;
15680
15681         /* Calculate delay multiplier from maximum interrupts per second */
15682         dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
15683         if (dmult)
15684                 dmult--;
15685         if (dmult > LPFC_DMULT_MAX)
15686                 dmult = LPFC_DMULT_MAX;
15687
15688         for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15689                 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15690                 if (!eq)
15691                         continue;
15692                 eq->q_mode = usdelay;
15693                 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
15694                 eq_delay->u.request.eq[cnt].phase = 0;
15695                 eq_delay->u.request.eq[cnt].delay_multi = dmult;
15696
15697                 if (++cnt >= numq)
15698                         break;
15699         }
15700         eq_delay->u.request.num_eq = cnt;
15701
15702         mbox->vport = phba->pport;
15703         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15704         mbox->ctx_buf = NULL;
15705         mbox->ctx_ndlp = NULL;
15706         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15707         shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
15708         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15709         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15710         if (shdr_status || shdr_add_status || rc) {
15711                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15712                                 "2512 MODIFY_EQ_DELAY mailbox failed with "
15713                                 "status x%x add_status x%x, mbx status x%x\n",
15714                                 shdr_status, shdr_add_status, rc);
15715         }
15716         mempool_free(mbox, phba->mbox_mem_pool);
15717         return;
15718 }
15719
15720 /**
15721  * lpfc_eq_create - Create an Event Queue on the HBA
15722  * @phba: HBA structure that indicates port to create a queue on.
15723  * @eq: The queue structure to use to create the event queue.
15724  * @imax: The maximum interrupt per second limit.
15725  *
15726  * This function creates an event queue, as detailed in @eq, on a port,
15727  * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
15728  *
15729  * The @phba struct is used to send mailbox command to HBA. The @eq struct
15730  * is used to get the entry count and entry size that are necessary to
15731  * determine the number of pages to allocate and use for this queue. This
15732  * function will send the EQ_CREATE mailbox command to the HBA to setup the
15733  * event queue. This function is synchronous and waits for the mailbox
15734  * command to finish before continuing.
15735  *
15736  * On success this function will return a zero. If unable to allocate enough
15737  * memory this function will return -ENOMEM. If the queue create mailbox command
15738  * fails this function will return -ENXIO.
15739  **/
15740 int
15741 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
15742 {
15743         struct lpfc_mbx_eq_create *eq_create;
15744         LPFC_MBOXQ_t *mbox;
15745         int rc, length, status = 0;
15746         struct lpfc_dmabuf *dmabuf;
15747         uint32_t shdr_status, shdr_add_status;
15748         union lpfc_sli4_cfg_shdr *shdr;
15749         uint16_t dmult;
15750         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15751
15752         /* sanity check on queue memory */
15753         if (!eq)
15754                 return -ENODEV;
15755         if (!phba->sli4_hba.pc_sli4_params.supported)
15756                 hw_page_size = SLI4_PAGE_SIZE;
15757
15758         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15759         if (!mbox)
15760                 return -ENOMEM;
15761         length = (sizeof(struct lpfc_mbx_eq_create) -
15762                   sizeof(struct lpfc_sli4_cfg_mhdr));
15763         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15764                          LPFC_MBOX_OPCODE_EQ_CREATE,
15765                          length, LPFC_SLI4_MBX_EMBED);
15766         eq_create = &mbox->u.mqe.un.eq_create;
15767         shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
15768         bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
15769                eq->page_count);
15770         bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
15771                LPFC_EQE_SIZE);
15772         bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
15773
15774         /* Use version 2 of CREATE_EQ if eqav is set */
15775         if (phba->sli4_hba.pc_sli4_params.eqav) {
15776                 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15777                        LPFC_Q_CREATE_VERSION_2);
15778                 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
15779                        phba->sli4_hba.pc_sli4_params.eqav);
15780         }
15781
15782         /* don't setup delay multiplier using EQ_CREATE */
15783         dmult = 0;
15784         bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
15785                dmult);
15786         switch (eq->entry_count) {
15787         default:
15788                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15789                                 "0360 Unsupported EQ count. (%d)\n",
15790                                 eq->entry_count);
15791                 if (eq->entry_count < 256) {
15792                         status = -EINVAL;
15793                         goto out;
15794                 }
15795                 fallthrough;    /* otherwise default to smallest count */
15796         case 256:
15797                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15798                        LPFC_EQ_CNT_256);
15799                 break;
15800         case 512:
15801                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15802                        LPFC_EQ_CNT_512);
15803                 break;
15804         case 1024:
15805                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15806                        LPFC_EQ_CNT_1024);
15807                 break;
15808         case 2048:
15809                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15810                        LPFC_EQ_CNT_2048);
15811                 break;
15812         case 4096:
15813                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15814                        LPFC_EQ_CNT_4096);
15815                 break;
15816         }
15817         list_for_each_entry(dmabuf, &eq->page_list, list) {
15818                 memset(dmabuf->virt, 0, hw_page_size);
15819                 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15820                                         putPaddrLow(dmabuf->phys);
15821                 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15822                                         putPaddrHigh(dmabuf->phys);
15823         }
15824         mbox->vport = phba->pport;
15825         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15826         mbox->ctx_buf = NULL;
15827         mbox->ctx_ndlp = NULL;
15828         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15829         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15830         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15831         if (shdr_status || shdr_add_status || rc) {
15832                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15833                                 "2500 EQ_CREATE mailbox failed with "
15834                                 "status x%x add_status x%x, mbx status x%x\n",
15835                                 shdr_status, shdr_add_status, rc);
15836                 status = -ENXIO;
15837         }
15838         eq->type = LPFC_EQ;
15839         eq->subtype = LPFC_NONE;
15840         eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
15841         if (eq->queue_id == 0xFFFF)
15842                 status = -ENXIO;
15843         eq->host_index = 0;
15844         eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
15845         eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
15846 out:
15847         mempool_free(mbox, phba->mbox_mem_pool);
15848         return status;
15849 }
15850
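/**
 * lpfc_cq_poll_hdler - irq_poll (softirq) handler for a completion queue
 * @iop: Pointer to the irq_poll context embedded in the CQ.
 * @budget: Poll budget from the irq_poll core (unused here; the CQ
 *          handler applies its own processing limits).
 *
 * Processes the CQ from softirq context and reports one unit of work
 * consumed.
 **/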
15851 static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget)
15852 {
15853         struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
15854
15855         __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
15856
15857         return 1;
15858 }
15859
15860 /**
15861  * lpfc_cq_create - Create a Completion Queue on the HBA
15862  * @phba: HBA structure that indicates port to create a queue on.
15863  * @cq: The queue structure to use to create the completion queue.
15864  * @eq: The event queue to bind this completion queue to.
15865  * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
15866  * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
15867  *
15868  * This function creates a completion queue, as detailed in @cq, on a port,
15869  * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
15870  *
15871  * The @phba struct is used to send mailbox command to HBA. The @cq struct
15872  * is used to get the entry count and entry size that are necessary to
15873  * determine the number of pages to allocate and use for this queue. The @eq
15874  * is used to indicate which event queue to bind this completion queue to. This
15875  * function will send the CQ_CREATE mailbox command to the HBA to setup the
15876  * completion queue. This function is asynchronous and will wait for the mailbox
15877  * completion queue. This function is synchronous and waits for the
15878  * mailbox command to finish before continuing.
15879  * On success this function will return a zero. If unable to allocate enough
15880  * memory this function will return -ENOMEM. If the queue create mailbox command
15881  * fails this function will return -ENXIO.
15882  **/
15883 int
15884 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
15885                struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
15886 {
15887         struct lpfc_mbx_cq_create *cq_create;
15888         struct lpfc_dmabuf *dmabuf;
15889         LPFC_MBOXQ_t *mbox;
15890         int rc, length, status = 0;
15891         uint32_t shdr_status, shdr_add_status;
15892         union lpfc_sli4_cfg_shdr *shdr;
15893
15894         /* sanity check on queue memory */
15895         if (!cq || !eq)
15896                 return -ENODEV;
15897
15898         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15899         if (!mbox)
15900                 return -ENOMEM;
15901         length = (sizeof(struct lpfc_mbx_cq_create) -
15902                   sizeof(struct lpfc_sli4_cfg_mhdr));
15903         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15904                          LPFC_MBOX_OPCODE_CQ_CREATE,
15905                          length, LPFC_SLI4_MBX_EMBED);
15906         cq_create = &mbox->u.mqe.un.cq_create;
15907         shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
15908         bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
15909                     cq->page_count);
15910         bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
15911         bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
15912         bf_set(lpfc_mbox_hdr_version, &shdr->request,
15913                phba->sli4_hba.pc_sli4_params.cqv);
15914         if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
15915                 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
15916                        (cq->page_size / SLI4_PAGE_SIZE));
15917                 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
15918                        eq->queue_id);
15919                 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
15920                        phba->sli4_hba.pc_sli4_params.cqav);
15921         } else {
15922                 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
15923                        eq->queue_id);
15924         }
15925         switch (cq->entry_count) {
15926         case 2048:
15927         case 4096:
15928                 if (phba->sli4_hba.pc_sli4_params.cqv ==
15929                     LPFC_Q_CREATE_VERSION_2) {
15930                         cq_create->u.request.context.lpfc_cq_context_count =
15931                                 cq->entry_count;
15932                         bf_set(lpfc_cq_context_count,
15933                                &cq_create->u.request.context,
15934                                LPFC_CQ_CNT_WORD7);
15935                         break;
15936                 }
15937                 fallthrough;
15938         default:
15939                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15940                                 "0361 Unsupported CQ count: "
15941                                 "entry cnt %d sz %d pg cnt %d\n",
15942                                 cq->entry_count, cq->entry_size,
15943                                 cq->page_count);
15944                 if (cq->entry_count < 256) {
15945                         status = -EINVAL;
15946                         goto out;
15947                 }
15948                 fallthrough;    /* otherwise default to smallest count */
15949         case 256:
15950                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15951                        LPFC_CQ_CNT_256);
15952                 break;
15953         case 512:
15954                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15955                        LPFC_CQ_CNT_512);
15956                 break;
15957         case 1024:
15958                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15959                        LPFC_CQ_CNT_1024);
15960                 break;
15961         }
15962         list_for_each_entry(dmabuf, &cq->page_list, list) {
15963                 memset(dmabuf->virt, 0, cq->page_size);
15964                 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15965                                         putPaddrLow(dmabuf->phys);
15966                 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15967                                         putPaddrHigh(dmabuf->phys);
15968         }
15969         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15970
15971         /* The IOCTL status is embedded in the mailbox subheader. */
15972         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15973         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15974         if (shdr_status || shdr_add_status || rc) {
15975                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15976                                 "2501 CQ_CREATE mailbox failed with "
15977                                 "status x%x add_status x%x, mbx status x%x\n",
15978                                 shdr_status, shdr_add_status, rc);
15979                 status = -ENXIO;
15980                 goto out;
15981         }
15982         cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
15983         if (cq->queue_id == 0xFFFF) {
15984                 status = -ENXIO;
15985                 goto out;
15986         }
15987         /* link the cq onto the parent eq child list */
15988         list_add_tail(&cq->list, &eq->child_list);
15989         /* Set up completion queue's type and subtype */
15990         cq->type = type;
15991         cq->subtype = subtype;
15993         cq->assoc_qid = eq->queue_id;
15994         cq->assoc_qp = eq;
15995         cq->host_index = 0;
15996         cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15997         cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
15998
15999         if (cq->queue_id > phba->sli4_hba.cq_max)
16000                 phba->sli4_hba.cq_max = cq->queue_id;
16001
16002         irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler);
16003 out:
16004         mempool_free(mbox, phba->mbox_mem_pool);
16005         return status;
16006 }
16007
16008 /**
16009  * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
16010  * @phba: HBA structure that indicates port to create a queue on.
16011  * @cqp: The queue structure array to use to create the completion queues.
16012  * @hdwq: The hardware queue array with the EQ to bind completion queues to.
16013  * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
16014  * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16015  *
16016  * This function creates a set of completion queues to support MRQ,
16017  * as detailed in @cqp, on a port,
16018  * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
16019  *
16020  * The @phba struct is used to send the mailbox command to the HBA. The
16021  * @cqp entries are used to get the entry count and entry size that are
16022  * necessary to determine the number of pages to allocate for each queue.
16023  * The @hdwq array is used to indicate which event queue to bind each
16024  * completion queue to. This function will send the CREATE_CQ_SET mailbox
16025  * command to the HBA to set up the completion queues. This function is
16026  * synchronous and waits for the mailbox command to finish before continuing.
16027  *
16028  * On success this function will return a zero. If unable to allocate enough
16029  * memory this function will return -ENOMEM. If the queue create mailbox command
16030  * fails this function will return -ENXIO.
16031  **/
16032 int
16033 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
16034                    struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
16035                    uint32_t subtype)
16036 {
16037         struct lpfc_queue *cq;
16038         struct lpfc_queue *eq;
16039         struct lpfc_mbx_cq_create_set *cq_set;
16040         struct lpfc_dmabuf *dmabuf;
16041         LPFC_MBOXQ_t *mbox;
16042         int rc, length, alloclen, status = 0;
16043         int cnt, idx, numcq, page_idx = 0;
16044         uint32_t shdr_status, shdr_add_status;
16045         union lpfc_sli4_cfg_shdr *shdr;
16046         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16047
16048         /* sanity check on queue memory */
16049         numcq = phba->cfg_nvmet_mrq;
16050         if (!cqp || !hdwq || !numcq)
16051                 return -ENODEV;
16052
16053         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16054         if (!mbox)
16055                 return -ENOMEM;
16056
16057         length = sizeof(struct lpfc_mbx_cq_create_set);
16058         length += ((numcq * cqp[0]->page_count) *
16059                    sizeof(struct dma_address));
16060         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16061                         LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
16062                         LPFC_SLI4_MBX_NEMBED);
16063         if (alloclen < length) {
16064                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16065                                 "3098 Allocated DMA memory size (%d) is "
16066                                 "less than the requested DMA memory size "
16067                                 "(%d)\n", alloclen, length);
16068                 status = -ENOMEM;
16069                 goto out;
16070         }
16071         cq_set = mbox->sge_array->addr[0];
16072         shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
16073         bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
16074
16075         for (idx = 0; idx < numcq; idx++) {
16076                 cq = cqp[idx];
16077                 eq = hdwq[idx].hba_eq;
16078                 if (!cq || !eq) {
16079                         status = -ENOMEM;
16080                         goto out;
16081                 }
16082                 if (!phba->sli4_hba.pc_sli4_params.supported)
16083                         hw_page_size = cq->page_size;
16084
16085                 switch (idx) {
16086                 case 0:
16087                         bf_set(lpfc_mbx_cq_create_set_page_size,
16088                                &cq_set->u.request,
16089                                (hw_page_size / SLI4_PAGE_SIZE));
16090                         bf_set(lpfc_mbx_cq_create_set_num_pages,
16091                                &cq_set->u.request, cq->page_count);
16092                         bf_set(lpfc_mbx_cq_create_set_evt,
16093                                &cq_set->u.request, 1);
16094                         bf_set(lpfc_mbx_cq_create_set_valid,
16095                                &cq_set->u.request, 1);
16096                         bf_set(lpfc_mbx_cq_create_set_cqe_size,
16097                                &cq_set->u.request, 0);
16098                         bf_set(lpfc_mbx_cq_create_set_num_cq,
16099                                &cq_set->u.request, numcq);
16100                         bf_set(lpfc_mbx_cq_create_set_autovalid,
16101                                &cq_set->u.request,
16102                                phba->sli4_hba.pc_sli4_params.cqav);
16103                         switch (cq->entry_count) {
16104                         case 2048:
16105                         case 4096:
16106                                 if (phba->sli4_hba.pc_sli4_params.cqv ==
16107                                     LPFC_Q_CREATE_VERSION_2) {
16108                                         bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16109                                                &cq_set->u.request,
16110                                                 cq->entry_count);
16111                                         bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16112                                                &cq_set->u.request,
16113                                                LPFC_CQ_CNT_WORD7);
16114                                         break;
16115                                 }
16116                                 fallthrough;
16117                         default:
16118                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16119                                                 "3118 Bad CQ count. (%d)\n",
16120                                                 cq->entry_count);
16121                                 if (cq->entry_count < 256) {
16122                                         status = -EINVAL;
16123                                         goto out;
16124                                 }
16125                                 fallthrough;    /* otherwise default to smallest */
16126                         case 256:
16127                                 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16128                                        &cq_set->u.request, LPFC_CQ_CNT_256);
16129                                 break;
16130                         case 512:
16131                                 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16132                                        &cq_set->u.request, LPFC_CQ_CNT_512);
16133                                 break;
16134                         case 1024:
16135                                 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16136                                        &cq_set->u.request, LPFC_CQ_CNT_1024);
16137                                 break;
16138                         }
16139                         bf_set(lpfc_mbx_cq_create_set_eq_id0,
16140                                &cq_set->u.request, eq->queue_id);
16141                         break;
16142                 case 1:
16143                         bf_set(lpfc_mbx_cq_create_set_eq_id1,
16144                                &cq_set->u.request, eq->queue_id);
16145                         break;
16146                 case 2:
16147                         bf_set(lpfc_mbx_cq_create_set_eq_id2,
16148                                &cq_set->u.request, eq->queue_id);
16149                         break;
16150                 case 3:
16151                         bf_set(lpfc_mbx_cq_create_set_eq_id3,
16152                                &cq_set->u.request, eq->queue_id);
16153                         break;
16154                 case 4:
16155                         bf_set(lpfc_mbx_cq_create_set_eq_id4,
16156                                &cq_set->u.request, eq->queue_id);
16157                         break;
16158                 case 5:
16159                         bf_set(lpfc_mbx_cq_create_set_eq_id5,
16160                                &cq_set->u.request, eq->queue_id);
16161                         break;
16162                 case 6:
16163                         bf_set(lpfc_mbx_cq_create_set_eq_id6,
16164                                &cq_set->u.request, eq->queue_id);
16165                         break;
16166                 case 7:
16167                         bf_set(lpfc_mbx_cq_create_set_eq_id7,
16168                                &cq_set->u.request, eq->queue_id);
16169                         break;
16170                 case 8:
16171                         bf_set(lpfc_mbx_cq_create_set_eq_id8,
16172                                &cq_set->u.request, eq->queue_id);
16173                         break;
16174                 case 9:
16175                         bf_set(lpfc_mbx_cq_create_set_eq_id9,
16176                                &cq_set->u.request, eq->queue_id);
16177                         break;
16178                 case 10:
16179                         bf_set(lpfc_mbx_cq_create_set_eq_id10,
16180                                &cq_set->u.request, eq->queue_id);
16181                         break;
16182                 case 11:
16183                         bf_set(lpfc_mbx_cq_create_set_eq_id11,
16184                                &cq_set->u.request, eq->queue_id);
16185                         break;
16186                 case 12:
16187                         bf_set(lpfc_mbx_cq_create_set_eq_id12,
16188                                &cq_set->u.request, eq->queue_id);
16189                         break;
16190                 case 13:
16191                         bf_set(lpfc_mbx_cq_create_set_eq_id13,
16192                                &cq_set->u.request, eq->queue_id);
16193                         break;
16194                 case 14:
16195                         bf_set(lpfc_mbx_cq_create_set_eq_id14,
16196                                &cq_set->u.request, eq->queue_id);
16197                         break;
16198                 case 15:
16199                         bf_set(lpfc_mbx_cq_create_set_eq_id15,
16200                                &cq_set->u.request, eq->queue_id);
16201                         break;
16202                 }
16203
16204                 /* link the cq onto the parent eq child list */
16205                 list_add_tail(&cq->list, &eq->child_list);
16206                 /* Set up completion queue's type and subtype */
16207                 cq->type = type;
16208                 cq->subtype = subtype;
16209                 cq->assoc_qid = eq->queue_id;
16210                 cq->assoc_qp = eq;
16211                 cq->host_index = 0;
16212                 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16213                 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
16214                                          cq->entry_count);
16215                 cq->chann = idx;
16216
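                /*
                 * Post this CQ's page addresses into the shared request.
                 * rc counts the pages posted so page_idx can advance to
                 * the next CQ's slots in the combined page array.
                 */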
16217                 rc = 0;
16218                 list_for_each_entry(dmabuf, &cq->page_list, list) {
16219                         memset(dmabuf->virt, 0, hw_page_size);
16220                         cnt = page_idx + dmabuf->buffer_tag;
16221                         cq_set->u.request.page[cnt].addr_lo =
16222                                         putPaddrLow(dmabuf->phys);
16223                         cq_set->u.request.page[cnt].addr_hi =
16224                                         putPaddrHigh(dmabuf->phys);
16225                         rc++;
16226                 }
16227                 page_idx += rc;
16228         }
16229
16230         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16231
16232         /* The IOCTL status is embedded in the mailbox subheader. */
16233         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16234         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16235         if (shdr_status || shdr_add_status || rc) {
16236                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16237                                 "3119 CQ_CREATE_SET mailbox failed with "
16238                                 "status x%x add_status x%x, mbx status x%x\n",
16239                                 shdr_status, shdr_add_status, rc);
16240                 status = -ENXIO;
16241                 goto out;
16242         }
16243         rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
16244         if (rc == 0xFFFF) {
16245                 status = -ENXIO;
16246                 goto out;
16247         }
16248
16249         for (idx = 0; idx < numcq; idx++) {
16250                 cq = cqp[idx];
16251                 cq->queue_id = rc + idx;
16252                 if (cq->queue_id > phba->sli4_hba.cq_max)
16253                         phba->sli4_hba.cq_max = cq->queue_id;
16254         }
16255
16256 out:
16257         lpfc_sli4_mbox_cmd_free(phba, mbox);
16258         return status;
16259 }
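
/*
 * Illustrative sketch only, not driver code: lpfc_cq_create_set() is
 * expected to be called from the SLI4 queue setup path. Assuming the
 * NVMET CQ set and the hardware queue array have already been
 * allocated, the call would look roughly like this (the names below
 * mirror the driver, but treat this as a hedged example rather than
 * the authoritative setup sequence):
 *
 *	rc = lpfc_cq_create_set(phba, phba->sli4_hba.nvmet_cqset,
 *				phba->sli4_hba.hdwq, LPFC_WCQ, LPFC_NVMET);
 *	if (rc)
 *		goto out_destroy;
 *
 * On success the firmware returns one base queue ID and every CQ in
 * the set is numbered contiguously from it (cq->queue_id = base + idx).
 */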
16260
16261 /**
16262  * lpfc_mq_create_fb_init - Send MQ_CREATE without async events registration
16263  * @phba: HBA structure that indicates port to create a queue on.
16264  * @mq: The queue structure to use to create the mailbox queue.
16265  * @mbox: An allocated pointer to type LPFC_MBOXQ_t
16266  * @cq: The completion queue to associate with this mailbox queue.
16267  *
16268  * This function provides fallback (fb) functionality when
16269  * mq_create_ext fails on older FW generations.  Its purpose is otherwise
16270  * identical to mq_create_ext.
16271  *
16272  * This routine cannot fail as all attributes were previously accessed and
16273  * initialized in mq_create_ext.
16274  **/
16275 static void
16276 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
16277                        LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
16278 {
16279         struct lpfc_mbx_mq_create *mq_create;
16280         struct lpfc_dmabuf *dmabuf;
16281         int length;
16282
16283         length = (sizeof(struct lpfc_mbx_mq_create) -
16284                   sizeof(struct lpfc_sli4_cfg_mhdr));
16285         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16286                          LPFC_MBOX_OPCODE_MQ_CREATE,
16287                          length, LPFC_SLI4_MBX_EMBED);
16288         mq_create = &mbox->u.mqe.un.mq_create;
16289         bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
16290                mq->page_count);
16291         bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
16292                cq->queue_id);
16293         bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
16294         switch (mq->entry_count) {
16295         case 16:
16296                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16297                        LPFC_MQ_RING_SIZE_16);
16298                 break;
16299         case 32:
16300                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16301                        LPFC_MQ_RING_SIZE_32);
16302                 break;
16303         case 64:
16304                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16305                        LPFC_MQ_RING_SIZE_64);
16306                 break;
16307         case 128:
16308                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16309                        LPFC_MQ_RING_SIZE_128);
16310                 break;
16311         }
16312         list_for_each_entry(dmabuf, &mq->page_list, list) {
16313                 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16314                         putPaddrLow(dmabuf->phys);
16315                 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16316                         putPaddrHigh(dmabuf->phys);
16317         }
16318 }
16319
16320 /**
16321  * lpfc_mq_create - Create a mailbox Queue on the HBA
16322  * @phba: HBA structure that indicates port to create a queue on.
16323  * @mq: The queue structure to use to create the mailbox queue.
16324  * @cq: The completion queue to associate with this mailbox queue.
16325  * @subtype: The queue's subtype.
16326  *
16327  * This function creates a mailbox queue, as detailed in @mq, on a port,
16328  * described by @phba, by sending an MQ_CREATE_EXT mailbox command to the HBA,
16329  * falling back to MQ_CREATE on older firmware.
16330  *
16331  * The @phba struct is used to send the mailbox command to the HBA. The @mq
16332  * struct is used to get the entry count and entry size that are necessary to
16333  * determine the number of pages to allocate and use for this queue. This
16334  * function issues the mailbox command in polled mode, so it is synchronous
16335  * and waits for the command to finish before returning.
16336  *
16337  * On success this function will return a zero. If unable to allocate enough
16338  * memory this function will return -ENOMEM. If the queue create mailbox command
16339  * fails this function will return -ENXIO.
16340  **/
16341 int32_t
16342 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
16343                struct lpfc_queue *cq, uint32_t subtype)
16344 {
16345         struct lpfc_mbx_mq_create *mq_create;
16346         struct lpfc_mbx_mq_create_ext *mq_create_ext;
16347         struct lpfc_dmabuf *dmabuf;
16348         LPFC_MBOXQ_t *mbox;
16349         int rc, length, status = 0;
16350         uint32_t shdr_status, shdr_add_status;
16351         union lpfc_sli4_cfg_shdr *shdr;
16352         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16353
16354         /* sanity check on queue memory */
16355         if (!mq || !cq)
16356                 return -ENODEV;
16357         if (!phba->sli4_hba.pc_sli4_params.supported)
16358                 hw_page_size = SLI4_PAGE_SIZE;
16359
16360         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16361         if (!mbox)
16362                 return -ENOMEM;
16363         length = (sizeof(struct lpfc_mbx_mq_create_ext) -
16364                   sizeof(struct lpfc_sli4_cfg_mhdr));
16365         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16366                          LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
16367                          length, LPFC_SLI4_MBX_EMBED);
16368
16369         mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
16370         shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
16371         bf_set(lpfc_mbx_mq_create_ext_num_pages,
16372                &mq_create_ext->u.request, mq->page_count);
16373         bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
16374                &mq_create_ext->u.request, 1);
16375         bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
16376                &mq_create_ext->u.request, 1);
16377         bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
16378                &mq_create_ext->u.request, 1);
16379         bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
16380                &mq_create_ext->u.request, 1);
16381         bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
16382                &mq_create_ext->u.request, 1);
16383         bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
16384         bf_set(lpfc_mbox_hdr_version, &shdr->request,
16385                phba->sli4_hba.pc_sli4_params.mqv);
16386         if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
16387                 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
16388                        cq->queue_id);
16389         else
16390                 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
16391                        cq->queue_id);
16392         switch (mq->entry_count) {
16393         default:
16394                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16395                                 "0362 Unsupported MQ count. (%d)\n",
16396                                 mq->entry_count);
16397                 if (mq->entry_count < 16) {
16398                         status = -EINVAL;
16399                         goto out;
16400                 }
16401                 fallthrough;    /* otherwise default to smallest count */
16402         case 16:
16403                 bf_set(lpfc_mq_context_ring_size,
16404                        &mq_create_ext->u.request.context,
16405                        LPFC_MQ_RING_SIZE_16);
16406                 break;
16407         case 32:
16408                 bf_set(lpfc_mq_context_ring_size,
16409                        &mq_create_ext->u.request.context,
16410                        LPFC_MQ_RING_SIZE_32);
16411                 break;
16412         case 64:
16413                 bf_set(lpfc_mq_context_ring_size,
16414                        &mq_create_ext->u.request.context,
16415                        LPFC_MQ_RING_SIZE_64);
16416                 break;
16417         case 128:
16418                 bf_set(lpfc_mq_context_ring_size,
16419                        &mq_create_ext->u.request.context,
16420                        LPFC_MQ_RING_SIZE_128);
16421                 break;
16422         }
16423         list_for_each_entry(dmabuf, &mq->page_list, list) {
16424                 memset(dmabuf->virt, 0, hw_page_size);
16425                 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
16426                                         putPaddrLow(dmabuf->phys);
16427                 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
16428                                         putPaddrHigh(dmabuf->phys);
16429         }
16430         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16431         mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16432                               &mq_create_ext->u.response);
16433         if (rc != MBX_SUCCESS) {
16434                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16435                                 "2795 MQ_CREATE_EXT failed with "
16436                                 "status x%x. Failback to MQ_CREATE.\n",
16437                                 rc);
16438                 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
16439                 mq_create = &mbox->u.mqe.un.mq_create;
16440                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16441                 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
16442                 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16443                                       &mq_create->u.response);
16444         }
16445
16446         /* The IOCTL status is embedded in the mailbox subheader. */
16447         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16448         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16449         if (shdr_status || shdr_add_status || rc) {
16450                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16451                                 "2502 MQ_CREATE mailbox failed with "
16452                                 "status x%x add_status x%x, mbx status x%x\n",
16453                                 shdr_status, shdr_add_status, rc);
16454                 status = -ENXIO;
16455                 goto out;
16456         }
16457         if (mq->queue_id == 0xFFFF) {
16458                 status = -ENXIO;
16459                 goto out;
16460         }
16461         mq->type = LPFC_MQ;
16462         mq->assoc_qid = cq->queue_id;
16463         mq->subtype = subtype;
16464         mq->host_index = 0;
16465         mq->hba_index = 0;
16466
16467         /* link the mq onto the parent cq child list */
16468         list_add_tail(&mq->list, &cq->child_list);
16469 out:
16470         mempool_free(mbox, phba->mbox_mem_pool);
16471         return status;
16472 }
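
/*
 * Illustrative sketch only: the SLI4 queue setup path creates the
 * single mailbox queue against the mailbox completion queue roughly as
 * follows (assumes phba->sli4_hba.mbx_wq and mbx_cq were allocated
 * earlier; a hedged example, not the authoritative call site):
 *
 *	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
 *			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
 *	if (rc)
 *		goto out_destroy;
 */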
16473
16474 /**
16475  * lpfc_wq_create - Create a Work Queue on the HBA
16476  * @phba: HBA structure that indicates port to create a queue on.
16477  * @wq: The queue structure to use to create the work queue.
16478  * @cq: The completion queue to bind this work queue to.
16479  * @subtype: The subtype of the work queue indicating its functionality.
16480  *
16481  * This function creates a work queue, as detailed in @wq, on a port, described
16482  * by @phba, by sending a WQ_CREATE mailbox command to the HBA.
16483  *
16484  * The @phba struct is used to send the mailbox command to the HBA. The @wq
16485  * struct is used to get the entry count and entry size that are necessary to
16486  * determine the number of pages to allocate and use for this queue. The @cq
16487  * is used to indicate which completion queue to bind this work queue to. This
16488  * function sends the WQ_CREATE mailbox command to the HBA to set up the
16489  * work queue. The command is issued in polled mode, so this function is
16490  * synchronous and waits for the mailbox command to finish before returning.
16491  *
16492  * On success this function will return a zero. If unable to allocate enough
16493  * memory this function will return -ENOMEM. If the queue create mailbox command
16494  * fails this function will return -ENXIO.
16495  **/
16496 int
16497 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
16498                struct lpfc_queue *cq, uint32_t subtype)
16499 {
16500         struct lpfc_mbx_wq_create *wq_create;
16501         struct lpfc_dmabuf *dmabuf;
16502         LPFC_MBOXQ_t *mbox;
16503         int rc, length, status = 0;
16504         uint32_t shdr_status, shdr_add_status;
16505         union lpfc_sli4_cfg_shdr *shdr;
16506         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16507         struct dma_address *page;
16508         void __iomem *bar_memmap_p;
16509         uint32_t db_offset;
16510         uint16_t pci_barset;
16511         uint8_t dpp_barset;
16512         uint32_t dpp_offset;
16513         uint8_t wq_create_version;
16514 #ifdef CONFIG_X86
16515         unsigned long pg_addr;
16516 #endif
16517
16518         /* sanity check on queue memory */
16519         if (!wq || !cq)
16520                 return -ENODEV;
16521         if (!phba->sli4_hba.pc_sli4_params.supported)
16522                 hw_page_size = wq->page_size;
16523
16524         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16525         if (!mbox)
16526                 return -ENOMEM;
16527         length = (sizeof(struct lpfc_mbx_wq_create) -
16528                   sizeof(struct lpfc_sli4_cfg_mhdr));
16529         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16530                          LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
16531                          length, LPFC_SLI4_MBX_EMBED);
16532         wq_create = &mbox->u.mqe.un.wq_create;
16533         shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
16534         bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
16535                     wq->page_count);
16536         bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
16537                     cq->queue_id);
16538
16539         /* wqv is the earliest version supported, NOT the latest */
16540         bf_set(lpfc_mbox_hdr_version, &shdr->request,
16541                phba->sli4_hba.pc_sli4_params.wqv);
16542
16543         if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
16544             (wq->page_size > SLI4_PAGE_SIZE))
16545                 wq_create_version = LPFC_Q_CREATE_VERSION_1;
16546         else
16547                 wq_create_version = LPFC_Q_CREATE_VERSION_0;
16548
16549         switch (wq_create_version) {
16550         case LPFC_Q_CREATE_VERSION_1:
16551                 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
16552                        wq->entry_count);
16553                 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16554                        LPFC_Q_CREATE_VERSION_1);
16555
16556                 switch (wq->entry_size) {
16557                 default:
16558                 case 64:
16559                         bf_set(lpfc_mbx_wq_create_wqe_size,
16560                                &wq_create->u.request_1,
16561                                LPFC_WQ_WQE_SIZE_64);
16562                         break;
16563                 case 128:
16564                         bf_set(lpfc_mbx_wq_create_wqe_size,
16565                                &wq_create->u.request_1,
16566                                LPFC_WQ_WQE_SIZE_128);
16567                         break;
16568                 }
16569                 /* Request DPP by default */
16570                 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
16571                 bf_set(lpfc_mbx_wq_create_page_size,
16572                        &wq_create->u.request_1,
16573                        (wq->page_size / SLI4_PAGE_SIZE));
16574                 page = wq_create->u.request_1.page;
16575                 break;
16576         default:
16577                 page = wq_create->u.request.page;
16578                 break;
16579         }
16580
16581         list_for_each_entry(dmabuf, &wq->page_list, list) {
16582                 memset(dmabuf->virt, 0, hw_page_size);
16583                 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
16584                 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
16585         }
16586
16587         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16588                 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
16589
16590         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16591         /* The IOCTL status is embedded in the mailbox subheader. */
16592         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16593         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16594         if (shdr_status || shdr_add_status || rc) {
16595                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16596                                 "2503 WQ_CREATE mailbox failed with "
16597                                 "status x%x add_status x%x, mbx status x%x\n",
16598                                 shdr_status, shdr_add_status, rc);
16599                 status = -ENXIO;
16600                 goto out;
16601         }
16602
16603         if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
16604                 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
16605                                         &wq_create->u.response);
16606         else
16607                 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
16608                                         &wq_create->u.response_1);
16609
16610         if (wq->queue_id == 0xFFFF) {
16611                 status = -ENXIO;
16612                 goto out;
16613         }
16614
16615         wq->db_format = LPFC_DB_LIST_FORMAT;
16616         if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
16617                 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16618                         wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
16619                                                &wq_create->u.response);
16620                         if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
16621                             (wq->db_format != LPFC_DB_RING_FORMAT)) {
16622                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16623                                                 "3265 WQ[%d] doorbell format "
16624                                                 "not supported: x%x\n",
16625                                                 wq->queue_id, wq->db_format);
16626                                 status = -EINVAL;
16627                                 goto out;
16628                         }
16629                         pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
16630                                             &wq_create->u.response);
16631                         bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16632                                                                    pci_barset);
16633                         if (!bar_memmap_p) {
16634                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16635                                                 "3263 WQ[%d] failed to memmap "
16636                                                 "pci barset:x%x\n",
16637                                                 wq->queue_id, pci_barset);
16638                                 status = -ENOMEM;
16639                                 goto out;
16640                         }
16641                         db_offset = wq_create->u.response.doorbell_offset;
16642                         if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
16643                             (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
16644                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16645                                                 "3252 WQ[%d] doorbell offset "
16646                                                 "not supported: x%x\n",
16647                                                 wq->queue_id, db_offset);
16648                                 status = -EINVAL;
16649                                 goto out;
16650                         }
16651                         wq->db_regaddr = bar_memmap_p + db_offset;
16652                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16653                                         "3264 WQ[%d]: barset:x%x, offset:x%x, "
16654                                         "format:x%x\n", wq->queue_id,
16655                                         pci_barset, db_offset, wq->db_format);
16656                 } else
16657                         wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16658         } else {
16659                 /* Check if DPP was honored by the firmware */
16660                 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
16661                                     &wq_create->u.response_1);
16662                 if (wq->dpp_enable) {
16663                         pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
16664                                             &wq_create->u.response_1);
16665                         bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16666                                                                    pci_barset);
16667                         if (!bar_memmap_p) {
16668                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16669                                                 "3267 WQ[%d] failed to memmap "
16670                                                 "pci barset:x%x\n",
16671                                                 wq->queue_id, pci_barset);
16672                                 status = -ENOMEM;
16673                                 goto out;
16674                         }
16675                         db_offset = wq_create->u.response_1.doorbell_offset;
16676                         wq->db_regaddr = bar_memmap_p + db_offset;
16677                         wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
16678                                             &wq_create->u.response_1);
16679                         dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
16680                                             &wq_create->u.response_1);
16681                         bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16682                                                                    dpp_barset);
16683                         if (!bar_memmap_p) {
16684                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16685                                                 "3268 WQ[%d] failed to memmap "
16686                                                 "pci barset:x%x\n",
16687                                                 wq->queue_id, dpp_barset);
16688                                 status = -ENOMEM;
16689                                 goto out;
16690                         }
16691                         dpp_offset = wq_create->u.response_1.dpp_offset;
16692                         wq->dpp_regaddr = bar_memmap_p + dpp_offset;
16693                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16694                                         "3271 WQ[%d]: barset:x%x, offset:x%x, "
16695                                         "dpp_id:x%x dpp_barset:x%x "
16696                                         "dpp_offset:x%x\n",
16697                                         wq->queue_id, pci_barset, db_offset,
16698                                         wq->dpp_id, dpp_barset, dpp_offset);
16699
16700 #ifdef CONFIG_X86
16701                         /* Enable combined writes for DPP aperture */
16702                         pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
16703                         rc = set_memory_wc(pg_addr, 1);
16704                         if (rc) {
16705                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16706                                         "3272 Cannot setup Combined "
16707                                         "Write on WQ[%d] - disable DPP\n",
16708                                         wq->queue_id);
16709                                 phba->cfg_enable_dpp = 0;
16710                         }
16711 #else
16712                         phba->cfg_enable_dpp = 0;
16713 #endif
16714                 } else
16715                         wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16716         }
16717         wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
16718         if (wq->pring == NULL) {
16719                 status = -ENOMEM;
16720                 goto out;
16721         }
16722         wq->type = LPFC_WQ;
16723         wq->assoc_qid = cq->queue_id;
16724         wq->subtype = subtype;
16725         wq->host_index = 0;
16726         wq->hba_index = 0;
16727         wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
16728
16729         /* link the wq onto the parent cq child list */
16730         list_add_tail(&wq->list, &cq->child_list);
16731 out:
16732         mempool_free(mbox, phba->mbox_mem_pool);
16733         return status;
16734 }
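
/*
 * Illustrative sketch only: work queues are normally created through
 * the driver's setup helpers; for instance, the ELS work queue is
 * bound to the ELS completion queue roughly as follows (names mirror
 * the driver; a hedged example, not the authoritative call site):
 *
 *	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
 *			    phba->sli4_hba.els_cq, LPFC_ELS);
 *	if (rc)
 *		goto out_destroy;
 */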
16735
16736 /**
16737  * lpfc_rq_create - Create a Receive Queue on the HBA
16738  * @phba: HBA structure that indicates port to create a queue on.
16739  * @hrq: The queue structure to use to create the header receive queue.
16740  * @drq: The queue structure to use to create the data receive queue.
16741  * @cq: The completion queue to bind this receive queue pair to.
16742  * @subtype: The subtype of the receive queues indicating their functionality.
16743  *
16744  * This function creates a receive buffer queue pair, as detailed in @hrq and
16745  * @drq, on a port, described by @phba, by sending an RQ_CREATE mailbox
16746  * command to the HBA.
16747  *
16748  * The @phba struct is used to send the mailbox command to the HBA. The @drq
16749  * and @hrq structs are used to get the entry count that is necessary to
16750  * determine the number of pages to use for each queue. The @cq indicates
16751  * which completion queue the buffers posted to these queues are bound to.
16752  * This function sends the RQ_CREATE mailbox command to the HBA to set up the
16753  * receive queue pair. The command is issued in polled mode, so this function
16754  * is synchronous and waits for the mailbox command to finish before returning.
16755  *
16756  * On success this function will return a zero. If unable to allocate enough
16757  * memory this function will return -ENOMEM. If the queue create mailbox command
16758  * fails this function will return -ENXIO.
16759  **/
16760 int
16761 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16762                struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
16763 {
16764         struct lpfc_mbx_rq_create *rq_create;
16765         struct lpfc_dmabuf *dmabuf;
16766         LPFC_MBOXQ_t *mbox;
16767         int rc, length, status = 0;
16768         uint32_t shdr_status, shdr_add_status;
16769         union lpfc_sli4_cfg_shdr *shdr;
16770         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16771         void __iomem *bar_memmap_p;
16772         uint32_t db_offset;
16773         uint16_t pci_barset;
16774
16775         /* sanity check on queue memory */
16776         if (!hrq || !drq || !cq)
16777                 return -ENODEV;
16778         if (!phba->sli4_hba.pc_sli4_params.supported)
16779                 hw_page_size = SLI4_PAGE_SIZE;
16780
16781         if (hrq->entry_count != drq->entry_count)
16782                 return -EINVAL;
16783         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16784         if (!mbox)
16785                 return -ENOMEM;
16786         length = (sizeof(struct lpfc_mbx_rq_create) -
16787                   sizeof(struct lpfc_sli4_cfg_mhdr));
16788         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16789                          LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16790                          length, LPFC_SLI4_MBX_EMBED);
16791         rq_create = &mbox->u.mqe.un.rq_create;
16792         shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16793         bf_set(lpfc_mbox_hdr_version, &shdr->request,
16794                phba->sli4_hba.pc_sli4_params.rqv);
16795         if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16796                 bf_set(lpfc_rq_context_rqe_count_1,
16797                        &rq_create->u.request.context,
16798                        hrq->entry_count);
16799                 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
16800                 bf_set(lpfc_rq_context_rqe_size,
16801                        &rq_create->u.request.context,
16802                        LPFC_RQE_SIZE_8);
16803                 bf_set(lpfc_rq_context_page_size,
16804                        &rq_create->u.request.context,
16805                        LPFC_RQ_PAGE_SIZE_4096);
16806         } else {
16807                 switch (hrq->entry_count) {
16808                 default:
16809                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16810                                         "2535 Unsupported RQ count. (%d)\n",
16811                                         hrq->entry_count);
16812                         if (hrq->entry_count < 512) {
16813                                 status = -EINVAL;
16814                                 goto out;
16815                         }
16816                         fallthrough;    /* otherwise default to smallest count */
16817                 case 512:
16818                         bf_set(lpfc_rq_context_rqe_count,
16819                                &rq_create->u.request.context,
16820                                LPFC_RQ_RING_SIZE_512);
16821                         break;
16822                 case 1024:
16823                         bf_set(lpfc_rq_context_rqe_count,
16824                                &rq_create->u.request.context,
16825                                LPFC_RQ_RING_SIZE_1024);
16826                         break;
16827                 case 2048:
16828                         bf_set(lpfc_rq_context_rqe_count,
16829                                &rq_create->u.request.context,
16830                                LPFC_RQ_RING_SIZE_2048);
16831                         break;
16832                 case 4096:
16833                         bf_set(lpfc_rq_context_rqe_count,
16834                                &rq_create->u.request.context,
16835                                LPFC_RQ_RING_SIZE_4096);
16836                         break;
16837                 }
16838                 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
16839                        LPFC_HDR_BUF_SIZE);
16840         }
16841         bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16842                cq->queue_id);
16843         bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16844                hrq->page_count);
16845         list_for_each_entry(dmabuf, &hrq->page_list, list) {
16846                 memset(dmabuf->virt, 0, hw_page_size);
16847                 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16848                                         putPaddrLow(dmabuf->phys);
16849                 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16850                                         putPaddrHigh(dmabuf->phys);
16851         }
16852         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16853                 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16854
16855         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16856         /* The IOCTL status is embedded in the mailbox subheader. */
16857         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16858         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16859         if (shdr_status || shdr_add_status || rc) {
16860                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16861                                 "2504 RQ_CREATE mailbox failed with "
16862                                 "status x%x add_status x%x, mbx status x%x\n",
16863                                 shdr_status, shdr_add_status, rc);
16864                 status = -ENXIO;
16865                 goto out;
16866         }
16867         hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16868         if (hrq->queue_id == 0xFFFF) {
16869                 status = -ENXIO;
16870                 goto out;
16871         }
16872
16873         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16874                 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
16875                                         &rq_create->u.response);
16876                 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
16877                     (hrq->db_format != LPFC_DB_RING_FORMAT)) {
16878                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16879                                         "3262 RQ [%d] doorbell format not "
16880                                         "supported: x%x\n", hrq->queue_id,
16881                                         hrq->db_format);
16882                         status = -EINVAL;
16883                         goto out;
16884                 }
16885
16886                 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
16887                                     &rq_create->u.response);
16888                 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
16889                 if (!bar_memmap_p) {
16890                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16891                                         "3269 RQ[%d] failed to memmap pci "
16892                                         "barset:x%x\n", hrq->queue_id,
16893                                         pci_barset);
16894                         status = -ENOMEM;
16895                         goto out;
16896                 }
16897
16898                 db_offset = rq_create->u.response.doorbell_offset;
16899                 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
16900                     (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
16901                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16902                                         "3270 RQ[%d] doorbell offset not "
16903                                         "supported: x%x\n", hrq->queue_id,
16904                                         db_offset);
16905                         status = -EINVAL;
16906                         goto out;
16907                 }
16908                 hrq->db_regaddr = bar_memmap_p + db_offset;
16909                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16910                                 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
16911                                 "format:x%x\n", hrq->queue_id, pci_barset,
16912                                 db_offset, hrq->db_format);
16913         } else {
16914                 hrq->db_format = LPFC_DB_RING_FORMAT;
16915                 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16916         }
16917         hrq->type = LPFC_HRQ;
16918         hrq->assoc_qid = cq->queue_id;
16919         hrq->subtype = subtype;
16920         hrq->host_index = 0;
16921         hrq->hba_index = 0;
16922         hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16923
16924         /* now create the data queue */
16925         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16926                          LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16927                          length, LPFC_SLI4_MBX_EMBED);
16928         bf_set(lpfc_mbox_hdr_version, &shdr->request,
16929                phba->sli4_hba.pc_sli4_params.rqv);
16930         if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16931                 bf_set(lpfc_rq_context_rqe_count_1,
16932                        &rq_create->u.request.context, hrq->entry_count);
16933                 if (subtype == LPFC_NVMET)
16934                         rq_create->u.request.context.buffer_size =
16935                                 LPFC_NVMET_DATA_BUF_SIZE;
16936                 else
16937                         rq_create->u.request.context.buffer_size =
16938                                 LPFC_DATA_BUF_SIZE;
16939                 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
16940                        LPFC_RQE_SIZE_8);
16941                 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
16942                        (PAGE_SIZE/SLI4_PAGE_SIZE));
16943         } else {
16944                 switch (drq->entry_count) {
16945                 default:
16946                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16947                                         "2536 Unsupported RQ count. (%d)\n",
16948                                         drq->entry_count);
16949                         if (drq->entry_count < 512) {
16950                                 status = -EINVAL;
16951                                 goto out;
16952                         }
16953                         fallthrough;    /* otherwise default to smallest count */
16954                 case 512:
16955                         bf_set(lpfc_rq_context_rqe_count,
16956                                &rq_create->u.request.context,
16957                                LPFC_RQ_RING_SIZE_512);
16958                         break;
16959                 case 1024:
16960                         bf_set(lpfc_rq_context_rqe_count,
16961                                &rq_create->u.request.context,
16962                                LPFC_RQ_RING_SIZE_1024);
16963                         break;
16964                 case 2048:
16965                         bf_set(lpfc_rq_context_rqe_count,
16966                                &rq_create->u.request.context,
16967                                LPFC_RQ_RING_SIZE_2048);
16968                         break;
16969                 case 4096:
16970                         bf_set(lpfc_rq_context_rqe_count,
16971                                &rq_create->u.request.context,
16972                                LPFC_RQ_RING_SIZE_4096);
16973                         break;
16974                 }
16975                 if (subtype == LPFC_NVMET)
16976                         bf_set(lpfc_rq_context_buf_size,
16977                                &rq_create->u.request.context,
16978                                LPFC_NVMET_DATA_BUF_SIZE);
16979                 else
16980                         bf_set(lpfc_rq_context_buf_size,
16981                                &rq_create->u.request.context,
16982                                LPFC_DATA_BUF_SIZE);
16983         }
16984         bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16985                cq->queue_id);
16986         bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16987                drq->page_count);
16988         list_for_each_entry(dmabuf, &drq->page_list, list) {
16989                 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16990                                         putPaddrLow(dmabuf->phys);
16991                 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16992                                         putPaddrHigh(dmabuf->phys);
16993         }
16994         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16995                 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16996         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16997         /* The IOCTL status is embedded in the mailbox subheader. */
16998         shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16999         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17000         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17001         if (shdr_status || shdr_add_status || rc) {
17002                 status = -ENXIO;
17003                 goto out;
17004         }
17005         drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17006         if (drq->queue_id == 0xFFFF) {
17007                 status = -ENXIO;
17008                 goto out;
17009         }
17010         drq->type = LPFC_DRQ;
17011         drq->assoc_qid = cq->queue_id;
17012         drq->subtype = subtype;
17013         drq->host_index = 0;
17014         drq->hba_index = 0;
17015         drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17016
17017         /* link the header and data RQs onto the parent cq child list */
17018         list_add_tail(&hrq->list, &cq->child_list);
17019         list_add_tail(&drq->list, &cq->child_list);
17020
17021 out:
17022         mempool_free(mbox, phba->mbox_mem_pool);
17023         return status;
17024 }
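
/*
 * Illustrative sketch only: the setup path creates the ELS header/data
 * receive queue pair against the ELS completion queue roughly as
 * follows (assumes hdr_rq, dat_rq and els_cq were allocated earlier;
 * a hedged example, not the authoritative call site):
 *
 *	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq,
 *			    phba->sli4_hba.dat_rq,
 *			    phba->sli4_hba.els_cq, LPFC_USOL);
 *	if (rc)
 *		goto out_destroy;
 */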
17025
17026 /**
17027  * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
17028  * @phba: HBA structure that indicates port to create a queue on.
17029  * @hrqp: The queue structure array to use to create the header receive queues.
17030  * @drqp: The queue structure array to use to create the data receive queues.
17031  * @cqp: The completion queue array to bind these receive queues to.
17032  * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc.).
17033  *
17034  * This function creates a set of receive buffer queue pairs, as detailed in
17035  * @hrqp and @drqp, on a port, described by @phba, by sending a single
17036  * RQ_CREATE mailbox command to the HBA.
17037  *
17038  * The @phba struct is used to send the mailbox command to the HBA. The @drqp
17039  * and @hrqp arrays are used to get the entry counts that are necessary to
17040  * determine the number of pages to use for each queue. The @cqp array
17041  * indicates which completion queue each receive queue pair is bound to.
17042  * This function sends the RQ_CREATE mailbox command to the HBA to set up the
17043  * receive queue pairs. The command is issued in polled mode, so this function
17044  * is synchronous and waits for the mailbox command to finish before returning.
17045  *
17046  * On success this function will return a zero. If unable to allocate enough
17047  * memory this function will return -ENOMEM. If the queue create mailbox command
17048  * fails this function will return -ENXIO.
17049  **/
17050 int
17051 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
17052                 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
17053                 uint32_t subtype)
17054 {
17055         struct lpfc_queue *hrq, *drq, *cq;
17056         struct lpfc_mbx_rq_create_v2 *rq_create;
17057         struct lpfc_dmabuf *dmabuf;
17058         LPFC_MBOXQ_t *mbox;
17059         int rc, length, alloclen, status = 0;
17060         int cnt, idx, numrq, page_idx = 0;
17061         uint32_t shdr_status, shdr_add_status;
17062         union lpfc_sli4_cfg_shdr *shdr;
17063         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17064
17065         numrq = phba->cfg_nvmet_mrq;
17066         /* sanity check on array memory */
17067         if (!hrqp || !drqp || !cqp || !numrq)
17068                 return -ENODEV;
17069         if (!phba->sli4_hba.pc_sli4_params.supported)
17070                 hw_page_size = SLI4_PAGE_SIZE;
17071
17072         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17073         if (!mbox)
17074                 return -ENOMEM;
17075
17076         length = sizeof(struct lpfc_mbx_rq_create_v2);
17077         length += ((2 * numrq * hrqp[0]->page_count) *
17078                    sizeof(struct dma_address));
17079
17080         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17081                                     LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
17082                                     LPFC_SLI4_MBX_NEMBED);
17083         if (alloclen < length) {
17084                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17085                                 "3099 Allocated DMA memory size (%d) is "
17086                                 "less than the requested DMA memory size "
17087                                 "(%d)\n", alloclen, length);
17088                 status = -ENOMEM;
17089                 goto out;
17090         }
17091
17094         rq_create = mbox->sge_array->addr[0];
17095         shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
17096
17097         bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
17098         cnt = 0;
17099
17100         for (idx = 0; idx < numrq; idx++) {
17101                 hrq = hrqp[idx];
17102                 drq = drqp[idx];
17103                 cq  = cqp[idx];
17104
17105                 /* sanity check on queue memory */
17106                 if (!hrq || !drq || !cq) {
17107                         status = -ENODEV;
17108                         goto out;
17109                 }
17110
17111                 if (hrq->entry_count != drq->entry_count) {
17112                         status = -EINVAL;
17113                         goto out;
17114                 }
17115
17116                 if (idx == 0) {
17117                         bf_set(lpfc_mbx_rq_create_num_pages,
17118                                &rq_create->u.request,
17119                                hrq->page_count);
17120                         bf_set(lpfc_mbx_rq_create_rq_cnt,
17121                                &rq_create->u.request, (numrq * 2));
17122                         bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
17123                                1);
17124                         bf_set(lpfc_rq_context_base_cq,
17125                                &rq_create->u.request.context,
17126                                cq->queue_id);
17127                         bf_set(lpfc_rq_context_data_size,
17128                                &rq_create->u.request.context,
17129                                LPFC_NVMET_DATA_BUF_SIZE);
17130                         bf_set(lpfc_rq_context_hdr_size,
17131                                &rq_create->u.request.context,
17132                                LPFC_HDR_BUF_SIZE);
17133                         bf_set(lpfc_rq_context_rqe_count_1,
17134                                &rq_create->u.request.context,
17135                                hrq->entry_count);
17136                         bf_set(lpfc_rq_context_rqe_size,
17137                                &rq_create->u.request.context,
17138                                LPFC_RQE_SIZE_8);
17139                         bf_set(lpfc_rq_context_page_size,
17140                                &rq_create->u.request.context,
17141                                (PAGE_SIZE/SLI4_PAGE_SIZE));
17142                 }
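                /*
                 * Post the header pages and then the data pages; rc
                 * counts pages posted so page_idx advances through the
                 * shared page array, which carries every queue pair's
                 * pages back to back.
                 */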
17143                 rc = 0;
17144                 list_for_each_entry(dmabuf, &hrq->page_list, list) {
17145                         memset(dmabuf->virt, 0, hw_page_size);
17146                         cnt = page_idx + dmabuf->buffer_tag;
17147                         rq_create->u.request.page[cnt].addr_lo =
17148                                         putPaddrLow(dmabuf->phys);
17149                         rq_create->u.request.page[cnt].addr_hi =
17150                                         putPaddrHigh(dmabuf->phys);
17151                         rc++;
17152                 }
17153                 page_idx += rc;
17154
17155                 rc = 0;
17156                 list_for_each_entry(dmabuf, &drq->page_list, list) {
17157                         memset(dmabuf->virt, 0, hw_page_size);
17158                         cnt = page_idx + dmabuf->buffer_tag;
17159                         rq_create->u.request.page[cnt].addr_lo =
17160                                         putPaddrLow(dmabuf->phys);
17161                         rq_create->u.request.page[cnt].addr_hi =
17162                                         putPaddrHigh(dmabuf->phys);
17163                         rc++;
17164                 }
17165                 page_idx += rc;
17166
17167                 hrq->db_format = LPFC_DB_RING_FORMAT;
17168                 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17169                 hrq->type = LPFC_HRQ;
17170                 hrq->assoc_qid = cq->queue_id;
17171                 hrq->subtype = subtype;
17172                 hrq->host_index = 0;
17173                 hrq->hba_index = 0;
17174                 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17175
17176                 drq->db_format = LPFC_DB_RING_FORMAT;
17177                 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17178                 drq->type = LPFC_DRQ;
17179                 drq->assoc_qid = cq->queue_id;
17180                 drq->subtype = subtype;
17181                 drq->host_index = 0;
17182                 drq->hba_index = 0;
17183                 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17184
17185                 list_add_tail(&hrq->list, &cq->child_list);
17186                 list_add_tail(&drq->list, &cq->child_list);
17187         }
17188
17189         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17190         /* The IOCTL status is embedded in the mailbox subheader. */
17191         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17192         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17193         if (shdr_status || shdr_add_status || rc) {
17194                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17195                                 "3120 RQ_CREATE mailbox failed with "
17196                                 "status x%x add_status x%x, mbx status x%x\n",
17197                                 shdr_status, shdr_add_status, rc);
17198                 status = -ENXIO;
17199                 goto out;
17200         }
17201         rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17202         if (rc == 0xFFFF) {
17203                 status = -ENXIO;
17204                 goto out;
17205         }
17206
17207         /* Initialize all RQs with associated queue id */
17208         for (idx = 0; idx < numrq; idx++) {
17209                 hrq = hrqp[idx];
17210                 hrq->queue_id = rc + (2 * idx);
17211                 drq = drqp[idx];
17212                 drq->queue_id = rc + (2 * idx) + 1;
17213         }
17214
17215 out:
17216         lpfc_sli4_mbox_cmd_free(phba, mbox);
17217         return status;
17218 }
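
/*
 * Illustrative sketch only: for NVMET the driver creates all MRQ pairs
 * with a single call, roughly as follows (assumes the nvmet_mrq_hdr,
 * nvmet_mrq_data and nvmet_cqset arrays were allocated earlier; a
 * hedged example, not the authoritative call site):
 *
 *	rc = lpfc_mrq_create(phba, phba->sli4_hba.nvmet_mrq_hdr,
 *			     phba->sli4_hba.nvmet_mrq_data,
 *			     phba->sli4_hba.nvmet_cqset, LPFC_NVMET);
 *
 * Queue IDs come back contiguously from one base ID: header RQ idx is
 * assigned base + 2 * idx and its data RQ base + 2 * idx + 1, matching
 * the initialization loop above.
 */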
17219
17220 /**
17221  * lpfc_eq_destroy - Destroy an event Queue on the HBA
17222  * @phba: HBA structure that indicates port to destroy a queue on.
17223  * @eq: The queue structure associated with the queue to destroy.
17224  *
17225  * This function destroys a queue, as detailed in @eq, by sending a mailbox
17226  * command, specific to the type of queue, to the HBA.
17227  *
17228  * The @eq struct is used to get the queue ID of the queue to destroy.
17229  *
17230  * On success this function will return a zero. If the queue destroy mailbox
17231  * command fails this function will return -ENXIO.
17232  **/
17233 int
17234 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
17235 {
17236         LPFC_MBOXQ_t *mbox;
17237         int rc, length, status = 0;
17238         uint32_t shdr_status, shdr_add_status;
17239         union lpfc_sli4_cfg_shdr *shdr;
17240
17241         /* sanity check on queue memory */
17242         if (!eq)
17243                 return -ENODEV;
17244
17245         mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
17246         if (!mbox)
17247                 return -ENOMEM;
17248         length = (sizeof(struct lpfc_mbx_eq_destroy) -
17249                   sizeof(struct lpfc_sli4_cfg_mhdr));
17250         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17251                          LPFC_MBOX_OPCODE_EQ_DESTROY,
17252                          length, LPFC_SLI4_MBX_EMBED);
17253         bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
17254                eq->queue_id);
17255         mbox->vport = eq->phba->pport;
17256         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17257
17258         rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
17259         /* The IOCTL status is embedded in the mailbox subheader. */
17260         shdr = (union lpfc_sli4_cfg_shdr *)
17261                 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
17262         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17263         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17264         if (shdr_status || shdr_add_status || rc) {
17265                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17266                                 "2505 EQ_DESTROY mailbox failed with "
17267                                 "status x%x add_status x%x, mbx status x%x\n",
17268                                 shdr_status, shdr_add_status, rc);
17269                 status = -ENXIO;
17270         }
17271
17272         /* Remove eq from any list */
17273         list_del_init(&eq->list);
17274         mempool_free(mbox, eq->phba->mbox_mem_pool);
17275         return status;
17276 }
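
/*
 * Illustrative sketch only: queues are torn down child-first, so a
 * teardown path would destroy the work/mailbox queues and completion
 * queues before their parent event queues, e.g. (hedged example; the
 * exact order lives in the SLI4 queue unset path):
 *
 *	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
 *	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
 *	lpfc_eq_destroy(phba, phba->sli4_hba.hdwq[qidx].hba_eq);
 */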
17277
17278 /**
17279  * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
17280  * @phba: HBA structure that indicates port to destroy a queue on.
17281  * @cq: The queue structure associated with the queue to destroy.
17282  *
17283  * This function destroys a queue, as detailed in @cq, by sending a mailbox
17284  * command, specific to the type of queue, to the HBA.
17285  *
17286  * The @cq struct is used to get the queue ID of the queue to destroy.
17287  *
17288  * On success this function will return a zero. If the queue destroy mailbox
17289  * command fails this function will return -ENXIO.
17290  **/
17291 int
17292 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
17293 {
17294         LPFC_MBOXQ_t *mbox;
17295         int rc, length, status = 0;
17296         uint32_t shdr_status, shdr_add_status;
17297         union lpfc_sli4_cfg_shdr *shdr;
17298
17299         /* sanity check on queue memory */
17300         if (!cq)
17301                 return -ENODEV;
17302         mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
17303         if (!mbox)
17304                 return -ENOMEM;
17305         length = (sizeof(struct lpfc_mbx_cq_destroy) -
17306                   sizeof(struct lpfc_sli4_cfg_mhdr));
17307         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17308                          LPFC_MBOX_OPCODE_CQ_DESTROY,
17309                          length, LPFC_SLI4_MBX_EMBED);
17310         bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
17311                cq->queue_id);
17312         mbox->vport = cq->phba->pport;
17313         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17314         rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
17315         /* The IOCTL status is embedded in the mailbox subheader. */
17316         shdr = (union lpfc_sli4_cfg_shdr *)
17317                 &mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
17318         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17319         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17320         if (shdr_status || shdr_add_status || rc) {
17321                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17322                                 "2506 CQ_DESTROY mailbox failed with "
17323                                 "status x%x add_status x%x, mbx status x%x\n",
17324                                 shdr_status, shdr_add_status, rc);
17325                 status = -ENXIO;
17326         }
17327         /* Remove cq from any list */
17328         list_del_init(&cq->list);
17329         mempool_free(mbox, cq->phba->mbox_mem_pool);
17330         return status;
17331 }
17332
17333 /**
17334  * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
17335  * @phba: HBA structure that indicates port to destroy a queue on.
17336  * @mq: The queue structure associated with the queue to destroy.
17337  *
17338  * This function destroys the mailbox queue detailed in @mq by sending
17339  * a mailbox command, specific to the type of queue, to the HBA.
17340  *
17341  * The @mq struct is used to get the queue ID of the queue to destroy.
17342  *
17343  * On success this function returns zero. If the queue destroy mailbox
17344  * command fails, this function returns -ENXIO.
17345  **/
17346 int
17347 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
17348 {
17349         LPFC_MBOXQ_t *mbox;
17350         int rc, length, status = 0;
17351         uint32_t shdr_status, shdr_add_status;
17352         union lpfc_sli4_cfg_shdr *shdr;
17353
17354         /* sanity check on queue memory */
17355         if (!mq)
17356                 return -ENODEV;
17357         mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
17358         if (!mbox)
17359                 return -ENOMEM;
17360         length = (sizeof(struct lpfc_mbx_mq_destroy) -
17361                   sizeof(struct lpfc_sli4_cfg_mhdr));
17362         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17363                          LPFC_MBOX_OPCODE_MQ_DESTROY,
17364                          length, LPFC_SLI4_MBX_EMBED);
17365         bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
17366                mq->queue_id);
17367         mbox->vport = mq->phba->pport;
17368         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17369         rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
17370         /* The IOCTL status is embedded in the mailbox subheader. */
17371         shdr = (union lpfc_sli4_cfg_shdr *)
17372                 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
17373         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17374         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17375         if (shdr_status || shdr_add_status || rc) {
17376                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17377                                 "2507 MQ_DESTROY mailbox failed with "
17378                                 "status x%x add_status x%x, mbx status x%x\n",
17379                                 shdr_status, shdr_add_status, rc);
17380                 status = -ENXIO;
17381         }
17382         /* Remove mq from any list */
17383         list_del_init(&mq->list);
17384         mempool_free(mbox, mq->phba->mbox_mem_pool);
17385         return status;
17386 }
17387
17388 /**
17389  * lpfc_wq_destroy - Destroy a Work Queue on the HBA
17390  * @phba: HBA structure that indicates port to destroy a queue on.
17391  * @wq: The queue structure associated with the queue to destroy.
17392  *
17393  * This function destroys the work queue detailed in @wq by sending
17394  * a mailbox command, specific to the type of queue, to the HBA.
17395  *
17396  * The @wq struct is used to get the queue ID of the queue to destroy.
17397  *
17398  * On success this function returns zero. If the queue destroy mailbox
17399  * command fails, this function returns -ENXIO.
17400  **/
17401 int
17402 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
17403 {
17404         LPFC_MBOXQ_t *mbox;
17405         int rc, length, status = 0;
17406         uint32_t shdr_status, shdr_add_status;
17407         union lpfc_sli4_cfg_shdr *shdr;
17408
17409         /* sanity check on queue memory */
17410         if (!wq)
17411                 return -ENODEV;
17412         mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
17413         if (!mbox)
17414                 return -ENOMEM;
17415         length = (sizeof(struct lpfc_mbx_wq_destroy) -
17416                   sizeof(struct lpfc_sli4_cfg_mhdr));
17417         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17418                          LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
17419                          length, LPFC_SLI4_MBX_EMBED);
17420         bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
17421                wq->queue_id);
17422         mbox->vport = wq->phba->pport;
17423         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17424         rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
17425         shdr = (union lpfc_sli4_cfg_shdr *)
17426                 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
17427         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17428         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17429         if (shdr_status || shdr_add_status || rc) {
17430                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17431                                 "2508 WQ_DESTROY mailbox failed with "
17432                                 "status x%x add_status x%x, mbx status x%x\n",
17433                                 shdr_status, shdr_add_status, rc);
17434                 status = -ENXIO;
17435         }
17436         /* Remove wq from any list */
17437         list_del_init(&wq->list);
17438         kfree(wq->pring);
17439         wq->pring = NULL;
17440         mempool_free(mbox, wq->phba->mbox_mem_pool);
17441         return status;
17442 }
17443
17444 /**
17445  * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
17446  * @phba: HBA structure that indicates port to destroy a queue on.
17447  * @hrq: The queue structure associated with the header receive queue.
17448  * @drq: The queue structure associated with the data receive queue.
17449  *
17450  * This function destroys the receive queue pair detailed in @hrq and @drq
17451  * by sending mailbox commands, specific to the type of queue, to the HBA.
17452  *
17453  * The @hrq and @drq structs are used to get the IDs of the queues to destroy.
17454  *
17455  * On success this function returns zero. If a queue destroy mailbox
17456  * command fails, this function returns -ENXIO.
17457  **/
17458 int
17459 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17460                 struct lpfc_queue *drq)
17461 {
17462         LPFC_MBOXQ_t *mbox;
17463         int rc, length, status = 0;
17464         uint32_t shdr_status, shdr_add_status;
17465         union lpfc_sli4_cfg_shdr *shdr;
17466
17467         /* sanity check on queue memory */
17468         if (!hrq || !drq)
17469                 return -ENODEV;
17470         mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
17471         if (!mbox)
17472                 return -ENOMEM;
17473         length = (sizeof(struct lpfc_mbx_rq_destroy) -
17474                   sizeof(struct lpfc_sli4_cfg_mhdr));
17475         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17476                          LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
17477                          length, LPFC_SLI4_MBX_EMBED);
17478         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17479                hrq->queue_id);
17480         mbox->vport = hrq->phba->pport;
17481         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17482         rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
17483         /* The IOCTL status is embedded in the mailbox subheader. */
17484         shdr = (union lpfc_sli4_cfg_shdr *)
17485                 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17486         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17487         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17488         if (shdr_status || shdr_add_status || rc) {
17489                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17490                                 "2509 RQ_DESTROY mailbox failed with "
17491                                 "status x%x add_status x%x, mbx status x%x\n",
17492                                 shdr_status, shdr_add_status, rc);
17493                 mempool_free(mbox, hrq->phba->mbox_mem_pool);
17494                 return -ENXIO;
17495         }
17496         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17497                drq->queue_id);
17498         rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
17499         shdr = (union lpfc_sli4_cfg_shdr *)
17500                 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17501         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17502         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17503         if (shdr_status || shdr_add_status || rc) {
17504                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17505                                 "2510 RQ_DESTROY mailbox failed with "
17506                                 "status x%x add_status x%x, mbx status x%x\n",
17507                                 shdr_status, shdr_add_status, rc);
17508                 status = -ENXIO;
17509         }
17510         list_del_init(&hrq->list);
17511         list_del_init(&drq->list);
17512         mempool_free(mbox, hrq->phba->mbox_mem_pool);
17513         return status;
17514 }
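
/*
 * Illustrative sketch, not driver code: the destroy helpers above are
 * normally invoked in the reverse order of queue creation, so that no
 * work or receive queue still feeds a completion queue, and no CQ still
 * feeds an event queue, when that queue is torn down.  Assuming an EQ
 * variant with the same (phba, queue) shape as its siblings here, and
 * with hypothetical field names, an unwind path could look like:
 *
 *	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
 *	lpfc_wq_destroy(phba, qp->io_wq);
 *	lpfc_cq_destroy(phba, qp->io_cq);
 *	lpfc_eq_destroy(phba, qp->hba_eq);
 *
 * Each helper returns 0 on success or a negative errno (-ENODEV, -ENOMEM,
 * -ENXIO), so a caller can log a failure and keep unwinding the rest.
 */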
17515
17516 /**
17517  * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
17518  * @phba: pointer to lpfc hba data structure.
17519  * @pdma_phys_addr0: Physical address of the 1st SGL page.
17520  * @pdma_phys_addr1: Physical address of the 2nd SGL page.
17521  * @xritag: the xritag that ties this io to the SGL pages.
17522  *
17523  * This routine will post the sgl pages for the IO that has the xritag
17524  * that is in the iocbq structure. The xritag is assigned during iocbq
17525  * creation and persists for as long as the driver is loaded.
17526  * If the caller has fewer than 256 scatter gather segments to map, then
17527  * pdma_phys_addr1 should be 0.
17528  * If the caller needs to map more than 256 scatter gather segments, then
17529  * pdma_phys_addr1 should be a valid physical address.
17530  * Physical addresses for SGLs must be 64-byte aligned.
17531  * If two SGL pages are mapped, the first must have 256 entries and the
17532  * second can have between 1 and 256 entries.
17533  *
17534  * Return codes:
17535  *      0 - Success
17536  *      -EINVAL, -ENXIO, -ENOMEM - Failure
17537  **/
17538 int
17539 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
17540                 dma_addr_t pdma_phys_addr0,
17541                 dma_addr_t pdma_phys_addr1,
17542                 uint16_t xritag)
17543 {
17544         struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
17545         LPFC_MBOXQ_t *mbox;
17546         int rc;
17547         uint32_t shdr_status, shdr_add_status;
17548         uint32_t mbox_tmo;
17549         union lpfc_sli4_cfg_shdr *shdr;
17550
17551         if (xritag == NO_XRI) {
17552                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17553                                 "0364 Invalid param: xritag is NO_XRI\n");
17554                 return -EINVAL;
17555         }
17556
17557         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17558         if (!mbox)
17559                 return -ENOMEM;
17560
17561         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17562                         LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17563                         sizeof(struct lpfc_mbx_post_sgl_pages) -
17564                         sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
17565
17566         post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
17567                                 &mbox->u.mqe.un.post_sgl_pages;
17568         bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
17569         bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
17570
17571         post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
17572                                 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
17573         post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
17574                                 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
17575
17576         post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
17577                                 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
17578         post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
17579                                 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
17580         if (!phba->sli4_hba.intr_enable)
17581                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17582         else {
17583                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17584                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17585         }
17586         /* The IOCTL status is embedded in the mailbox subheader. */
17587         shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
17588         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17589         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17590         if (!phba->sli4_hba.intr_enable)
17591                 mempool_free(mbox, phba->mbox_mem_pool);
17592         else if (rc != MBX_TIMEOUT)
17593                 mempool_free(mbox, phba->mbox_mem_pool);
17594         if (shdr_status || shdr_add_status || rc) {
17595                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17596                                 "2511 POST_SGL mailbox failed with status x%x add_status x%x, mbx status x%x\n",
17597                                 shdr_status, shdr_add_status, rc);
17598                 return -ENXIO;
17599         }
17600         return 0;
17601 }
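
/*
 * Illustrative sketch, not driver code: a hypothetical caller posting the
 * SGL pages for one xri.  phys0/phys1 stand for 64-byte-aligned bus
 * addresses from the caller's own DMA mapping.  Per the rules above, a
 * buffer needing more than 256 scatter gather entries supplies both pages:
 *
 *	rc = lpfc_sli4_post_sgl(phba, phys0, phys1, xritag);
 *
 * while 256 or fewer entries pass 0 for the second page:
 *
 *	rc = lpfc_sli4_post_sgl(phba, phys0, 0, xritag);
 *
 * A non-zero rc (-EINVAL, -ENOMEM, -ENXIO) means the port never saw the
 * SGL, so the xri must not be used for IO.
 */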
17602
17603 /**
17604  * lpfc_sli4_alloc_xri - Get an available xri in the device's range
17605  * @phba: pointer to lpfc hba data structure.
17606  *
17607  * This routine is invoked to allocate the next available logical xri
17608  * from the driver's xri bitmask. The xri returned is marked in use
17609  * until it is explicitly released with __lpfc_sli4_free_xri() or
17610  * lpfc_sli4_free_xri().
17611  *
17612  * Returns
17613  *      An available xri in the range 0 <= xri < max_xri if successful
17614  *      NO_XRI if no xris are available.
17615  **/
17616 static uint16_t
17617 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
17618 {
17619         unsigned long xri;
17620
17621         /*
17622          * Fetch the next logical xri.  Because this index is logical,
17623          * the driver starts at 0 each time.
17624          */
17625         spin_lock_irq(&phba->hbalock);
17626         xri = find_first_zero_bit(phba->sli4_hba.xri_bmask,
17627                                  phba->sli4_hba.max_cfg_param.max_xri);
17628         if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
17629                 spin_unlock_irq(&phba->hbalock);
17630                 return NO_XRI;
17631         } else {
17632                 set_bit(xri, phba->sli4_hba.xri_bmask);
17633                 phba->sli4_hba.max_cfg_param.xri_used++;
17634         }
17635         spin_unlock_irq(&phba->hbalock);
17636         return xri;
17637 }
17638
17639 /**
17640  * __lpfc_sli4_free_xri - Release an xri for reuse.
17641  * @phba: pointer to lpfc hba data structure.
17642  * @xri: xri to release.
17643  *
17644  * This routine is invoked to release an xri to the pool of available
17645  * xris maintained by the driver. The caller must hold the hbalock.
17646  **/
17647 static void
17648 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17649 {
17650         if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
17651                 phba->sli4_hba.max_cfg_param.xri_used--;
17652         }
17653 }
17654
17655 /**
17656  * lpfc_sli4_free_xri - Release an xri for reuse.
17657  * @phba: pointer to lpfc hba data structure.
17658  * @xri: xri to release.
17659  *
17660  * This routine is invoked to release an xri to the pool of
17661  * available xris maintained by the driver.
17662  **/
17663 void
17664 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17665 {
17666         spin_lock_irq(&phba->hbalock);
17667         __lpfc_sli4_free_xri(phba, xri);
17668         spin_unlock_irq(&phba->hbalock);
17669 }
17670
17671 /**
17672  * lpfc_sli4_next_xritag - Get an xritag for the io
17673  * @phba: Pointer to HBA context object.
17674  *
17675  * This function gets an xritag for the iocb. If there is no unused xritag
17676  * it will return NO_XRI (0xffff).
17677  * The function returns the allocated xritag if successful, else returns
17678  * NO_XRI. NO_XRI is not a valid xritag.
17679  * The caller is not required to hold any lock.
17680  **/
17681 uint16_t
17682 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
17683 {
17684         uint16_t xri_index;
17685
17686         xri_index = lpfc_sli4_alloc_xri(phba);
17687         if (xri_index == NO_XRI)
17688                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17689                                 "2004 Failed to allocate XRI. Last XRITAG is %d"
17690                                 " Max XRI is %d, Used XRI is %d\n",
17691                                 xri_index,
17692                                 phba->sli4_hba.max_cfg_param.max_xri,
17693                                 phba->sli4_hba.max_cfg_param.xri_used);
17694         return xri_index;
17695 }
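
/*
 * Illustrative sketch, not driver code: the xri tag lifecycle built from
 * the helpers above.  Tags come out of the driver's logical bitmask and
 * must be returned once the IO context is retired; otherwise xri_used
 * never drops and allocation eventually yields NO_XRI.
 *
 *	uint16_t tag = lpfc_sli4_next_xritag(phba);
 *
 *	if (tag == NO_XRI)
 *		return -ENOMEM;		// hypothetical caller's policy
 *	// ... tie tag to an SGL via lpfc_sli4_post_sgl(), issue the IO ...
 *	lpfc_sli4_free_xri(phba, tag);	// takes hbalock internally
 */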
17696
17697 /**
17698  * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
17699  * @phba: pointer to lpfc hba data structure.
17700  * @post_sgl_list: pointer to els sgl entry list.
17701  * @post_cnt: number of els sgl entries on the list.
17702  *
17703  * This routine is invoked to post a block of the driver's sgl pages to the
17704  * HBA using a non-embedded mailbox command. No lock is held. This routine
17705  * is only called when the driver is loading and after all IO has been
17706  * stopped.
17707  **/
17708 static int
17709 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
17710                             struct list_head *post_sgl_list,
17711                             int post_cnt)
17712 {
17713         struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
17714         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17715         struct sgl_page_pairs *sgl_pg_pairs;
17716         void *viraddr;
17717         LPFC_MBOXQ_t *mbox;
17718         uint32_t reqlen, alloclen, pg_pairs;
17719         uint32_t mbox_tmo;
17720         uint16_t xritag_start = 0;
17721         int rc = 0;
17722         uint32_t shdr_status, shdr_add_status;
17723         union lpfc_sli4_cfg_shdr *shdr;
17724
17725         reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
17726                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17727         if (reqlen > SLI4_PAGE_SIZE) {
17728                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17729                                 "2559 Block sgl registration required DMA "
17730                                 "size (%d) greater than a page\n", reqlen);
17731                 return -ENOMEM;
17732         }
17733
17734         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17735         if (!mbox)
17736                 return -ENOMEM;
17737
17738         /* Allocate DMA memory and set up the non-embedded mailbox command */
17739         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17740                          LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
17741                          LPFC_SLI4_MBX_NEMBED);
17742
17743         if (alloclen < reqlen) {
17744                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17745                                 "0285 Allocated DMA memory size (%d) is "
17746                                 "less than the requested DMA memory "
17747                                 "size (%d)\n", alloclen, reqlen);
17748                 lpfc_sli4_mbox_cmd_free(phba, mbox);
17749                 return -ENOMEM;
17750         }
17751         /* Set up the SGL pages in the non-embedded DMA pages */
17752         viraddr = mbox->sge_array->addr[0];
17753         sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17754         sgl_pg_pairs = &sgl->sgl_pg_pairs;
17755
17756         pg_pairs = 0;
17757         list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
17758                 /* Set up the sge entry */
17759                 sgl_pg_pairs->sgl_pg0_addr_lo =
17760                                 cpu_to_le32(putPaddrLow(sglq_entry->phys));
17761                 sgl_pg_pairs->sgl_pg0_addr_hi =
17762                                 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
17763                 sgl_pg_pairs->sgl_pg1_addr_lo =
17764                                 cpu_to_le32(putPaddrLow(0));
17765                 sgl_pg_pairs->sgl_pg1_addr_hi =
17766                                 cpu_to_le32(putPaddrHigh(0));
17767
17768                 /* Keep the first xritag on the list */
17769                 if (pg_pairs == 0)
17770                         xritag_start = sglq_entry->sli4_xritag;
17771                 sgl_pg_pairs++;
17772                 pg_pairs++;
17773         }
17774
17775         /* Complete initialization and perform endian conversion. */
17776         bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17777         bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
17778         sgl->word0 = cpu_to_le32(sgl->word0);
17779
17780         if (!phba->sli4_hba.intr_enable)
17781                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17782         else {
17783                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17784                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17785         }
17786         shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
17787         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17788         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17789         if (!phba->sli4_hba.intr_enable)
17790                 lpfc_sli4_mbox_cmd_free(phba, mbox);
17791         else if (rc != MBX_TIMEOUT)
17792                 lpfc_sli4_mbox_cmd_free(phba, mbox);
17793         if (shdr_status || shdr_add_status || rc) {
17794                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17795                                 "2513 POST_SGL_BLOCK mailbox command failed "
17796                                 "status x%x add_status x%x mbx status x%x\n",
17797                                 shdr_status, shdr_add_status, rc);
17798                 rc = -ENXIO;
17799         }
17800         return rc;
17801 }
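
/*
 * Sizing note, illustrative arithmetic: each sgl_page_pairs entry holds
 * two 64-bit page addresses split into four 32-bit words, i.e. 16 bytes.
 * With reqlen bounded by SLI4_PAGE_SIZE (4096) above, one non-embedded
 * command carries roughly (4096 - header) / 16, on the order of 250 page
 * pairs; LPFC_NEMBED_MBOX_SGL_CNT caps posting blocks accordingly.
 */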
17802
17803 /**
17804  * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
17805  * @phba: pointer to lpfc hba data structure.
17806  * @nblist: pointer to nvme buffer list.
17807  * @count: number of nvme buffers on the list.
17808  *
17809  * This routine is invoked to post a block of @count nvme sgl pages from an
17810  * nvme buffer list @nblist to the HBA using a non-embedded mailbox command.
17811  * No lock is held.
17812  *
17813  **/
17814 static int
17815 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
17816                             int count)
17817 {
17818         struct lpfc_io_buf *lpfc_ncmd;
17819         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17820         struct sgl_page_pairs *sgl_pg_pairs;
17821         void *viraddr;
17822         LPFC_MBOXQ_t *mbox;
17823         uint32_t reqlen, alloclen, pg_pairs;
17824         uint32_t mbox_tmo;
17825         uint16_t xritag_start = 0;
17826         int rc = 0;
17827         uint32_t shdr_status, shdr_add_status;
17828         dma_addr_t pdma_phys_bpl1;
17829         union lpfc_sli4_cfg_shdr *shdr;
17830
17831         /* Calculate the requested length of the dma memory */
17832         reqlen = count * sizeof(struct sgl_page_pairs) +
17833                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17834         if (reqlen > SLI4_PAGE_SIZE) {
17835                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
17836                                 "6118 Block sgl registration required DMA "
17837                                 "size (%d) greater than a page\n", reqlen);
17838                 return -ENOMEM;
17839         }
17840         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17841         if (!mbox) {
17842                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17843                                 "6119 Failed to allocate mbox cmd memory\n");
17844                 return -ENOMEM;
17845         }
17846
17847         /* Allocate DMA memory and set up the non-embedded mailbox command */
17848         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17849                                     LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17850                                     reqlen, LPFC_SLI4_MBX_NEMBED);
17851
17852         if (alloclen < reqlen) {
17853                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17854                                 "6120 Allocated DMA memory size (%d) is "
17855                                 "less than the requested DMA memory "
17856                                 "size (%d)\n", alloclen, reqlen);
17857                 lpfc_sli4_mbox_cmd_free(phba, mbox);
17858                 return -ENOMEM;
17859         }
17860
17861         /* Get the first SGE entry from the non-embedded DMA memory */
17862         viraddr = mbox->sge_array->addr[0];
17863
17864         /* Set up the SGL pages in the non-embedded DMA pages */
17865         sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17866         sgl_pg_pairs = &sgl->sgl_pg_pairs;
17867
17868         pg_pairs = 0;
17869         list_for_each_entry(lpfc_ncmd, nblist, list) {
17870                 /* Set up the sge entry */
17871                 sgl_pg_pairs->sgl_pg0_addr_lo =
17872                         cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
17873                 sgl_pg_pairs->sgl_pg0_addr_hi =
17874                         cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
17875                 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
17876                         pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
17877                                                 SGL_PAGE_SIZE;
17878                 else
17879                         pdma_phys_bpl1 = 0;
17880                 sgl_pg_pairs->sgl_pg1_addr_lo =
17881                         cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
17882                 sgl_pg_pairs->sgl_pg1_addr_hi =
17883                         cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
17884                 /* Keep the first xritag on the list */
17885                 if (pg_pairs == 0)
17886                         xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
17887                 sgl_pg_pairs++;
17888                 pg_pairs++;
17889         }
17890         bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17891         bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
17892         /* Perform endian conversion if necessary */
17893         sgl->word0 = cpu_to_le32(sgl->word0);
17894
17895         if (!phba->sli4_hba.intr_enable) {
17896                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17897         } else {
17898                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17899                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17900         }
17901         shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
17902         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17903         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17904         if (!phba->sli4_hba.intr_enable)
17905                 lpfc_sli4_mbox_cmd_free(phba, mbox);
17906         else if (rc != MBX_TIMEOUT)
17907                 lpfc_sli4_mbox_cmd_free(phba, mbox);
17908         if (shdr_status || shdr_add_status || rc) {
17909                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17910                                 "6125 POST_SGL_BLOCK mailbox command failed "
17911                                 "status x%x add_status x%x mbx status x%x\n",
17912                                 shdr_status, shdr_add_status, rc);
17913                 rc = -ENXIO;
17914         }
17915         return rc;
17916 }
17917
17918 /**
17919  * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
17920  * @phba: pointer to lpfc hba data structure.
17921  * @post_nblist: pointer to the nvme buffer list.
17922  * @sb_count: number of nvme buffers.
17923  *
17924  * This routine walks a list of nvme buffers that was passed in. It attempts
17925  * to construct blocks of nvme buffer sgls which contain contiguous xris and
17926  * uses the non-embedded SGL block post mailbox commands to post to the port.
17927  * For a single NVME buffer sgl with a non-contiguous xri, if any, it uses
17928  * the embedded SGL post mailbox command instead. The @post_nblist passed in
17929  * must be a local list, thus no lock is needed when manipulating the list.
17930  *
17931  * Returns: the number of successfully posted buffers, or 0 on failure.
17932  **/
17933 int
17934 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
17935                            struct list_head *post_nblist, int sb_count)
17936 {
17937         struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
17938         int status, sgl_size;
17939         int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
17940         dma_addr_t pdma_phys_sgl1;
17941         int last_xritag = NO_XRI;
17942         int cur_xritag;
17943         LIST_HEAD(prep_nblist);
17944         LIST_HEAD(blck_nblist);
17945         LIST_HEAD(nvme_nblist);
17946
17947         /* sanity check */
17948         if (sb_count <= 0)
17949                 return -EINVAL;
17950
17951         sgl_size = phba->cfg_sg_dma_buf_size;
17952         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
17953                 list_del_init(&lpfc_ncmd->list);
17954                 block_cnt++;
17955                 if ((last_xritag != NO_XRI) &&
17956                     (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
17957                         /* a hole in xri block, form a sgl posting block */
17958                         list_splice_init(&prep_nblist, &blck_nblist);
17959                         post_cnt = block_cnt - 1;
17960                         /* prepare list for next posting block */
17961                         list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17962                         block_cnt = 1;
17963                 } else {
17964                         /* prepare list for next posting block */
17965                         list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17966                         /* enough sgls for non-embed sgl mbox command */
17967                         if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
17968                                 list_splice_init(&prep_nblist, &blck_nblist);
17969                                 post_cnt = block_cnt;
17970                                 block_cnt = 0;
17971                         }
17972                 }
17973                 num_posting++;
17974                 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17975
17976                 /* end of repost sgl list condition for NVME buffers */
17977                 if (num_posting == sb_count) {
17978                         if (post_cnt == 0) {
17979                                 /* last sgl posting block */
17980                                 list_splice_init(&prep_nblist, &blck_nblist);
17981                                 post_cnt = block_cnt;
17982                         } else if (block_cnt == 1) {
17983                                 /* last single sgl with non-contiguous xri */
17984                                 if (sgl_size > SGL_PAGE_SIZE)
17985                                         pdma_phys_sgl1 =
17986                                                 lpfc_ncmd->dma_phys_sgl +
17987                                                 SGL_PAGE_SIZE;
17988                                 else
17989                                         pdma_phys_sgl1 = 0;
17990                                 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17991                                 status = lpfc_sli4_post_sgl(
17992                                                 phba, lpfc_ncmd->dma_phys_sgl,
17993                                                 pdma_phys_sgl1, cur_xritag);
17994                                 if (status) {
17995                                         /* Post error.  Buffer unavailable. */
17996                                         lpfc_ncmd->flags |=
17997                                                 LPFC_SBUF_NOT_POSTED;
17998                                 } else {
17999                                         /* Post success. Buffer available. */
18000                                         lpfc_ncmd->flags &=
18001                                                 ~LPFC_SBUF_NOT_POSTED;
18002                                         lpfc_ncmd->status = IOSTAT_SUCCESS;
18003                                         num_posted++;
18004                                 }
18005                                 /* posted or not, put on NVME buffer sgl list */
18006                                 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18007                         }
18008                 }
18009
18010                 /* continue until a non-embedded page worth of sgls */
18011                 if (post_cnt == 0)
18012                         continue;
18013
18014                 /* post block of NVME buffer list sgls */
18015                 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
18016                                                      post_cnt);
18017
18018                 /* don't reset xirtag due to hole in xri block */
18019                 if (block_cnt == 0)
18020                         last_xritag = NO_XRI;
18021
18022                 /* reset NVME buffer post count for next round of posting */
18023                 post_cnt = 0;
18024
18025                 /* put NVME buffers with posted sgls on the NVME buffer sgl list */
18026                 while (!list_empty(&blck_nblist)) {
18027                         list_remove_head(&blck_nblist, lpfc_ncmd,
18028                                          struct lpfc_io_buf, list);
18029                         if (status) {
18030                                 /* Post error.  Mark buffer unavailable. */
18031                                 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
18032                         } else {
18033                                 /* Post success, Mark buffer available. */
18034                                 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
18035                                 lpfc_ncmd->status = IOSTAT_SUCCESS;
18036                                 num_posted++;
18037                         }
18038                         list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18039                 }
18040         }
18041         /* Push NVME buffers with sgl posted to the available list */
18042         lpfc_io_buf_replenish(phba, &nvme_nblist);
18043
18044         return num_posted;
18045 }
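
/*
 * Worked example, illustrative: suppose @post_nblist holds buffers with
 * xris 100, 101, 102, 104 and 105.  The walk above grows a block through
 * 100-102, and the hole before 104 closes it, so those three are posted
 * with one non-embedded SGL block mailbox; 104-105 then form the final
 * block and are posted the same way.  Had the list ended on a lone
 * non-contiguous xri, that buffer would have been posted individually
 * through lpfc_sli4_post_sgl() instead.
 */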
18046
18047 /**
18048  * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
18049  * @phba: pointer to lpfc_hba struct that the frame was received on
18050  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18051  *
18052  * This function checks the fields in the @fc_hdr to see if the FC frame is a
18053  * valid type of frame that the LPFC driver will handle. This function will
18054  * valid type of frame that the LPFC driver will handle. This function
18055  * returns zero if the frame is valid or a non-zero value when the
18056  * frame does not pass the check.
18057 static int
18058 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
18059 {
18061         struct fc_vft_header *fc_vft_hdr;
18062         uint32_t *header = (uint32_t *) fc_hdr;
18063
18064 #define FC_RCTL_MDS_DIAGS       0xF4
18065
18066         switch (fc_hdr->fh_r_ctl) {
18067         case FC_RCTL_DD_UNCAT:          /* uncategorized information */
18068         case FC_RCTL_DD_SOL_DATA:       /* solicited data */
18069         case FC_RCTL_DD_UNSOL_CTL:      /* unsolicited control */
18070         case FC_RCTL_DD_SOL_CTL:        /* solicited control or reply */
18071         case FC_RCTL_DD_UNSOL_DATA:     /* unsolicited data */
18072         case FC_RCTL_DD_DATA_DESC:      /* data descriptor */
18073         case FC_RCTL_DD_UNSOL_CMD:      /* unsolicited command */
18074         case FC_RCTL_DD_CMD_STATUS:     /* command status */
18075         case FC_RCTL_ELS_REQ:   /* extended link services request */
18076         case FC_RCTL_ELS_REP:   /* extended link services reply */
18077         case FC_RCTL_ELS4_REQ:  /* FC-4 ELS request */
18078         case FC_RCTL_ELS4_REP:  /* FC-4 ELS reply */
18079         case FC_RCTL_BA_NOP:    /* basic link service NOP */
18080         case FC_RCTL_BA_ABTS:   /* basic link service abort */
18081         case FC_RCTL_BA_RMC:    /* remove connection */
18082         case FC_RCTL_BA_ACC:    /* basic accept */
18083         case FC_RCTL_BA_RJT:    /* basic reject */
18084         case FC_RCTL_BA_PRMT:
18085         case FC_RCTL_ACK_1:     /* acknowledge_1 */
18086         case FC_RCTL_ACK_0:     /* acknowledge_0 */
18087         case FC_RCTL_P_RJT:     /* port reject */
18088         case FC_RCTL_F_RJT:     /* fabric reject */
18089         case FC_RCTL_P_BSY:     /* port busy */
18090         case FC_RCTL_F_BSY:     /* fabric busy to data frame */
18091         case FC_RCTL_F_BSYL:    /* fabric busy to link control frame */
18092         case FC_RCTL_LCR:       /* link credit reset */
18093         case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
18094         case FC_RCTL_END:       /* end */
18095                 break;
18096         case FC_RCTL_VFTH:      /* Virtual Fabric tagging Header */
18097                 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18098                 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
18099                 return lpfc_fc_frame_check(phba, fc_hdr);
18100         default:
18101                 goto drop;
18102         }
18103
18104         switch (fc_hdr->fh_type) {
18105         case FC_TYPE_BLS:
18106         case FC_TYPE_ELS:
18107         case FC_TYPE_FCP:
18108         case FC_TYPE_CT:
18109         case FC_TYPE_NVME:
18110                 break;
18111         case FC_TYPE_IP:
18112         case FC_TYPE_ILS:
18113         default:
18114                 goto drop;
18115         }
18116
18117         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
18118                         "2538 Received frame rctl:x%x, type:x%x, "
18119                         "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
18120                         fc_hdr->fh_r_ctl, fc_hdr->fh_type,
18121                         be32_to_cpu(header[0]), be32_to_cpu(header[1]),
18122                         be32_to_cpu(header[2]), be32_to_cpu(header[3]),
18123                         be32_to_cpu(header[4]), be32_to_cpu(header[5]),
18124                         be32_to_cpu(header[6]));
18125         return 0;
18126 drop:
18127         lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
18128                         "2539 Dropped frame rctl:x%x type:x%x\n",
18129                         fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18130         return 1;
18131 }
18132
18133 /**
18134  * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
18135  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18136  *
18137  * This function processes the FC header to retrieve the VFI from the VF
18138  * header, if one exists. This function will return the VFI if one exists
18139  * or 0 if no VFT header exists.
18140  **/
18141 static uint32_t
18142 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
18143 {
18144         struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18145
18146         if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
18147                 return 0;
18148         return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
18149 }
18150
18151 /**
18152  * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
18153  * @phba: Pointer to the HBA structure to search for the vport on
18154  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18155  * @fcfi: The FC Fabric ID that the frame came from
18156  * @did: Destination ID to match against
18157  *
18158  * This function searches the @phba for a vport that matches the content of the
18159  * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
18160  * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
18161  * returns the matching vport pointer or NULL if unable to match frame to a
18162  * vport.
18163  **/
18164 static struct lpfc_vport *
18165 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
18166                        uint16_t fcfi, uint32_t did)
18167 {
18168         struct lpfc_vport **vports;
18169         struct lpfc_vport *vport = NULL;
18170         int i;
18171
18172         if (did == Fabric_DID)
18173                 return phba->pport;
18174         if ((phba->pport->fc_flag & FC_PT2PT) &&
18175                 phba->link_state != LPFC_HBA_READY)
18176                 return phba->pport;
18177
18178         vports = lpfc_create_vport_work_array(phba);
18179         if (vports != NULL) {
18180                 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
18181                         if (phba->fcf.fcfi == fcfi &&
18182                             vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
18183                             vports[i]->fc_myDID == did) {
18184                                 vport = vports[i];
18185                                 break;
18186                         }
18187                 }
18188         }
18189         lpfc_destroy_vport_work_array(phba, vports);
18190         return vport;
18191 }
18192
18193 /**
18194  * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
18195  * @vport: The vport to work on.
18196  *
18197  * This function updates the receive sequence time stamp for this vport. The
18198  * receive sequence time stamp indicates the time that the last frame of
18199  * the sequence that has been idle for the longest amount of time was
18200  * received. The driver uses this time stamp to determine if any received
18201  * sequences have timed out.
18202  **/
18203 static void
18204 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
18205 {
18206         struct lpfc_dmabuf *h_buf;
18207         struct hbq_dmabuf *dmabuf = NULL;
18208
18209         /* get the oldest sequence on the rcv list */
18210         h_buf = list_get_first(&vport->rcv_buffer_list,
18211                                struct lpfc_dmabuf, list);
18212         if (!h_buf)
18213                 return;
18214         dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18215         vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
18216 }
18217
18218 /**
18219  * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
18220  * @vport: The vport that the received sequences were sent to.
18221  *
18222  * This function cleans up all outstanding received sequences. This is called
18223  * by the driver when a link event or user action invalidates all the received
18224  * sequences.
18225  **/
18226 void
18227 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
18228 {
18229         struct lpfc_dmabuf *h_buf, *hnext;
18230         struct lpfc_dmabuf *d_buf, *dnext;
18231         struct hbq_dmabuf *dmabuf = NULL;
18232
18233         /* start with the oldest sequence on the rcv list */
18234         list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18235                 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18236                 list_del_init(&dmabuf->hbuf.list);
18237                 list_for_each_entry_safe(d_buf, dnext,
18238                                          &dmabuf->dbuf.list, list) {
18239                         list_del_init(&d_buf->list);
18240                         lpfc_in_buf_free(vport->phba, d_buf);
18241                 }
18242                 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18243         }
18244 }
18245
18246 /**
18247  * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
18248  * @vport: The vport that the received sequences were sent to.
18249  *
18250  * This function determines whether any received sequences have timed out by
18251  * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
18252  * indicates that there is at least one timed out sequence this routine will
18253  * go through the received sequences one at a time from most inactive to most
18254  * active to determine which ones need to be cleaned up. Once it has determined
18255  * that a sequence needs to be cleaned up it will simply free up the resources
18256  * without sending an abort.
18257  **/
18258 void
18259 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
18260 {
18261         struct lpfc_dmabuf *h_buf, *hnext;
18262         struct lpfc_dmabuf *d_buf, *dnext;
18263         struct hbq_dmabuf *dmabuf = NULL;
18264         unsigned long timeout;
18265         int abort_count = 0;
18266
18267         timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18268                    vport->rcv_buffer_time_stamp);
18269         if (list_empty(&vport->rcv_buffer_list) ||
18270             time_before(jiffies, timeout))
18271                 return;
18272         /* start with the oldest sequence on the rcv list */
18273         list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18274                 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18275                 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18276                            dmabuf->time_stamp);
18277                 if (time_before(jiffies, timeout))
18278                         break;
18279                 abort_count++;
18280                 list_del_init(&dmabuf->hbuf.list);
18281                 list_for_each_entry_safe(d_buf, dnext,
18282                                          &dmabuf->dbuf.list, list) {
18283                         list_del_init(&d_buf->list);
18284                         lpfc_in_buf_free(vport->phba, d_buf);
18285                 }
18286                 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18287         }
18288         if (abort_count)
18289                 lpfc_update_rcv_time_stamp(vport);
18290 }
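
/*
 * Illustrative arithmetic: the timeout test above is plain jiffies math.
 * With fc_edtov = 2000 (two seconds, the usual FC E_D_TOV default), a
 * sequence stamped at time T becomes eligible for cleanup once
 *
 *	time_before(jiffies, T + msecs_to_jiffies(2000))
 *
 * turns false, i.e. after roughly two seconds of inactivity, with the
 * comparison kept wrap-safe by time_before().
 */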
18291
18292 /**
18293  * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
18294  * @vport: pointer to a virtual port
18295  * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
18296  *
18297  * This function searches through the existing incomplete sequences that have
18298  * been sent to this @vport. If the frame matches one of the incomplete
18299  * sequences then the dbuf in the @dmabuf is added to the list of frames that
18300  * make up that sequence. If no sequence is found that matches this frame then
18301  * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
18302  * This function returns a pointer to the first dmabuf in the sequence list that
18303  * the frame was linked to.
18304  **/
18305 static struct hbq_dmabuf *
18306 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18307 {
18308         struct fc_frame_header *new_hdr;
18309         struct fc_frame_header *temp_hdr;
18310         struct lpfc_dmabuf *d_buf;
18311         struct lpfc_dmabuf *h_buf;
18312         struct hbq_dmabuf *seq_dmabuf = NULL;
18313         struct hbq_dmabuf *temp_dmabuf = NULL;
18314         uint8_t found = 0;
18315
18316         INIT_LIST_HEAD(&dmabuf->dbuf.list);
18317         dmabuf->time_stamp = jiffies;
18318         new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18319
18320         /* Use the hdr_buf to find the sequence that this frame belongs to */
18321         list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18322                 temp_hdr = (struct fc_frame_header *)h_buf->virt;
18323                 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18324                     (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18325                     (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18326                         continue;
18327                 /* found a pending sequence that matches this frame */
18328                 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18329                 break;
18330         }
18331         if (!seq_dmabuf) {
18332                 /*
18333                  * This indicates first frame received for this sequence.
18334                  * Queue the buffer on the vport's rcv_buffer_list.
18335                  */
18336                 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18337                 lpfc_update_rcv_time_stamp(vport);
18338                 return dmabuf;
18339         }
18340         temp_hdr = seq_dmabuf->hbuf.virt;
18341         if (be16_to_cpu(new_hdr->fh_seq_cnt) <
18342                 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18343                 list_del_init(&seq_dmabuf->hbuf.list);
18344                 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18345                 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18346                 lpfc_update_rcv_time_stamp(vport);
18347                 return dmabuf;
18348         }
18349         /* move this sequence to the tail to indicate a young sequence */
18350         list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
18351         seq_dmabuf->time_stamp = jiffies;
18352         lpfc_update_rcv_time_stamp(vport);
18353         if (list_empty(&seq_dmabuf->dbuf.list)) {
18354                 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18355                 return seq_dmabuf;
18356         }
18357         /* find the correct place in the sequence to insert this frame */
18358         d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
18359         while (!found) {
18360                 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18361                 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
18362                 /*
18363                  * If the frame's sequence count is greater than the frame on
18364                  * the list then insert the frame right after this frame
18365                  */
18366                 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
18367                         be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18368                         list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
18369                         found = 1;
18370                         break;
18371                 }
18372
18373                 if (&d_buf->list == &seq_dmabuf->dbuf.list)
18374                         break;
18375                 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
18376         }
18377
18378         if (found)
18379                 return seq_dmabuf;
18380         return NULL;
18381 }
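
/*
 * Worked example, illustrative: frames of one sequence arriving with
 * fh_seq_cnt 2, 0, 1.  Frame 2 creates the entry on rcv_buffer_list;
 * frame 0 compares lower than the head, so it takes over as the sequence
 * head; frame 1 is then walked backward from the tail of the dbuf list
 * and linked after frame 0, leaving the chain in ascending seq_cnt
 * order 0, 1, 2.
 */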
18382
18383 /**
18384  * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
18385  * @vport: pointer to a virtual port
18386  * @dmabuf: pointer to a dmabuf that describes the FC sequence
18387  *
18388  * This function tries to abort the partially assembled sequence described
18389  * by the information from the basic abort @dmabuf. It checks to see whether
18390  * such a partially assembled sequence is held by the driver. If so, it frees
18391  * up all the frames from the partially assembled sequence.
18392  *
18393  * Return
18394  * true  -- if a matching partially assembled sequence is present and all
18395  *          the frames are freed with the sequence;
18396  * false -- if there is no matching partially assembled sequence present, so
18397  *          nothing got aborted in the lower layer driver
18398  **/
18399 static bool
18400 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
18401                             struct hbq_dmabuf *dmabuf)
18402 {
18403         struct fc_frame_header *new_hdr;
18404         struct fc_frame_header *temp_hdr;
18405         struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
18406         struct hbq_dmabuf *seq_dmabuf = NULL;
18407
18408         /* Use the hdr_buf to find the sequence that matches this frame */
18409         INIT_LIST_HEAD(&dmabuf->dbuf.list);
18410         INIT_LIST_HEAD(&dmabuf->hbuf.list);
18411         new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18412         list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18413                 temp_hdr = (struct fc_frame_header *)h_buf->virt;
18414                 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18415                     (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18416                     (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18417                         continue;
18418                 /* found a pending sequence that matches this frame */
18419                 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18420                 break;
18421         }
18422
18423         /* Free up all the frames from the partially assembled sequence */
18424         if (seq_dmabuf) {
18425                 list_for_each_entry_safe(d_buf, n_buf,
18426                                          &seq_dmabuf->dbuf.list, list) {
18427                         list_del_init(&d_buf->list);
18428                         lpfc_in_buf_free(vport->phba, d_buf);
18429                 }
18430                 return true;
18431         }
18432         return false;
18433 }
18434
18435 /**
18436  * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
18437  * @vport: pointer to a virtual port
18438  * @dmabuf: pointer to a dmabuf that describes the FC sequence
18439  *
18440  * This function tries to abort the sequence assembled at the upper level
18441  * protocol, described by the information from the basic abort @dmabuf. It
18442  * checks to see whether such a pending context exists at the upper level
18443  * protocol. If so, it cleans up the pending context.
18444  *
18445  * Return
18446  * true  -- if a matching pending context of the sequence was cleaned
18447  *          at ulp;
18448  * false -- if no matching pending context of the sequence is present
18449  *          at ulp.
18450  **/
18451 static bool
18452 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18453 {
18454         struct lpfc_hba *phba = vport->phba;
18455         int handled;
18456
18457         /* Accepting abort at ulp with SLI4 only */
18458         if (phba->sli_rev < LPFC_SLI_REV4)
18459                 return false;
18460
18461         /* Register all caring upper level protocols to attend abort */
18462         handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
18463         if (handled)
18464                 return true;
18465
18466         return false;
18467 }
18468
18469 /**
18470  * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
18471  * @phba: Pointer to HBA context object.
18472  * @cmd_iocbq: pointer to the command iocbq structure.
18473  * @rsp_iocbq: pointer to the response iocbq structure.
18474  *
18475  * This function handles the sequence abort response iocb command complete
18476  * event. It properly releases the memory allocated to the sequence abort
18477  * accept iocb.
18478  **/
18479 static void
18480 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
18481                              struct lpfc_iocbq *cmd_iocbq,
18482                              struct lpfc_iocbq *rsp_iocbq)
18483 {
18484         struct lpfc_nodelist *ndlp;
18485
18486         if (cmd_iocbq) {
18487                 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
18488                 lpfc_nlp_put(ndlp);
18489                 lpfc_sli_release_iocbq(phba, cmd_iocbq);
18490         }
18491
18492         /* Failure means BLS ABORT RSP did not get delivered to remote node */
18493         if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
18494                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18495                         "3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
18496                         get_job_ulpstatus(phba, rsp_iocbq),
18497                         get_job_word4(phba, rsp_iocbq));
18498 }
18499
18500 /**
18501  * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
18502  * @phba: Pointer to HBA context object.
18503  * @xri: xri id in transaction.
18504  *
18505  * This function validates that the xri maps to the known range of XRIs
18506  * allocated and used by the driver.
18507  **/
18508 uint16_t
18509 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
18510                       uint16_t xri)
18511 {
18512         uint16_t i;
18513
18514         for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
18515                 if (xri == phba->sli4_hba.xri_ids[i])
18516                         return i;
18517         }
18518         return NO_XRI;
18519 }
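
/*
 * Example usage (as in lpfc_sli4_seq_abort_rsp() below): convert an XRI
 * taken off the wire into the driver's logical index before acting on it.
 *
 *	lxri = lpfc_sli4_xri_inrange(phba, xri);
 *	if (lxri != NO_XRI)
 *		lpfc_set_rrq_active(phba, ndlp, lxri, ...);
 */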
18520
18521 /**
18522  * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
18523  * @vport: pointer to a virtual port.
18524  * @fc_hdr: pointer to a FC frame header.
18525  * @aborted: was the partially assembled receive sequence successfully aborted
18526  *
18527  * This function sends a basic link service response (BA_ACC or BA_RJT)
18528  * to a previous unsolicited sequence abort event.
18529  **/
18530 void
18531 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
18532                         struct fc_frame_header *fc_hdr, bool aborted)
18533 {
18534         struct lpfc_hba *phba = vport->phba;
18535         struct lpfc_iocbq *ctiocb = NULL;
18536         struct lpfc_nodelist *ndlp;
18537         uint16_t oxid, rxid, xri, lxri;
18538         uint32_t sid, fctl;
18539         union lpfc_wqe128 *icmd;
18540         int rc;
18541
18542         if (!lpfc_is_link_up(phba))
18543                 return;
18544
18545         sid = sli4_sid_from_fc_hdr(fc_hdr);
18546         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
18547         rxid = be16_to_cpu(fc_hdr->fh_rx_id);
18548
18549         ndlp = lpfc_findnode_did(vport, sid);
18550         if (!ndlp) {
18551                 ndlp = lpfc_nlp_init(vport, sid);
18552                 if (!ndlp) {
18553                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
18554                                          "1268 Failed to allocate ndlp for "
18555                                          "oxid:x%x SID:x%x\n", oxid, sid);
18556                         return;
18557                 }
18558                 /* Put ndlp onto pport node list */
18559                 lpfc_enqueue_node(vport, ndlp);
18560         }
18561
18562         /* Allocate buffer for rsp iocb */
18563         ctiocb = lpfc_sli_get_iocbq(phba);
18564         if (!ctiocb)
18565                 return;
18566
18567         icmd = &ctiocb->wqe;
18568
18569         /* Extract the F_CTL field from FC_HDR */
18570         fctl = sli4_fctl_from_fc_hdr(fc_hdr);
18571
18572         ctiocb->context1 = lpfc_nlp_get(ndlp);
18573         if (!ctiocb->context1) {
18574                 lpfc_sli_release_iocbq(phba, ctiocb);
18575                 return;
18576         }
18577
18578         ctiocb->vport = phba->pport;
18579         ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
18580         ctiocb->sli4_lxritag = NO_XRI;
18581         ctiocb->sli4_xritag = NO_XRI;
18582         ctiocb->abort_rctl = FC_RCTL_BA_ACC;
18583
18584         if (fctl & FC_FC_EX_CTX)
18585                 /* Exchange responder sent the abort so we
18586                  * own the oxid.
18587                  */
18588                 xri = oxid;
18589         else
18590                 xri = rxid;
18591         lxri = lpfc_sli4_xri_inrange(phba, xri);
18592         if (lxri != NO_XRI)
18593                 lpfc_set_rrq_active(phba, ndlp, lxri,
18594                         (xri == oxid) ? rxid : oxid, 0);
18595         /* For BA_ABTS from exchange responder, if the logical xri with
18596          * the oxid maps to the FCP XRI range, the port no longer has
18597          * that exchange context, send a BLS_RJT. Override the IOCB for
18598          * a BA_RJT.
18599          */
18600         if ((fctl & FC_FC_EX_CTX) &&
18601             (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
18602                 ctiocb->abort_rctl = FC_RCTL_BA_RJT;
18603                 bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
18604                 bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
18605                        FC_BA_RJT_INV_XID);
18606                 bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
18607                        FC_BA_RJT_UNABLE);
18608         }
18609
18610         /* If BA_ABTS failed to abort a partially assembled receive sequence,
18611          * the driver no longer has that exchange, send a BLS_RJT. Override
18612          * the IOCB for a BA_RJT.
18613          */
18614         if (aborted == false) {
18615                 ctiocb->abort_rctl = FC_RCTL_BA_RJT;
18616                 bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
18617                 bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
18618                        FC_BA_RJT_INV_XID);
18619                 bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
18620                        FC_BA_RJT_UNABLE);
18621         }
18622
18623         if (fctl & FC_FC_EX_CTX) {
18624                 /* ABTS sent by responder to CT exchange, construction
18625                  * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
18626                  * field and RX_ID from ABTS for RX_ID field.
18627                  */
18628                 ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP;
18629                 bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
18630         } else {
18631                 /* ABTS sent by initiator to CT exchange, construction
18632                  * of BA_ACC will need to allocate a new XRI for the
18633                  * XRI_TAG field.
18634                  */
18635                 ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT;
18636         }
18637
18638         /* OX_ID is invariant regardless of who sent the ABTS to the exchange */
18639         bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, oxid);
18641
18642         /* Use CT=VPI */
18643         bf_set(wqe_els_did, &icmd->xmit_bls_rsp.wqe_dest,
18644                ndlp->nlp_DID);
18645         bf_set(xmit_bls_rsp64_temprpi, &icmd->xmit_bls_rsp,
18646                phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
18647         bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX);
18648
18649
18650         /* Xmit CT abts response on exchange <xid> */
18651         lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
18652                          "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
18653                          ctiocb->abort_rctl, oxid, phba->link_state);
18654
18655         lpfc_sli_prep_wqe(phba, ctiocb);
18656         rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
18657         if (rc == IOCB_ERROR) {
18658                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
18659                                  "2925 Failed to issue CT ABTS RSP x%x on "
18660                                  "xri x%x, Data x%x\n",
18661                                  ctiocb->abort_rctl, oxid,
18662                                  phba->link_state);
18663                 lpfc_nlp_put(ndlp);
18664                 ctiocb->context1 = NULL;
18665                 lpfc_sli_release_iocbq(phba, ctiocb);
18666         }
18667 }
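
/*
 * Response selection implemented above:
 *
 *	abort of the sequence succeeded, xri context known  -> BA_ACC
 *	BA_ABTS from responder, oxid outside the iocb range -> BA_RJT
 *	partial receive sequence could not be aborted       -> BA_RJT
 */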
18668
18669 /**
18670  * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
18671  * @vport: Pointer to the vport on which this sequence was received
18672  * @dmabuf: pointer to a dmabuf that describes the FC sequence
18673  *
18674  * This function handles an SLI-4 unsolicited abort event. If the unsolicited
18675  * receive sequence is only partially assembled by the driver, it shall abort
18676  * the partially assembled frames for the sequence. Otherwise, if the
18677  * unsolicited receive sequence has been completely assembled and passed to
18678  * the Upper Layer Protocol (ULP), it then marks the per-oxid status to
18679  * indicate that the unsolicited sequence has been aborted. After that, it
18680  * issues a basic accept to accept the abort.
18681  **/
18682 static void
18683 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
18684                              struct hbq_dmabuf *dmabuf)
18685 {
18686         struct lpfc_hba *phba = vport->phba;
18687         struct fc_frame_header fc_hdr;
18688         uint32_t fctl;
18689         bool aborted;
18690
18691         /* Make a copy of fc_hdr before the dmabuf being released */
18692         memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
18693         fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
18694
18695         if (fctl & FC_FC_EX_CTX) {
18696                 /* ABTS by responder to exchange, no cleanup needed */
18697                 aborted = true;
18698         } else {
18699                 /* ABTS by initiator to exchange, need to do cleanup */
18700                 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
18701                 if (aborted == false)
18702                         aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
18703         }
18704         lpfc_in_buf_free(phba, &dmabuf->dbuf);
18705
18706         if (phba->nvmet_support) {
18707                 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
18708                 return;
18709         }
18710
18711         /* Respond with BA_ACC or BA_RJT accordingly */
18712         lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
18713 }
18714
18715 /**
18716  * lpfc_seq_complete - Indicates if a sequence is complete
18717  * @dmabuf: pointer to a dmabuf that describes the FC sequence
18718  *
18719  * This function checks the sequence, starting with the frame described by
18720  * @dmabuf, to see if all the frames associated with this sequence are present.
18721  * The frames associated with this sequence are linked to the @dmabuf using the
18722  * dbuf list. This function looks for three major things: 1) the first frame
18723  * has a sequence count of zero; 2) there is a frame with the last-frame-of-
18724  * sequence bit set; 3) there are no holes in the sequence count. The function
18725  * returns 1 when the sequence is complete, otherwise it returns 0.
18726  **/
18727 static int
18728 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
18729 {
18730         struct fc_frame_header *hdr;
18731         struct lpfc_dmabuf *d_buf;
18732         struct hbq_dmabuf *seq_dmabuf;
18733         uint32_t fctl;
18734         int seq_count = 0;
18735
18736         hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18737         /* make sure the first frame of the sequence has a sequence count of zero */
18738         if (hdr->fh_seq_cnt != seq_count)
18739                 return 0;
18740         fctl = (hdr->fh_f_ctl[0] << 16 |
18741                 hdr->fh_f_ctl[1] << 8 |
18742                 hdr->fh_f_ctl[2]);
18743         /* If last frame of sequence we can return success. */
18744         if (fctl & FC_FC_END_SEQ)
18745                 return 1;
18746         list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
18747                 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18748                 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18749                 /* If there is a hole in the sequence count then fail. */
18750                 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
18751                         return 0;
18752                 fctl = (hdr->fh_f_ctl[0] << 16 |
18753                         hdr->fh_f_ctl[1] << 8 |
18754                         hdr->fh_f_ctl[2]);
18755                 /* If last frame of sequence we can return success. */
18756                 if (fctl & FC_FC_END_SEQ)
18757                         return 1;
18758         }
18759         return 0;
18760 }
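
/*
 * Example: frames with SEQ_CNT 0, 1, 2 where the last frame carries
 * FC_FC_END_SEQ form a complete sequence; SEQ_CNT 0, 2 (a hole at 1) or
 * SEQ_CNT 0, 1 with no FC_FC_END_SEQ set do not.
 */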
18761
18762 /**
18763  * lpfc_prep_seq - Prep sequence for ULP processing
18764  * @vport: Pointer to the vport on which this sequence was received
18765  * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
18766  *
18767  * This function takes a sequence, described by a list of frames, and creates
18768  * a list of iocbq structures to describe the sequence. This iocbq list is
18769  * then passed to the generic unsolicited sequence handler. This routine
18770  * returns a pointer to the first iocbq in the list. If it cannot allocate
18771  * an iocbq beyond the first, it throws out the received frames that could
18772  * not be described and still returns a pointer to the first iocbq. If it
18773  * cannot allocate any iocbqs (including the first), it returns NULL.
18774  **/
18775 static struct lpfc_iocbq *
18776 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
18777 {
18778         struct hbq_dmabuf *hbq_buf;
18779         struct lpfc_dmabuf *d_buf, *n_buf;
18780         struct lpfc_iocbq *first_iocbq, *iocbq;
18781         struct fc_frame_header *fc_hdr;
18782         uint32_t sid;
18783         uint32_t len, tot_len;
18784
18785         fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18786         /* remove from receive buffer list */
18787         list_del_init(&seq_dmabuf->hbuf.list);
18788         lpfc_update_rcv_time_stamp(vport);
18789         /* get the Remote Port's SID */
18790         sid = sli4_sid_from_fc_hdr(fc_hdr);
18791         tot_len = 0;
18792         /* Get an iocbq struct to fill in. */
18793         first_iocbq = lpfc_sli_get_iocbq(vport->phba);
18794         if (first_iocbq) {
18795                 /* Initialize the first IOCB. */
18796                 first_iocbq->wcqe_cmpl.total_data_placed = 0;
18797                 bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl,
18798                        IOSTAT_SUCCESS);
18799                 first_iocbq->vport = vport;
18800
18801                 /* Check FC Header to see what TYPE of frame we are rcv'ing */
18802                 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
18803                         bf_set(els_rsp64_sid, &first_iocbq->wqe.xmit_els_rsp,
18804                                sli4_did_from_fc_hdr(fc_hdr));
18805                 }
18806
18807                 bf_set(wqe_ctxt_tag, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
18808                        NO_XRI);
18809                 bf_set(wqe_rcvoxid, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
18810                        be16_to_cpu(fc_hdr->fh_ox_id));
18811
18812                 /* put the first buffer into the first iocb */
18813                 tot_len = bf_get(lpfc_rcqe_length,
18814                                  &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
18815
18816                 first_iocbq->context2 = &seq_dmabuf->dbuf;
18817                 first_iocbq->context3 = NULL;
18818                 /* Keep track of the BDE count */
18819                 first_iocbq->wcqe_cmpl.word3 = 1;
18820
18821                 if (tot_len > LPFC_DATA_BUF_SIZE)
18822                         first_iocbq->wqe.gen_req.bde.tus.f.bdeSize =
18823                                 LPFC_DATA_BUF_SIZE;
18824                 else
18825                         first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = tot_len;
18826
18827                 first_iocbq->wcqe_cmpl.total_data_placed = tot_len;
18828                 bf_set(wqe_els_did, &first_iocbq->wqe.xmit_els_rsp.wqe_dest,
18829                        sid);
18830         }
18831         iocbq = first_iocbq;
18832         /*
18833          * Each IOCBq can have two Buffers assigned, so go through the list
18834          * of buffers for this sequence and save two buffers in each IOCBq
18835          */
18836         list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
18837                 if (!iocbq) {
18838                         lpfc_in_buf_free(vport->phba, d_buf);
18839                         continue;
18840                 }
18841                 if (!iocbq->context3) {
18842                         iocbq->context3 = d_buf;
18843                         iocbq->wcqe_cmpl.word3++;
18844                         /* We need to get the size out of the right CQE */
18845                         hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18846                         len = bf_get(lpfc_rcqe_length,
18847                                        &hbq_buf->cq_event.cqe.rcqe_cmpl);
18848                         iocbq->unsol_rcv_len = len;
18849                         iocbq->wcqe_cmpl.total_data_placed += len;
18850                         tot_len += len;
18851                 } else {
18852                         iocbq = lpfc_sli_get_iocbq(vport->phba);
18853                         if (!iocbq) {
18854                                 if (first_iocbq) {
18855                                         bf_set(lpfc_wcqe_c_status,
18856                                                &first_iocbq->wcqe_cmpl,
18857                                                IOSTAT_SUCCESS);
18858                                         first_iocbq->wcqe_cmpl.parameter =
18859                                                 IOERR_NO_RESOURCES;
18860                                 }
18861                                 lpfc_in_buf_free(vport->phba, d_buf);
18862                                 continue;
18863                         }
18864                         /* We need to get the size out of the right CQE */
18865                         hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18866                         len = bf_get(lpfc_rcqe_length,
18867                                        &hbq_buf->cq_event.cqe.rcqe_cmpl);
18868                         iocbq->context2 = d_buf;
18869                         iocbq->context3 = NULL;
18870                         iocbq->wcqe_cmpl.word3 = 1;
18871
18872                         if (len > LPFC_DATA_BUF_SIZE)
18873                                 iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
18874                                         LPFC_DATA_BUF_SIZE;
18875                         else
18876                                 iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
18877                                         len;
18878
18879                         tot_len += len;
18880                         iocbq->wcqe_cmpl.total_data_placed = tot_len;
18881                         bf_set(wqe_els_did, &iocbq->wqe.xmit_els_rsp.wqe_dest,
18882                                sid);
18883                         list_add_tail(&iocbq->list, &first_iocbq->list);
18884                 }
18885         }
18886         /* Free the sequence's header buffer */
18887         if (!first_iocbq)
18888                 lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
18889
18890         return first_iocbq;
18891 }
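
/*
 * Example of the packing above: a five-buffer sequence is described by
 * three iocbqs -- buffers one and two in the first (context2/context3),
 * three and four in the second, buffer five in the third -- with the
 * extra iocbqs chained onto first_iocbq->list.
 */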
18892
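/**
 * lpfc_sli4_send_seq_to_ulp - Deliver a reassembled sequence to the ULP
 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function builds an iocbq list for the completed sequence with
 * lpfc_prep_seq() and hands it to the generic unsolicited iocb handler on
 * the ELS ring.  The iocbqs created for the sequence are released once
 * the handler returns.
 **/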
18893 static void
18894 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
18895                           struct hbq_dmabuf *seq_dmabuf)
18896 {
18897         struct fc_frame_header *fc_hdr;
18898         struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
18899         struct lpfc_hba *phba = vport->phba;
18900
18901         fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18902         iocbq = lpfc_prep_seq(vport, seq_dmabuf);
18903         if (!iocbq) {
18904                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18905                                 "2707 Ring %d handler: Failed to allocate "
18906                                 "iocb Rctl x%x Type x%x received\n",
18907                                 LPFC_ELS_RING,
18908                                 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18909                 return;
18910         }
18911         if (!lpfc_complete_unsol_iocb(phba,
18912                                       phba->sli4_hba.els_wq->pring,
18913                                       iocbq, fc_hdr->fh_r_ctl,
18914                                       fc_hdr->fh_type))
18915                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18916                                 "2540 Ring %d handler: unexpected Rctl "
18917                                 "x%x Type x%x received\n",
18918                                 LPFC_ELS_RING,
18919                                 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18920
18921         /* Free iocb created in lpfc_prep_seq */
18922         list_for_each_entry_safe(curr_iocb, next_iocb,
18923                 &iocbq->list, list) {
18924                 list_del_init(&curr_iocb->list);
18925                 lpfc_sli_release_iocbq(phba, curr_iocb);
18926         }
18927         lpfc_sli_release_iocbq(phba, iocbq);
18928 }
18929
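/**
 * lpfc_sli4_mds_loopback_cmpl - MDS loopback SEND_FRAME completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocb: pointer to the command iocbq structure.
 * @rspiocb: pointer to the response iocbq structure.
 *
 * This function frees the payload buffer and the iocbq used to echo an MDS
 * diagnostic frame back to the port, then drains the software txq.
 **/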
18930 static void
18931 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
18932                             struct lpfc_iocbq *rspiocb)
18933 {
18934         struct lpfc_dmabuf *pcmd = cmdiocb->context2;
18935
18936         if (pcmd && pcmd->virt)
18937                 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18938         kfree(pcmd);
18939         lpfc_sli_release_iocbq(phba, cmdiocb);
18940         lpfc_drain_txq(phba);
18941 }
18942
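/**
 * lpfc_sli4_handle_mds_loopback - Echo an MDS diagnostic frame back out
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to the dmabuf that describes the received frame
 *
 * This function copies the received MDS loopback payload into a driver
 * buffer and sends it back out with a SEND_FRAME WQE, reusing the received
 * FC header.  If no iocbq is available, the cq event is queued for the
 * worker thread to retry later.
 **/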
18943 static void
18944 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
18945                               struct hbq_dmabuf *dmabuf)
18946 {
18947         struct fc_frame_header *fc_hdr;
18948         struct lpfc_hba *phba = vport->phba;
18949         struct lpfc_iocbq *iocbq = NULL;
18950         union  lpfc_wqe128 *pwqe;
18951         struct lpfc_dmabuf *pcmd = NULL;
18952         uint32_t frame_len;
18953         int rc;
18954         unsigned long iflags;
18955
18956         fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18957         frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
18958
18959         /* Send the received frame back */
18960         iocbq = lpfc_sli_get_iocbq(phba);
18961         if (!iocbq) {
18962                 /* Queue cq event and wakeup worker thread to process it */
18963                 spin_lock_irqsave(&phba->hbalock, iflags);
18964                 list_add_tail(&dmabuf->cq_event.list,
18965                               &phba->sli4_hba.sp_queue_event);
18966                 phba->hba_flag |= HBA_SP_QUEUE_EVT;
18967                 spin_unlock_irqrestore(&phba->hbalock, iflags);
18968                 lpfc_worker_wake_up(phba);
18969                 return;
18970         }
18971
18972         /* Allocate buffer for command payload */
18973         pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
18974         if (pcmd)
18975                 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
18976                                             &pcmd->phys);
18977         if (!pcmd || !pcmd->virt)
18978                 goto exit;
18979
18980         INIT_LIST_HEAD(&pcmd->list);
18981
18982         /* copyin the payload */
18983         memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
18984
18985         iocbq->context2 = pcmd;
18986         iocbq->vport = vport;
18987         iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
18988         iocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
18989         iocbq->num_bdes = 0;
18990
18991         pwqe = &iocbq->wqe;
18992         /* fill in BDE's for command */
18993         pwqe->gen_req.bde.addrHigh = putPaddrHigh(pcmd->phys);
18994         pwqe->gen_req.bde.addrLow = putPaddrLow(pcmd->phys);
18995         pwqe->gen_req.bde.tus.f.bdeSize = frame_len;
18996         pwqe->gen_req.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
18997
18998         pwqe->send_frame.frame_len = frame_len;
18999         pwqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((__be32 *)fc_hdr));
19000         pwqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((__be32 *)fc_hdr + 1));
19001         pwqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((__be32 *)fc_hdr + 2));
19002         pwqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((__be32 *)fc_hdr + 3));
19003         pwqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((__be32 *)fc_hdr + 4));
19004         pwqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((__be32 *)fc_hdr + 5));
19005
19006         pwqe->generic.wqe_com.word7 = 0;
19007         pwqe->generic.wqe_com.word10 = 0;
19008
19009         bf_set(wqe_cmnd, &pwqe->generic.wqe_com, CMD_SEND_FRAME);
19010         bf_set(wqe_sof, &pwqe->generic.wqe_com, 0x2E); /* SOF byte: SOFi3 */
19011         bf_set(wqe_eof, &pwqe->generic.wqe_com, 0x41); /* EOF byte: EOFn */
19012         bf_set(wqe_lenloc, &pwqe->generic.wqe_com, 1);
19013         bf_set(wqe_xbl, &pwqe->generic.wqe_com, 1);
19014         bf_set(wqe_dbde, &pwqe->generic.wqe_com, 1);
19015         bf_set(wqe_xc, &pwqe->generic.wqe_com, 1);
19016         bf_set(wqe_cmd_type, &pwqe->generic.wqe_com, 0xA);
19017         bf_set(wqe_cqid, &pwqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
19018         bf_set(wqe_xri_tag, &pwqe->generic.wqe_com, iocbq->sli4_xritag);
19019         bf_set(wqe_reqtag, &pwqe->generic.wqe_com, iocbq->iotag);
19020         bf_set(wqe_class, &pwqe->generic.wqe_com, CLASS3);
19021         pwqe->generic.wqe_com.abort_tag = iocbq->iotag;
19022
19023         iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl;
19024
19025         rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
19026         if (rc == IOCB_ERROR)
19027                 goto exit;
19028
19029         lpfc_in_buf_free(phba, &dmabuf->dbuf);
19030         return;
19031
19032 exit:
19033         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
19034                         "2023 Unable to process MDS loopback frame\n");
19035         if (pcmd && pcmd->virt)
19036                 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19037         kfree(pcmd);
19038         if (iocbq)
19039                 lpfc_sli_release_iocbq(phba, iocbq);
19040         lpfc_in_buf_free(phba, &dmabuf->dbuf);
19041 }
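
/*
 * Note on the WQE built above: the echoed payload is described by a single
 * 64-bit BDE, while the six words of the received FC header travel in the
 * SEND_FRAME WQE itself (fc_hdr_wd0..fc_hdr_wd5).
 */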
19042
19043 /**
19044  * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
19045  * @phba: Pointer to HBA context object.
19046  * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
19047  *
19048  * This function is called with no lock held. It processes all the received
19049  * buffers and hands them to the upper layers when a received buffer
19050  * indicates that it is the final frame in the sequence. The interrupt
19051  * service routine queues received buffers from interrupt context; the
19052  * worker thread then calls lpfc_sli4_handle_received_buffer, which calls
19053  * the appropriate receive handler when a sequence's final frame arrives.
19054  **/
19055 void
19056 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
19057                                  struct hbq_dmabuf *dmabuf)
19058 {
19059         struct hbq_dmabuf *seq_dmabuf;
19060         struct fc_frame_header *fc_hdr;
19061         struct lpfc_vport *vport;
19062         uint32_t fcfi;
19063         uint32_t did;
19064
19065         /* Process each received buffer */
19066         fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19067
19068         if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
19069             fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
19070                 vport = phba->pport;
19071                 /* Handle MDS Loopback frames */
19072                 if  (!(phba->pport->load_flag & FC_UNLOADING))
19073                         lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19074                 else
19075                         lpfc_in_buf_free(phba, &dmabuf->dbuf);
19076                 return;
19077         }
19078
19079         /* check to see if this a valid type of frame */
19080         if (lpfc_fc_frame_check(phba, fc_hdr)) {
19081                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19082                 return;
19083         }
19084
19085         if (bf_get(lpfc_cqe_code,
19086                    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)
19087                 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
19088                               &dmabuf->cq_event.cqe.rcqe_cmpl);
19089         else
19090                 fcfi = bf_get(lpfc_rcqe_fcf_id,
19091                               &dmabuf->cq_event.cqe.rcqe_cmpl);
19092
19093         if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
19094                 vport = phba->pport;
19095                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
19096                                 "2023 MDS Loopback %d bytes\n",
19097                                 bf_get(lpfc_rcqe_length,
19098                                        &dmabuf->cq_event.cqe.rcqe_cmpl));
19099                 /* Handle MDS Loopback frames */
19100                 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19101                 return;
19102         }
19103
19104         /* d_id this frame is directed to */
19105         did = sli4_did_from_fc_hdr(fc_hdr);
19106
19107         vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
19108         if (!vport) {
19109                 /* throw out the frame */
19110                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19111                 return;
19112         }
19113
19114         /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
19115         if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
19116                 (did != Fabric_DID)) {
19117                 /*
19118                  * Throw out the frame if we are not pt2pt.
19119                  * The pt2pt protocol allows for discovery frames
19120                  * to be received without a registered VPI.
19121                  */
19122                 if (!(vport->fc_flag & FC_PT2PT) ||
19123                         (phba->link_state == LPFC_HBA_READY)) {
19124                         lpfc_in_buf_free(phba, &dmabuf->dbuf);
19125                         return;
19126                 }
19127         }
19128
19129         /* Handle the basic abort sequence (BA_ABTS) event */
19130         if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
19131                 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
19132                 return;
19133         }
19134
19135         /* Link this frame */
19136         seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
19137         if (!seq_dmabuf) {
19138                 /* unable to add frame to vport - throw it out */
19139                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19140                 return;
19141         }
19142         /* If not last frame in sequence continue processing frames. */
19143         if (!lpfc_seq_complete(seq_dmabuf))
19144                 return;
19145
19146         /* Send the complete sequence to the upper layer protocol */
19147         lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
19148 }
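
/*
 * Dispatch order implemented above: MDS diagnostic frames first, then basic
 * frame validation, FCFI/vport resolution, BA_ABTS handling, and finally
 * sequence reassembly with delivery to the ULP once the sequence completes.
 */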
19149
19150 /**
19151  * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
19152  * @phba: pointer to lpfc hba data structure.
19153  *
19154  * This routine is invoked to post rpi header templates to the
19155  * HBA consistent with the SLI-4 interface spec.  This routine
19156  * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
19157  * SLI4_PAGE_SIZE / 64 rpi context headers.
19158  *
19159  * This routine does not require any locks.  Its usage is expected
19160  * to be driver load or reset recovery, when driver execution is
19161  * sequential.
19162  *
19163  * Return codes
19164  *      0 - successful
19165  *      -EIO - The mailbox failed to complete successfully.
19166  *      When this error occurs, the driver is not guaranteed
19167  *      to have any rpi regions posted to the device and
19168  *      must either attempt to repost the regions or take a
19169  *      fatal error.
19170  **/
19171 int
19172 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
19173 {
19174         struct lpfc_rpi_hdr *rpi_page;
19175         uint32_t rc = 0;
19176         uint16_t lrpi = 0;
19177
19178         /* SLI4 ports that support extents do not require RPI headers. */
19179         if (!phba->sli4_hba.rpi_hdrs_in_use)
19180                 goto exit;
19181         if (phba->sli4_hba.extents_in_use)
19182                 return -EIO;
19183
19184         list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
19185                 /*
19186                  * Assign the rpi headers a physical rpi only if the driver
19187                  * has not initialized those resources.  A port reset only
19188                  * needs the headers posted.
19189                  */
19190                 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
19191                     LPFC_RPI_RSRC_RDY)
19192                         rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19193
19194                 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
19195                 if (rc != MBX_SUCCESS) {
19196                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19197                                         "2008 Error %d posting all rpi "
19198                                         "headers\n", rc);
19199                         rc = -EIO;
19200                         break;
19201                 }
19202         }
19203
19204  exit:
19205         bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
19206                LPFC_RPI_RSRC_RDY);
19207         return rc;
19208 }
19209
19210 /**
19211  * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
19212  * @phba: pointer to lpfc hba data structure.
19213  * @rpi_page:  pointer to the rpi memory region.
19214  *
19215  * This routine is invoked to post a single rpi header to the
19216  * HBA consistent with the SLI-4 interface spec.  This memory region
19217  * maps up to 64 rpi context regions.
19218  *
19219  * Return codes
19220  *      0 - successful
19221  *      -ENOMEM - No available memory
19222  *      -EIO - The mailbox failed to complete successfully.
19223  **/
19224 int
19225 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
19226 {
19227         LPFC_MBOXQ_t *mboxq;
19228         struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
19229         uint32_t rc = 0;
19230         uint32_t shdr_status, shdr_add_status;
19231         union lpfc_sli4_cfg_shdr *shdr;
19232
19233         /* SLI4 ports that support extents do not require RPI headers. */
19234         if (!phba->sli4_hba.rpi_hdrs_in_use)
19235                 return rc;
19236         if (phba->sli4_hba.extents_in_use)
19237                 return -EIO;
19238
19239         /* The port is notified of the header region via a mailbox command. */
19240         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19241         if (!mboxq) {
19242                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19243                                 "2001 Unable to allocate memory for issuing "
19244                                 "SLI_CONFIG_SPECIAL mailbox command\n");
19245                 return -ENOMEM;
19246         }
19247
19248         /* Post all rpi memory regions to the port. */
19249         hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
19250         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19251                          LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
19252                          sizeof(struct lpfc_mbx_post_hdr_tmpl) -
19253                          sizeof(struct lpfc_sli4_cfg_mhdr),
19254                          LPFC_SLI4_MBX_EMBED);
19255
19256
19257         /* Post the physical rpi to the port for this rpi header. */
19258         bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
19259                rpi_page->start_rpi);
19260         bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
19261                hdr_tmpl, rpi_page->page_count);
19262
19263         hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
19264         hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
19265         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19266         shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
19267         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19268         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19269         mempool_free(mboxq, phba->mbox_mem_pool);
19270         if (shdr_status || shdr_add_status || rc) {
19271                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19272                                 "2514 POST_RPI_HDR mailbox failed with "
19273                                 "status x%x add_status x%x, mbx status x%x\n",
19274                                 shdr_status, shdr_add_status, rc);
19275                 rc = -ENXIO;
19276         } else {
19277                 /*
19278          * The next_rpi stores the next logical modulo-64 rpi value used
19279                  * to post physical rpis in subsequent rpi postings.
19280                  */
19281                 spin_lock_irq(&phba->hbalock);
19282                 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
19283                 spin_unlock_irq(&phba->hbalock);
19284         }
19285         return rc;
19286 }
19287
19288 /**
19289  * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
19290  * @phba: pointer to lpfc hba data structure.
19291  *
19292  * This routine is invoked to allocate the next available rpi from the
19293  * driver's rpi bitmask.  If rpi resources are running low, it also
19294  * attempts to allocate and post another rpi header page so subsequent
19295  * allocations can succeed.
19296  *
19297  * Returns
19298  *      A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
19299  *      LPFC_RPI_ALLOC_ERROR if no rpis are available.
19300  **/
19301 int
19302 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
19303 {
19304         unsigned long rpi;
19305         uint16_t max_rpi, rpi_limit;
19306         uint16_t rpi_remaining, lrpi = 0;
19307         struct lpfc_rpi_hdr *rpi_hdr;
19308         unsigned long iflag;
19309
19310         /*
19311          * Fetch the next logical rpi.  Because this index is logical,
19312          * the  driver starts at 0 each time.
19313          */
19314         spin_lock_irqsave(&phba->hbalock, iflag);
19315         max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
19316         rpi_limit = phba->sli4_hba.next_rpi;
19317
19318         rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit);
19319         if (rpi >= rpi_limit)
19320                 rpi = LPFC_RPI_ALLOC_ERROR;
19321         else {
19322                 set_bit(rpi, phba->sli4_hba.rpi_bmask);
19323                 phba->sli4_hba.max_cfg_param.rpi_used++;
19324                 phba->sli4_hba.rpi_count++;
19325         }
19326         lpfc_printf_log(phba, KERN_INFO,
19327                         LOG_NODE | LOG_DISCOVERY,
19328                         "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
19329                         (int) rpi, max_rpi, rpi_limit);
19330
19331         /*
19332          * Don't try to allocate more rpi header regions if the device limit
19333          * has been exhausted.
19334          */
19335         if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
19336             (phba->sli4_hba.rpi_count >= max_rpi)) {
19337                 spin_unlock_irqrestore(&phba->hbalock, iflag);
19338                 return rpi;
19339         }
19340
19341         /*
19342          * RPI header postings are not required for SLI4 ports capable of
19343          * extents.
19344          */
19345         if (!phba->sli4_hba.rpi_hdrs_in_use) {
19346                 spin_unlock_irqrestore(&phba->hbalock, iflag);
19347                 return rpi;
19348         }
19349
19350         /*
19351          * If the driver is running low on rpi resources, allocate another
19352          * page now.  Note that the next_rpi value is used because
19353          * it represents how many are actually in use whereas max_rpi notes
19354          * how many are supported max by the device.
19355          */
19356         rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
19357         spin_unlock_irqrestore(&phba->hbalock, iflag);
19358         if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
19359                 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
19360                 if (!rpi_hdr) {
19361                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19362                                         "2002 Error Could not grow rpi "
19363                                         "count\n");
19364                 } else {
19365                         lrpi = rpi_hdr->start_rpi;
19366                         rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19367                         lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
19368                 }
19369         }
19370
19371         return rpi;
19372 }
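
/*
 * Typical caller-side usage (illustrative sketch only): allocate an rpi
 * when a login is established and release it when the login is torn down.
 *
 *	rpi = lpfc_sli4_alloc_rpi(phba);
 *	if (rpi == LPFC_RPI_ALLOC_ERROR)
 *		return failure;
 *	...
 *	lpfc_sli4_free_rpi(phba, rpi);
 */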
19373
19374 /**
19375  * __lpfc_sli4_free_rpi - Release an rpi for reuse.
19376  * @phba: pointer to lpfc hba data structure.
19377  * @rpi: rpi to free
19378  *
19379  * This routine is invoked to release an rpi to the pool of
19380  * available rpis maintained by the driver.
19381  **/
19382 static void
19383 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19384 {
19385         /*
19386          * if the rpi value indicates a prior unreg has already
19387          * been done, skip the unreg.
19388          */
19389         if (rpi == LPFC_RPI_ALLOC_ERROR)
19390                 return;
19391
19392         if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
19393                 phba->sli4_hba.rpi_count--;
19394                 phba->sli4_hba.max_cfg_param.rpi_used--;
19395         } else {
19396                 lpfc_printf_log(phba, KERN_INFO,
19397                                 LOG_NODE | LOG_DISCOVERY,
19398                                 "2016 rpi %x not inuse\n",
19399                                 rpi);
19400         }
19401 }
19402
19403 /**
19404  * lpfc_sli4_free_rpi - Release an rpi for reuse.
19405  * @phba: pointer to lpfc hba data structure.
19406  * @rpi: rpi to free
19407  *
19408  * This routine is invoked to release an rpi to the pool of
19409  * available rpis maintained by the driver.
19410  **/
19411 void
19412 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19413 {
19414         spin_lock_irq(&phba->hbalock);
19415         __lpfc_sli4_free_rpi(phba, rpi);
19416         spin_unlock_irq(&phba->hbalock);
19417 }
19418
19419 /**
19420  * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
19421  * @phba: pointer to lpfc hba data structure.
19422  *
19423  * This routine is invoked to free the rpi bitmask and rpi id array
19424  * that track rpi usage for the port.
19425  **/
19426 void
19427 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
19428 {
19429         kfree(phba->sli4_hba.rpi_bmask);
19430         kfree(phba->sli4_hba.rpi_ids);
19431         bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
19432 }
19433
19434 /**
19435  * lpfc_sli4_resume_rpi - Resume the rpi assigned to a remote node
19436  * @ndlp: pointer to lpfc nodelist data structure.
19437  * @cmpl: completion call-back.
19438  * @arg: data to load as MBox 'caller buffer information'
19439  *
19440  * This routine is invoked to issue a RESUME_RPI mailbox command for
19441  * the rpi assigned to @ndlp.
19442  **/
19443 int
19444 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
19445         void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
19446 {
19447         LPFC_MBOXQ_t *mboxq;
19448         struct lpfc_hba *phba = ndlp->phba;
19449         int rc;
19450
19451         /* The port is notified of the header region via a mailbox command. */
19452         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19453         if (!mboxq)
19454                 return -ENOMEM;
19455
19456         /* If cmpl assigned, then this nlp_get pairs with
19457          * lpfc_mbx_cmpl_resume_rpi.
19458          *
19459          * Else cmpl is NULL, then this nlp_get pairs with
19460          * lpfc_sli_def_mbox_cmpl.
19461          */
19462         if (!lpfc_nlp_get(ndlp)) {
19463                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19464                                 "2122 %s: Failed to get nlp ref\n",
19465                                 __func__);
19466                 mempool_free(mboxq, phba->mbox_mem_pool);
19467                 return -EIO;
19468         }
19469
19470         /* Post all rpi memory regions to the port. */
19471         lpfc_resume_rpi(mboxq, ndlp);
19472         if (cmpl) {
19473                 mboxq->mbox_cmpl = cmpl;
19474                 mboxq->ctx_buf = arg;
19475         } else
19476                 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19477         mboxq->ctx_ndlp = ndlp;
19478         mboxq->vport = ndlp->vport;
19479         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19480         if (rc == MBX_NOT_FINISHED) {
19481                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19482                                 "2010 Resume RPI Mailbox failed "
19483                                 "status %d, mbxStatus x%x\n", rc,
19484                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19485                 lpfc_nlp_put(ndlp);
19486                 mempool_free(mboxq, phba->mbox_mem_pool);
19487                 return -EIO;
19488         }
19489         return 0;
19490 }
19491
19492 /**
19493  * lpfc_sli4_init_vpi - Initialize a vpi with the port
19494  * @vport: Pointer to the vport for which the vpi is being initialized
19495  *
19496  * This routine is invoked to activate a vpi with the port.
19497  *
19498  * Returns:
19499  *    0 success
19500  *    -Evalue otherwise
19501  **/
19502 int
19503 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
19504 {
19505         LPFC_MBOXQ_t *mboxq;
19506         int rc = 0;
19507         int retval = MBX_SUCCESS;
19508         uint32_t mbox_tmo;
19509         struct lpfc_hba *phba = vport->phba;
19510         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19511         if (!mboxq)
19512                 return -ENOMEM;
19513         lpfc_init_vpi(phba, mboxq, vport->vpi);
19514         mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
19515         rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
19516         if (rc != MBX_SUCCESS) {
19517                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
19518                                 "2022 INIT VPI Mailbox failed "
19519                                 "status %d, mbxStatus x%x\n", rc,
19520                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19521                 retval = -EIO;
19522         }
19523         if (rc != MBX_TIMEOUT)
19524                 mempool_free(mboxq, vport->phba->mbox_mem_pool);
19525
19526         return retval;
19527 }
19528
19529 /**
19530  * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
19531  * @phba: pointer to lpfc hba data structure.
19532  * @mboxq: Pointer to mailbox object.
19533  *
19534  * This routine is the completion handler for the ADD_FCF_RECORD mailbox
19535  * command.  It checks the mailbox subheader status and frees the
19536  * nonembedded mailbox resources.
19537  **/
19538 static void
19539 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
19540 {
19541         void *virt_addr;
19542         union lpfc_sli4_cfg_shdr *shdr;
19543         uint32_t shdr_status, shdr_add_status;
19544
19545         virt_addr = mboxq->sge_array->addr[0];
19546         /* The IOCTL status is embedded in the mailbox subheader. */
19547         shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
19548         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19549         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19550
19551         if ((shdr_status || shdr_add_status) &&
19552                 (shdr_status != STATUS_FCF_IN_USE))
19553                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19554                         "2558 ADD_FCF_RECORD mailbox failed with "
19555                         "status x%x add_status x%x\n",
19556                         shdr_status, shdr_add_status);
19557
19558         lpfc_sli4_mbox_cmd_free(phba, mboxq);
19559 }
19560
19561 /**
19562  * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
19563  * @phba: pointer to lpfc hba data structure.
19564  * @fcf_record:  pointer to the initialized fcf record to add.
19565  *
19566  * This routine is invoked to manually add a single FCF record. The caller
19567  * must pass a completely initialized FCF_Record.  This routine takes
19568  * care of the nonembedded mailbox operations.
19569  **/
19570 int
19571 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
19572 {
19573         int rc = 0;
19574         LPFC_MBOXQ_t *mboxq;
19575         uint8_t *bytep;
19576         void *virt_addr;
19577         struct lpfc_mbx_sge sge;
19578         uint32_t alloc_len, req_len;
19579         uint32_t fcfindex;
19580
19581         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19582         if (!mboxq) {
19583                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19584                         "2009 Failed to allocate mbox for ADD_FCF cmd\n");
19585                 return -ENOMEM;
19586         }
19587
19588         req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
19589                   sizeof(uint32_t);
19590
19591         /* Allocate DMA memory and set up the non-embedded mailbox command */
19592         alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19593                                      LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
19594                                      req_len, LPFC_SLI4_MBX_NEMBED);
19595         if (alloc_len < req_len) {
19596                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19597                         "2523 Allocated DMA memory size (x%x) is "
19598                         "less than the requested DMA memory "
19599                         "size (x%x)\n", alloc_len, req_len);
19600                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19601                 return -ENOMEM;
19602         }
19603
19604         /*
19605          * Get the first SGE entry from the non-embedded DMA memory.  This
19606          * routine only uses a single SGE.
19607          */
19608         lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
19609         virt_addr = mboxq->sge_array->addr[0];
19610         /*
19611          * Configure the FCF record for FCFI 0.  This is the driver's
19612          * hardcoded default and gets used in nonFIP mode.
19613          */
19614         fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
19615         bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
19616         lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
19617
19618         /*
19619          * Copy the fcf_index and the FCF Record Data. The data starts after
19620          * the FCoE header plus word10. The data copy needs to be endian
19621          * correct.
19622          */
19623         bytep += sizeof(uint32_t);
19624         lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
19625         mboxq->vport = phba->pport;
19626         mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
19627         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19628         if (rc == MBX_NOT_FINISHED) {
19629                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19630                         "2515 ADD_FCF_RECORD mailbox failed with "
19631                         "status 0x%x\n", rc);
19632                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19633                 rc = -EIO;
19634         } else
19635                 rc = 0;
19636
19637         return rc;
19638 }
19639
19640 /**
19641  * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
19642  * @phba: pointer to lpfc hba data structure.
19643  * @fcf_record:  pointer to the fcf record to write the default data.
19644  * @fcf_index: FCF table entry index.
19645  *
19646  * This routine is invoked to build the driver's default FCF record.  The
19647  * values used are hardcoded.  This routine handles memory initialization.
19648  *
19649  **/
19650 void
19651 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
19652                                 struct fcf_record *fcf_record,
19653                                 uint16_t fcf_index)
19654 {
19655         memset(fcf_record, 0, sizeof(struct fcf_record));
19656         fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
19657         fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
19658         fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
19659         bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
19660         bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
19661         bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
19662         bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
19663         bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
19664         bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
19665         bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
19666         bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
19667         bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
19668         bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
19669         bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
19670         bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
19671         bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
19672                 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
19673         /* Set the VLAN bit map */
19674         if (phba->valid_vlan) {
19675                 fcf_record->vlan_bitmap[phba->vlan_id / 8]
19676                         = 1 << (phba->vlan_id % 8);
19677         }
19678 }
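
/*
 * Illustrative pairing (the fcf_index value is caller-chosen): build the
 * hardcoded default record and hand it to lpfc_sli4_add_fcf_record(),
 * which expects a completely initialized record.
 *
 *	lpfc_sli4_build_dflt_fcf_record(phba, fcf_record, fcf_index);
 *	rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
 */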
19679
19680 /**
19681  * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
19682  * @phba: pointer to lpfc hba data structure.
19683  * @fcf_index: FCF table entry offset.
19684  *
19685  * This routine is invoked to scan the entire FCF table by reading FCF
19686  * record and processing it one at a time starting from the @fcf_index
19687  * for initial FCF discovery or fast FCF failover rediscovery.
19688  *
19689  * Return 0 if the mailbox command is submitted successfully, nonzero
19690  * otherwise.
19691  **/
19692 int
19693 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19694 {
19695         int rc = 0, error;
19696         LPFC_MBOXQ_t *mboxq;
19697
19698         phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
19699         phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
19700         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19701         if (!mboxq) {
19702                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19703                                 "2000 Failed to allocate mbox for "
19704                                 "READ_FCF cmd\n");
19705                 error = -ENOMEM;
19706                 goto fail_fcf_scan;
19707         }
19708         /* Construct the read FCF record mailbox command */
19709         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19710         if (rc) {
19711                 error = -EINVAL;
19712                 goto fail_fcf_scan;
19713         }
19714         /* Issue the mailbox command asynchronously */
19715         mboxq->vport = phba->pport;
19716         mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
19717
19718         spin_lock_irq(&phba->hbalock);
19719         phba->hba_flag |= FCF_TS_INPROG;
19720         spin_unlock_irq(&phba->hbalock);
19721
19722         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19723         if (rc == MBX_NOT_FINISHED)
19724                 error = -EIO;
19725         else {
19726                 /* Reset eligible FCF count for new scan */
19727                 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
19728                         phba->fcf.eligible_fcf_cnt = 0;
19729                 error = 0;
19730         }
19731 fail_fcf_scan:
19732         if (error) {
19733                 if (mboxq)
19734                         lpfc_sli4_mbox_cmd_free(phba, mboxq);
19735                 /* FCF scan failed, clear FCF_TS_INPROG flag */
19736                 spin_lock_irq(&phba->hbalock);
19737                 phba->hba_flag &= ~FCF_TS_INPROG;
19738                 spin_unlock_irq(&phba->hbalock);
19739         }
19740         return error;
19741 }
19742
19743 /**
19744  * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
19745  * @phba: pointer to lpfc hba data structure.
19746  * @fcf_index: FCF table entry offset.
19747  *
19748  * This routine is invoked to read an FCF record indicated by @fcf_index
19749  * and to use it for FLOGI roundrobin FCF failover.
19750  *
19751  * Return 0 if the mailbox command is submitted successfully, non-zero
19752  * otherwise.
19753  **/
19754 int
19755 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19756 {
19757         int rc = 0, error;
19758         LPFC_MBOXQ_t *mboxq;
19759
19760         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19761         if (!mboxq) {
19762                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19763                                 "2763 Failed to allocate mbox for "
19764                                 "READ_FCF cmd\n");
19765                 error = -ENOMEM;
19766                 goto fail_fcf_read;
19767         }
19768         /* Construct the read FCF record mailbox command */
19769         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19770         if (rc) {
19771                 error = -EINVAL;
19772                 goto fail_fcf_read;
19773         }
19774         /* Issue the mailbox command asynchronously */
19775         mboxq->vport = phba->pport;
19776         mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
19777         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19778         if (rc == MBX_NOT_FINISHED)
19779                 error = -EIO;
19780         else
19781                 error = 0;
19782
19783 fail_fcf_read:
19784         if (error && mboxq)
19785                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19786         return error;
19787 }
19788
19789 /**
19790  * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
19791  * @phba: pointer to lpfc hba data structure.
19792  * @fcf_index: FCF table entry offset.
19793  *
19794  * This routine is invoked to read an FCF record indicated by @fcf_index to
19795  * determine whether it's eligible for FLOGI roundrobin failover list.
19796  *
19797  * Return 0 if the mailbox command is submitted successfully, non-zero
19798  * otherwise.
19799  **/
19800 int
19801 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19802 {
19803         int rc = 0, error;
19804         LPFC_MBOXQ_t *mboxq;
19805
19806         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19807         if (!mboxq) {
19808                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19809                                 "2758 Failed to allocate mbox for "
19810                                 "READ_FCF cmd\n");
19811                 error = -ENOMEM;
19812                 goto fail_fcf_read;
19813         }
19814         /* Construct the read FCF record mailbox command */
19815         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19816         if (rc) {
19817                 error = -EINVAL;
19818                 goto fail_fcf_read;
19819         }
19820         /* Issue the mailbox command asynchronously */
19821         mboxq->vport = phba->pport;
19822         mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
19823         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19824         if (rc == MBX_NOT_FINISHED)
19825                 error = -EIO;
19826         else
19827                 error = 0;
19828
19829 fail_fcf_read:
19830         if (error && mboxq)
19831                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19832         return error;
19833 }
19834
19835 /**
19836  * lpfc_check_next_fcf_pri_level - Populate rr_bmask from next priority level
19837  * @phba: pointer to the lpfc_hba struct for this port.
19838  * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
19839  * routine when the rr_bmask is empty. The FCF indices are put into the
19840  * rr_bmask based on their priority level, starting from the highest priority
19841  * to the lowest. The most likely FCF candidate will be in the highest
19842  * priority group. When this routine is called it searches the fcf_pri list
19843  * for the next lowest priority group and repopulates the rr_bmask with only
19844  * those fcf_indexes.
19845  * Returns:
19846  * 1=success 0=failure
19847  **/
19848 static int
19849 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
19850 {
19851         uint16_t next_fcf_pri;
19852         uint16_t last_index;
19853         struct lpfc_fcf_pri *fcf_pri;
19854         int rc;
19855         int ret = 0;
19856
19857         last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
19858                         LPFC_SLI4_FCF_TBL_INDX_MAX);
19859         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19860                         "3060 Last IDX %d\n", last_index);
19861
19862         /* Verify the priority list has 2 or more entries */
19863         spin_lock_irq(&phba->hbalock);
19864         if (list_empty(&phba->fcf.fcf_pri_list) ||
19865             list_is_singular(&phba->fcf.fcf_pri_list)) {
19866                 spin_unlock_irq(&phba->hbalock);
19867                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19868                         "3061 Last IDX %d\n", last_index);
19869                 return 0; /* Empty rr list */
19870         }
19871         spin_unlock_irq(&phba->hbalock);
19872
19873         next_fcf_pri = 0;
19874         /*
19875          * Clear the rr_bmask and set all of the bits that are at this
19876          * priority.
19877          */
19878         memset(phba->fcf.fcf_rr_bmask, 0,
19879                         sizeof(*phba->fcf.fcf_rr_bmask));
19880         spin_lock_irq(&phba->hbalock);
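        /*
         * Note: hbalock is dropped across lpfc_sli4_fcf_rr_index_set()
         * below and re-acquired before the walk continues.
         */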
19881         list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19882                 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
19883                         continue;
19884                 /*
19885                  * The first priority that has not had an FLOGI
19886                  * failure will be the highest.
19887                  */
19888                 if (!next_fcf_pri)
19889                         next_fcf_pri = fcf_pri->fcf_rec.priority;
19890                 spin_unlock_irq(&phba->hbalock);
19891                 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19892                         rc = lpfc_sli4_fcf_rr_index_set(phba,
19893                                                 fcf_pri->fcf_rec.fcf_index);
19894                         if (rc)
19895                                 return 0;
19896                 }
19897                 spin_lock_irq(&phba->hbalock);
19898         }
19899         /*
19900          * If next_fcf_pri was not set above and the list is not empty, then
19901          * FLOGI has failed on all of them. Reset the FLOGI-failed flags
19902          * and start again at the beginning.
19903          */
19904         if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
19905                 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19906                         fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
19907                         /*
19908                          * The first priority that has not had an FLOGI
19909                          * failure will be the highest.
19910                          */
19911                         if (!next_fcf_pri)
19912                                 next_fcf_pri = fcf_pri->fcf_rec.priority;
19913                         spin_unlock_irq(&phba->hbalock);
19914                         if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19915                                 rc = lpfc_sli4_fcf_rr_index_set(phba,
19916                                                 fcf_pri->fcf_rec.fcf_index);
19917                                 if (rc)
19918                                         return 0;
19919                         }
19920                         spin_lock_irq(&phba->hbalock);
19921                 }
19922         } else
19923                 ret = 1;
19924         spin_unlock_irq(&phba->hbalock);
19925
19926         return ret;
19927 }
19928 /**
19929  * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
19930  * @phba: pointer to lpfc hba data structure.
19931  *
19932  * This routine is to get the next eligible FCF record index in a round
19933  * robin fashion. If the next eligible FCF record index equals the
19934  * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
19935  * shall be returned, otherwise, the next eligible FCF record's index
19936  * shall be returned.
19937  **/
19938 uint16_t
19939 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
19940 {
19941         uint16_t next_fcf_index;
19942
19943 initial_priority:
19944         /* Search start from next bit of currently registered FCF index */
19945         next_fcf_index = phba->fcf.current_rec.fcf_indx;
19946
19947 next_priority:
19948         /* Determine the next fcf index to check */
19949         next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
19950         next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19951                                        LPFC_SLI4_FCF_TBL_INDX_MAX,
19952                                        next_fcf_index);
19953
19954         /* Wrap around condition on phba->fcf.fcf_rr_bmask */
19955         if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19956                 /*
19957                  * If we have wrapped then we need to clear the bits that
19958                  * have been tested so that we can detect when we should
19959                  * change the priority level.
19960                  */
19961                 next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask,
19962                                                LPFC_SLI4_FCF_TBL_INDX_MAX);
19963         }
19964
19966         /* Check roundrobin failover list empty condition */
19967         if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
19968                 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
19969                 /*
19970                  * If the next FCF index is not found, check if there are
19971                  * lower priority level FCFs in the fcf_priority list.
19972                  * Set up the rr_bmask with all of the available FCF bits
19973                  * at that level and continue the selection process.
19974                  */
19975                 if (lpfc_check_next_fcf_pri_level(phba))
19976                         goto initial_priority;
19977                 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
19978                                 "2844 No roundrobin failover FCF available\n");
19979
19980                 return LPFC_FCOE_FCF_NEXT_NONE;
19981         }
19982
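        /* If the selected FCF has already failed FLOGI, move on to the next
         * index; if it is the only entry on the priority list, give up.
         */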
19983         if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
19984                 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
19985                 LPFC_FCF_FLOGI_FAILED) {
19986                 if (list_is_singular(&phba->fcf.fcf_pri_list))
19987                         return LPFC_FCOE_FCF_NEXT_NONE;
19988
19989                 goto next_priority;
19990         }
19991
19992         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19993                         "2845 Get next roundrobin failover FCF (x%x)\n",
19994                         next_fcf_index);
19995
19996         return next_fcf_index;
19997 }
19998
19999 /**
20000  * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
20001  * @phba: pointer to lpfc hba data structure.
20002  * @fcf_index: index into the FCF table to 'set'
20003  *
20004  * This routine sets the FCF record index in to the eligible bmask for
20005  * roundrobin failover search. It checks to make sure that the index
20006  * does not go beyond the range of the driver allocated bmask dimension
20007  * before setting the bit.
20008  *
20009  * Returns 0 if the index bit is successfully set, otherwise it returns
20010  * -EINVAL.
20011  **/
20012 int
20013 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
20014 {
20015         if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20016                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20017                                 "2610 FCF (x%x) reached driver's "
20018                                 "bookkeeping dimension:x%x\n",
20019                                 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20020                 return -EINVAL;
20021         }
20022         /* Set the eligible FCF record index bmask */
20023         set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20024
20025         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20026                         "2790 Set FCF (x%x) to roundrobin FCF failover "
20027                         "bmask\n", fcf_index);
20028
20029         return 0;
20030 }
20031
20032 /**
20033  * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
20034  * @phba: pointer to lpfc hba data structure.
20035  * @fcf_index: index into the FCF table to 'clear'
20036  *
20037  * This routine clears the FCF record index from the eligible bmask for
20038  * roundrobin failover search. It checks to make sure that the index
20039  * does not go beyond the range of the driver allocated bmask dimension
20040  * before clearing the bit.
20041  **/
20042 void
20043 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
20044 {
20045         struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
20046         if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20047                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20048                                 "2762 FCF (x%x) reached driver's "
20049                                 "bookkeeping dimension:x%x\n",
20050                                 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20051                 return;
20052         }
20053         /* Remove the index from the fcf_pri list, then clear its bmask bit */
20054         spin_lock_irq(&phba->hbalock);
20055         list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
20056                                  list) {
20057                 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
20058                         list_del_init(&fcf_pri->list);
20059                         break;
20060                 }
20061         }
20062         spin_unlock_irq(&phba->hbalock);
20063         clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20064
20065         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20066                         "2791 Clear FCF (x%x) from roundrobin failover "
20067                         "bmask\n", fcf_index);
20068 }
20069
20070 /**
20071  * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
20072  * @phba: pointer to lpfc hba data structure.
20073  * @mbox: An allocated pointer to type LPFC_MBOXQ_t
20074  *
20075  * This routine is the completion routine for the rediscover FCF table mailbox
20076  * command. If the mailbox command returned failure, it will try to stop the
20077  * FCF rediscover wait timer.
20078  **/
20079 static void
20080 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
20081 {
20082         struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20083         uint32_t shdr_status, shdr_add_status;
20084
20085         redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20086
20087         shdr_status = bf_get(lpfc_mbox_hdr_status,
20088                              &redisc_fcf->header.cfg_shdr.response);
20089         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20090                              &redisc_fcf->header.cfg_shdr.response);
20091         if (shdr_status || shdr_add_status) {
20092                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20093                                 "2746 Request for FCF rediscovery failed "
20094                                 "status x%x add_status x%x\n",
20095                                 shdr_status, shdr_add_status);
20096                 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
20097                         spin_lock_irq(&phba->hbalock);
20098                         phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
20099                         spin_unlock_irq(&phba->hbalock);
20100                         /*
20101                          * CVL event triggered FCF rediscover request failed,
20102                          * last resort to re-try current registered FCF entry.
20103                          */
20104                         lpfc_retry_pport_discovery(phba);
20105                 } else {
20106                         spin_lock_irq(&phba->hbalock);
20107                         phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
20108                         spin_unlock_irq(&phba->hbalock);
20109                         /*
20110                          * DEAD FCF event triggered FCF rediscover request
20111                          * failed, last resort to fail over as a link down
20112                          * to FCF registration.
20113                          */
20114                         lpfc_sli4_fcf_dead_failthrough(phba);
20115                 }
20116         } else {
20117                 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20118                                 "2775 Start FCF rediscover quiescent timer\n");
20119                 /*
20120                  * Start the FCF rediscovery wait timer for the pending FCF
20121                  * before rescanning the FCF record table.
20122                  */
20123                 lpfc_fcf_redisc_wait_start_timer(phba);
20124         }
20125
20126         mempool_free(mbox, phba->mbox_mem_pool);
20127 }
20128
20129 /**
20130  * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
20131  * @phba: pointer to lpfc hba data structure.
20132  *
20133  * This routine is invoked to request rediscovery of the entire FCF table
20134  * by the port.
20135  **/
20136 int
20137 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
20138 {
20139         LPFC_MBOXQ_t *mbox;
20140         struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20141         int rc, length;
20142
20143         /* Cancel retry delay timers to all vports before FCF rediscover */
20144         lpfc_cancel_all_vport_retry_delay_timer(phba);
20145
20146         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20147         if (!mbox) {
20148                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20149                                 "2745 Failed to allocate mbox for "
20150                                 "requesting FCF rediscover.\n");
20151                 return -ENOMEM;
20152         }
20153
20154         length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
20155                   sizeof(struct lpfc_sli4_cfg_mhdr));
20156         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
20157                          LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
20158                          length, LPFC_SLI4_MBX_EMBED);
20159
20160         redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20161         /* Set count to 0 for invalidating the entire FCF database */
20162         bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
20163
20164         /* Issue the mailbox command asynchronously */
20165         mbox->vport = phba->pport;
20166         mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
20167         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
20168
20169         if (rc == MBX_NOT_FINISHED) {
20170                 mempool_free(mbox, phba->mbox_mem_pool);
20171                 return -EIO;
20172         }
20173         return 0;
20174 }
20175
20176 /**
20177  * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
20178  * @phba: pointer to lpfc hba data structure.
20179  *
20180  * This function is the failover routine as a last resort to the FCF DEAD
20181  * event when driver failed to perform fast FCF failover.
20182  **/
20183 void
20184 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
20185 {
20186         uint32_t link_state;
20187
20188         /*
20189          * Last resort as FCF DEAD event failover will treat this as
20190          * a link down, but save the link state because we don't want
20191          * it to be changed to Link Down unless it is already down.
20192          */
20193         link_state = phba->link_state;
20194         lpfc_linkdown(phba);
20195         phba->link_state = link_state;
20196
20197         /* Unregister FCF if no devices connected to it */
20198         lpfc_unregister_unused_fcf(phba);
20199 }
20200
20201 /**
20202  * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
20203  * @phba: pointer to lpfc hba data structure.
20204  * @rgn23_data: pointer to the configuration region 23 data buffer.
20205  *
20206  * This function gets SLI3 port configuration region 23 data through the
20207  * memory dump mailbox command. When it successfully retrieves data, the
20208  * size of the data will be returned; otherwise, 0 will be returned.
20209  **/
20210 static uint32_t
20211 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20212 {
20213         LPFC_MBOXQ_t *pmb = NULL;
20214         MAILBOX_t *mb;
20215         uint32_t offset = 0;
20216         int rc;
20217
20218         if (!rgn23_data)
20219                 return 0;
20220
20221         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20222         if (!pmb) {
20223                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20224                                 "2600 failed to allocate mailbox memory\n");
20225                 return 0;
20226         }
20227         mb = &pmb->u.mb;
20228
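        /* Region 23 is dumped in chunks; keep issuing the dump mailbox
         * command until a zero word count is returned or the region
         * buffer is full.
         */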
20229         do {
20230                 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
20231                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
20232
20233                 if (rc != MBX_SUCCESS) {
20234                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20235                                         "2601 failed to read config "
20236                                         "region 23, rc 0x%x Status 0x%x\n",
20237                                         rc, mb->mbxStatus);
20238                         mb->un.varDmp.word_cnt = 0;
20239                 }
20240                 /*
20241                  * The dump may return a zero word count when finished, or we
20242                  * hit a mailbox error; either way we are done.
20243                  */
20244                 if (mb->un.varDmp.word_cnt == 0)
20245                         break;
20246
20247                 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
20248                         mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
20249
20250                 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
20251                                        rgn23_data + offset,
20252                                        mb->un.varDmp.word_cnt);
20253                 offset += mb->un.varDmp.word_cnt;
20254         } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
20255
20256         mempool_free(pmb, phba->mbox_mem_pool);
20257         return offset;
20258 }
20259
20260 /**
20261  * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
20262  * @phba: pointer to lpfc hba data structure.
20263  * @rgn23_data: pointer to the configuration region 23 data buffer.
20264  *
20265  * This function gets SLI4 port configuration region 23 data through the
20266  * memory dump mailbox command. When it successfully retrieves data, the
20267  * size of the data will be returned; otherwise, 0 will be returned.
20268  **/
20269 static uint32_t
20270 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20271 {
20272         LPFC_MBOXQ_t *mboxq = NULL;
20273         struct lpfc_dmabuf *mp = NULL;
20274         struct lpfc_mqe *mqe;
20275         uint32_t data_length = 0;
20276         int rc;
20277
20278         if (!rgn23_data)
20279                 return 0;
20280
20281         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20282         if (!mboxq) {
20283                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20284                                 "3105 failed to allocate mailbox memory\n");
20285                 return 0;
20286         }
20287
20288         if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
20289                 goto out;
20290         mqe = &mboxq->u.mqe;
20291         mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
20292         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
20293         if (rc)
20294                 goto out;
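        /* Word 5 of the mailbox response carries the length of the dumped data */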
20295         data_length = mqe->un.mb_words[5];
20296         if (data_length == 0)
20297                 goto out;
20298         if (data_length > DMP_RGN23_SIZE) {
20299                 data_length = 0;
20300                 goto out;
20301         }
20302         lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
20303 out:
20304         mempool_free(mboxq, phba->mbox_mem_pool);
20305         if (mp) {
20306                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
20307                 kfree(mp);
20308         }
20309         return data_length;
20310 }
20311
20312 /**
20313  * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
20314  * @phba: pointer to lpfc hba data structure.
20315  *
20316  * This function reads region 23 and parses the TLV for port status to
20317  * decide if the user disabled the port. If the TLV indicates the
20318  * port is disabled, the hba_flag is set accordingly.
20319  **/
20320 void
20321 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
20322 {
20323         uint8_t *rgn23_data = NULL;
20324         uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
20325         uint32_t offset = 0;
20326
20327         /* Get adapter Region 23 data */
20328         rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
20329         if (!rgn23_data)
20330                 goto out;
20331
20332         if (phba->sli_rev < LPFC_SLI_REV4)
20333                 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
20334         else {
20335                 if_type = bf_get(lpfc_sli_intf_if_type,
20336                                  &phba->sli4_hba.sli_intf);
20337                 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
20338                         goto out;
20339                 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
20340         }
20341
20342         if (!data_size)
20343                 goto out;
20344
20345         /* Check the region signature first */
20346         if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
20347                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20348                         "2619 Config region 23 has bad signature\n");
20349                 goto out;
20350         }
20351         offset += 4;
20352
20353         /* Check the data structure version */
20354         if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
20355                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20356                         "2620 Config region 23 has bad version\n");
20357                 goto out;
20358         }
20359         offset += 4;
20360
20361         /* Parse TLV entries in the region */
20362         while (offset < data_size) {
20363                 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
20364                         break;
20365                 /*
20366                  * If the TLV is not a driver-specific TLV or the driver id
20367                  * is not the Linux driver id, skip the record.
20368                  */
20369                 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
20370                     (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
20371                     (rgn23_data[offset + 3] != 0)) {
20372                         offset += rgn23_data[offset + 1] * 4 + 4;
20373                         continue;
20374                 }
20375
20376                 /* Driver found a driver specific TLV in the config region */
20377                 sub_tlv_len = rgn23_data[offset + 1] * 4;
20378                 offset += 4;
20379                 tlv_offset = 0;
20380
20381                 /*
20382                  * Search for configured port state sub-TLV.
20383                  */
20384                 while ((offset < data_size) &&
20385                         (tlv_offset < sub_tlv_len)) {
20386                         if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
20387                                 offset += 4;
20388                                 tlv_offset += 4;
20389                                 break;
20390                         }
20391                         if (rgn23_data[offset] != PORT_STE_TYPE) {
20392                                 /* Advance tlv_offset before offset so both
20393                                  * use this record's length field.
20394                                  */
20395                                 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
20396                                 offset += rgn23_data[offset + 1] * 4 + 4;
20397                                 continue;
20398                         }
20396
20397                         /* This HBA contains PORT_STE configured */
20398                         if (!rgn23_data[offset + 2])
20399                                 phba->hba_flag |= LINK_DISABLED;
20400
20401                         goto out;
20402                 }
20403         }
20404
20405 out:
20406         kfree(rgn23_data);
20407         return;
20408 }
20409
20410 /**
20411  * lpfc_log_fw_write_cmpl - logs firmware write completion status
20412  * @phba: pointer to lpfc hba data structure
20413  * @shdr_status: wr_object rsp's status field
20414  * @shdr_add_status: wr_object rsp's add_status field
20415  * @shdr_add_status_2: wr_object rsp's add_status_2 field
20416  * @shdr_change_status: wr_object rsp's change_status field
20417  * @shdr_csf: wr_object rsp's csf bit
20418  *
20419  * This routine is intended to be called after a firmware write completes.
20420  * It will log the next action items to be performed by the user to
20421  * instantiate the newly downloaded firmware, or the reason for incompatibility.
20422  **/
20423 static void
20424 lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status,
20425                        u32 shdr_add_status, u32 shdr_add_status_2,
20426                        u32 shdr_change_status, u32 shdr_csf)
20427 {
20428         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20429                         "4198 %s: flash_id x%02x, asic_rev x%02x, "
20430                         "status x%02x, add_status x%02x, add_status_2 x%02x, "
20431                         "change_status x%02x, csf %01x\n", __func__,
20432                         phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev,
20433                         shdr_status, shdr_add_status, shdr_add_status_2,
20434                         shdr_change_status, shdr_csf);
20435
20436         if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) {
20437                 switch (shdr_add_status_2) {
20438                 case LPFC_ADD_STATUS_2_INCOMPAT_FLASH:
20439                         lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20440                                         "4199 Firmware write failed: "
20441                                         "image incompatible with flash x%02x\n",
20442                                         phba->sli4_hba.flash_id);
20443                         break;
20444                 case LPFC_ADD_STATUS_2_INCORRECT_ASIC:
20445                         lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20446                                         "4200 Firmware write failed: "
20447                                         "image incompatible with ASIC "
20448                                         "architecture x%02x\n",
20449                                         phba->sli4_hba.asic_rev);
20450                         break;
20451                 default:
20452                         lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20453                                         "4210 Firmware write failed: "
20454                                         "add_status_2 x%02x\n",
20455                                         shdr_add_status_2);
20456                         break;
20457                 }
20458         } else if (!shdr_status && !shdr_add_status) {
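                /* If csf is set for a firmware-reset or port-migration
                 * change, a PCI reset is sufficient to instantiate the new
                 * firmware, so downgrade the reported action accordingly.
                 */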
20459                 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
20460                     shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
20461                         if (shdr_csf)
20462                                 shdr_change_status =
20463                                                    LPFC_CHANGE_STATUS_PCI_RESET;
20464                 }
20465
20466                 switch (shdr_change_status) {
20467                 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
20468                         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20469                                         "3198 Firmware write complete: System "
20470                                         "reboot required to instantiate\n");
20471                         break;
20472                 case (LPFC_CHANGE_STATUS_FW_RESET):
20473                         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20474                                         "3199 Firmware write complete: "
20475                                         "Firmware reset required to "
20476                                         "instantiate\n");
20477                         break;
20478                 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
20479                         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20480                                         "3200 Firmware write complete: Port "
20481                                         "Migration or PCI Reset required to "
20482                                         "instantiate\n");
20483                         break;
20484                 case (LPFC_CHANGE_STATUS_PCI_RESET):
20485                         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20486                                         "3201 Firmware write complete: PCI "
20487                                         "Reset required to instantiate\n");
20488                         break;
20489                 default:
20490                         break;
20491                 }
20492         }
20493 }
20494
20495 /**
20496  * lpfc_wr_object - write an object to the firmware
20497  * @phba: HBA structure that indicates port to create a queue on.
20498  * @dmabuf_list: list of dmabufs to write to the port.
20499  * @size: the total byte value of the objects to write to the port.
20500  * @offset: the current offset to be used to start the transfer.
20501  *
20502  * This routine will create a wr_object mailbox command to send to the port.
20503  * The mailbox command will be constructed using the dma buffers described in
20504  * @dmabuf_list to create a list of BDEs. This routine will fill in as many
20505  * BDEs as the embedded mailbox can support. The @offset variable will be
20506  * used to indicate the starting offset of the transfer and will also return
20507  * the offset after the write object mailbox has completed. @size is used to
20508  * determine the end of the object and whether the eof bit should be set.
20509  *
20510  * Return 0 if successful, and @offset will contain the new offset to use
20511  * for the next write.
20512  * Return negative value for error cases.
20513  **/
20514 int
20515 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
20516                uint32_t size, uint32_t *offset)
20517 {
20518         struct lpfc_mbx_wr_object *wr_object;
20519         LPFC_MBOXQ_t *mbox;
20520         int rc = 0, i = 0;
20521         uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
20522         uint32_t shdr_change_status = 0, shdr_csf = 0;
20523         uint32_t mbox_tmo;
20524         struct lpfc_dmabuf *dmabuf;
20525         uint32_t written = 0;
20526         bool check_change_status = false;
20527
20528         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20529         if (!mbox)
20530                 return -ENOMEM;
20531
20532         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
20533                         LPFC_MBOX_OPCODE_WRITE_OBJECT,
20534                         sizeof(struct lpfc_mbx_wr_object) -
20535                         sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
20536
20537         wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
20538         wr_object->u.request.write_offset = *offset;
20539         sprintf((uint8_t *)wr_object->u.request.object_name, "/");
20540         wr_object->u.request.object_name[0] =
20541                 cpu_to_le32(wr_object->u.request.object_name[0]);
20542         bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
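        /* Build one BDE per SLI4 page; the final BDE is trimmed to the
         * remaining bytes and marked with the eof and eas bits.
         */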
20543         list_for_each_entry(dmabuf, dmabuf_list, list) {
20544                 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
20545                         break;
20546                 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
20547                 wr_object->u.request.bde[i].addrHigh =
20548                         putPaddrHigh(dmabuf->phys);
20549                 if (written + SLI4_PAGE_SIZE >= size) {
20550                         wr_object->u.request.bde[i].tus.f.bdeSize =
20551                                 (size - written);
20552                         written += (size - written);
20553                         bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
20554                         bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
20555                         check_change_status = true;
20556                 } else {
20557                         wr_object->u.request.bde[i].tus.f.bdeSize =
20558                                 SLI4_PAGE_SIZE;
20559                         written += SLI4_PAGE_SIZE;
20560                 }
20561                 i++;
20562         }
20563         wr_object->u.request.bde_count = i;
20564         bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
20565         if (!phba->sli4_hba.intr_enable)
20566                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
20567         else {
20568                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
20569                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
20570         }
20571         /* The IOCTL status is embedded in the mailbox subheader. */
20572         shdr_status = bf_get(lpfc_mbox_hdr_status,
20573                              &wr_object->header.cfg_shdr.response);
20574         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20575                                  &wr_object->header.cfg_shdr.response);
20576         shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2,
20577                                    &wr_object->header.cfg_shdr.response);
20578         if (check_change_status) {
20579                 shdr_change_status = bf_get(lpfc_wr_object_change_status,
20580                                             &wr_object->u.response);
20581                 shdr_csf = bf_get(lpfc_wr_object_csf,
20582                                   &wr_object->u.response);
20583         }
20584
20585         if (!phba->sli4_hba.intr_enable)
20586                 mempool_free(mbox, phba->mbox_mem_pool);
20587         else if (rc != MBX_TIMEOUT)
20588                 mempool_free(mbox, phba->mbox_mem_pool);
20589         if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
20590                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20591                                 "3025 Write Object mailbox failed with "
20592                                 "status x%x add_status x%x, add_status_2 x%x, "
20593                                 "mbx status x%x\n",
20594                                 shdr_status, shdr_add_status, shdr_add_status_2,
20595                                 rc);
20596                 rc = -ENXIO;
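                /* On failure, return the additional status through @offset */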
20597                 *offset = shdr_add_status;
20598         } else {
20599                 *offset += wr_object->u.response.actual_write_length;
20600         }
20601
20602         if (rc || check_change_status)
20603                 lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
20604                                        shdr_add_status_2, shdr_change_status,
20605                                        shdr_csf);
20606         return rc;
20607 }
20608
20609 /**
20610  * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
20611  * @vport: pointer to vport data structure.
20612  *
20613  * This function iterates through the mailboxq and cleans up all REG_LOGIN
20614  * and REG_VPI mailbox commands associated with the vport. This function
20615  * is called when the driver wants to restart discovery of the vport due to
20616  * a Clear Virtual Link event.
20617  **/
20618 void
20619 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
20620 {
20621         struct lpfc_hba *phba = vport->phba;
20622         LPFC_MBOXQ_t *mb, *nextmb;
20623         struct lpfc_dmabuf *mp;
20624         struct lpfc_nodelist *ndlp;
20625         struct lpfc_nodelist *act_mbx_ndlp = NULL;
20626         LIST_HEAD(mbox_cmd_list);
20627         uint8_t restart_loop;
20628
20629         /* Clean up internally queued mailbox commands with the vport */
20630         spin_lock_irq(&phba->hbalock);
20631         list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
20632                 if (mb->vport != vport)
20633                         continue;
20634
20635                 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
20636                         (mb->u.mb.mbxCommand != MBX_REG_VPI))
20637                         continue;
20638
20639                 list_move_tail(&mb->list, &mbox_cmd_list);
20640         }
20641         /* Clean up active mailbox command with the vport */
20642         mb = phba->sli.mbox_active;
20643         if (mb && (mb->vport == vport)) {
20644                 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
20645                         (mb->u.mb.mbxCommand == MBX_REG_VPI))
20646                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
20647                 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20648                         act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20649                         /* Put reference count for delayed processing */
20650                         act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
20651                         /* Unregister the RPI when mailbox complete */
20652                         mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
20653                 }
20654         }
20655         /* Cleanup any mailbox completions which are not yet processed */
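        /* The walk restarts from the list head whenever hbalock is dropped
         * to update an ndlp, since the list can change while unlocked.
         */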
20656         do {
20657                 restart_loop = 0;
20658                 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
20659                         /*
20660                          * If this mailbox is already processed or it is
20661                          * for another vport, ignore it.
20662                          */
20663                         if ((mb->vport != vport) ||
20664                                 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
20665                                 continue;
20666
20667                         if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
20668                                 (mb->u.mb.mbxCommand != MBX_REG_VPI))
20669                                 continue;
20670
20671                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
20672                         if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20673                                 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20674                                 /* Unregister the RPI when mailbox complete */
20675                                 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
20676                                 restart_loop = 1;
20677                                 spin_unlock_irq(&phba->hbalock);
20678                                 spin_lock(&ndlp->lock);
20679                                 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20680                                 spin_unlock(&ndlp->lock);
20681                                 spin_lock_irq(&phba->hbalock);
20682                                 break;
20683                         }
20684                 }
20685         } while (restart_loop);
20686
20687         spin_unlock_irq(&phba->hbalock);
20688
20689         /* Release the cleaned-up mailbox commands */
20690         while (!list_empty(&mbox_cmd_list)) {
20691                 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
20692                 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20693                         mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
20694                         if (mp) {
20695                                 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
20696                                 kfree(mp);
20697                         }
20698                         mb->ctx_buf = NULL;
20699                         ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20700                         mb->ctx_ndlp = NULL;
20701                         if (ndlp) {
20702                                 spin_lock(&ndlp->lock);
20703                                 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20704                                 spin_unlock(&ndlp->lock);
20705                                 lpfc_nlp_put(ndlp);
20706                         }
20707                 }
20708                 mempool_free(mb, phba->mbox_mem_pool);
20709         }
20710
20711         /* Release the ndlp with the cleaned-up active mailbox command */
20712         if (act_mbx_ndlp) {
20713                 spin_lock(&act_mbx_ndlp->lock);
20714                 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20715                 spin_unlock(&act_mbx_ndlp->lock);
20716                 lpfc_nlp_put(act_mbx_ndlp);
20717         }
20718 }
20719
20720 /**
20721  * lpfc_drain_txq - Drain the txq
20722  * @phba: Pointer to HBA context object.
20723  *
20724  * This function attempts to submit IOCBs on the txq
20725  * to the adapter.  For SLI4 adapters, the txq contains
20726  * ELS IOCBs that have been deferred because there
20727  * are no available SGLs.  This congestion can occur with large
20728  * vport counts during node discovery.
20729  **/
20730
20731 uint32_t
20732 lpfc_drain_txq(struct lpfc_hba *phba)
20733 {
20734         LIST_HEAD(completions);
20735         struct lpfc_sli_ring *pring;
20736         struct lpfc_iocbq *piocbq = NULL;
20737         unsigned long iflags = 0;
20738         char *fail_msg = NULL;
20739         uint32_t txq_cnt = 0;
20740         struct lpfc_queue *wq;
20741         int ret = 0;
20742
20743         if (phba->link_flag & LS_MDS_LOOPBACK) {
20744                 /* MDS WQEs are posted only to the first WQ */
20745                 wq = phba->sli4_hba.hdwq[0].io_wq;
20746                 if (unlikely(!wq))
20747                         return 0;
20748                 pring = wq->pring;
20749         } else {
20750                 wq = phba->sli4_hba.els_wq;
20751                 if (unlikely(!wq))
20752                         return 0;
20753                 pring = lpfc_phba_elsring(phba);
20754         }
20755
20756         if (unlikely(!pring) || list_empty(&pring->txq))
20757                 return 0;
20758
20759         spin_lock_irqsave(&pring->ring_lock, iflags);
20760         list_for_each_entry(piocbq, &pring->txq, list) {
20761                 txq_cnt++;
20762         }
20763
20764         if (txq_cnt > pring->txq_max)
20765                 pring->txq_max = txq_cnt;
20766
20767         spin_unlock_irqrestore(&pring->ring_lock, iflags);
20768
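        /* Issue txq entries one at a time; stop early on IOCB_BUSY so the
         * remaining entries stay queued for a later drain attempt.
         */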
20769         while (!list_empty(&pring->txq)) {
20770                 spin_lock_irqsave(&pring->ring_lock, iflags);
20771
20772                 piocbq = lpfc_sli_ringtx_get(phba, pring);
20773                 if (!piocbq) {
20774                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
20775                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20776                                 "2823 txq empty and txq_cnt is %d\n",
20777                                 txq_cnt);
20778                         break;
20779                 }
20780                 txq_cnt--;
20781
20782                 ret = __lpfc_sli_issue_iocb(phba, pring->ringno, piocbq, 0);
20783
20784                 if (ret && ret != IOCB_BUSY) {
20785                         fail_msg = " - Cannot send IO ";
20786                         piocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
20787                 }
20788                 if (fail_msg) {
20789                         piocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
20790                         /* Failed means we can't issue and need to cancel */
20791                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20792                                         "2822 IOCB failed %s iotag 0x%x "
20793                                         "xri 0x%x %d flg x%x\n",
20794                                         fail_msg, piocbq->iotag,
20795                                         piocbq->sli4_xritag, ret,
20796                                         piocbq->cmd_flag);
20797                         list_add_tail(&piocbq->list, &completions);
20798                         fail_msg = NULL;
20799                 }
20800                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20801                 if (txq_cnt == 0 || ret == IOCB_BUSY)
20802                         break;
20803         }
20804         /* Cancel all the IOCBs that cannot be issued */
20805         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
20806                               IOERR_SLI_ABORTED);
20807
20808         return txq_cnt;
20809 }
20810
20811 /**
20812  * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
20813  * @phba: Pointer to HBA context object.
20814  * @pwqeq: Pointer to command WQE.
20815  * @sglq: Pointer to the scatter gather queue object.
20816  *
20817  * This routine converts the bpl or bde that is in the WQE
20818  * to a sgl list for the sli4 hardware. The physical address
20819  * of the bpl/bde is converted back to a virtual address.
20820  * If the WQE contains a BPL then the list of BDEs is
20821  * converted to sli4_sges. If the WQE contains a single
20822  * BDE then it is converted to a single sli4_sge.
20823  * The WQE is still in cpu endianness so the contents of
20824  * the bpl can be used without byte swapping.
20825  *
20826  * Returns valid XRI = Success, NO_XRI = Failure.
20827  */
20828 static uint16_t
20829 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
20830                  struct lpfc_sglq *sglq)
20831 {
20832         uint16_t xritag = NO_XRI;
20833         struct ulp_bde64 *bpl = NULL;
20834         struct ulp_bde64 bde;
20835         struct sli4_sge *sgl  = NULL;
20836         struct lpfc_dmabuf *dmabuf;
20837         union lpfc_wqe128 *wqe;
20838         int numBdes = 0;
20839         int i = 0;
20840         uint32_t offset = 0; /* accumulated offset in the sg request list */
20841         int inbound = 0; /* number of sg reply entries inbound from firmware */
20842         uint32_t cmd;
20843
20844         if (!pwqeq || !sglq)
20845                 return xritag;
20846
20847         sgl  = (struct sli4_sge *)sglq->sgl;
20848         wqe = &pwqeq->wqe;
20849         pwqeq->iocb.ulpIoTag = pwqeq->iotag;
20850
20851         cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
20852         if (cmd == CMD_XMIT_BLS_RSP64_WQE)
20853                 return sglq->sli4_xritag;
20854         numBdes = pwqeq->num_bdes;
20855         if (numBdes) {
20856                 /* The addrHigh and addrLow fields within the WQE
20857                  * have not been byteswapped yet so there is no
20858                  * need to swap them back.
20859                  */
20860                 if (pwqeq->context3)
20861                         dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
20862                 else
20863                         return xritag;
20864
20865                 bpl  = (struct ulp_bde64 *)dmabuf->virt;
20866                 if (!bpl)
20867                         return xritag;
20868
20869                 for (i = 0; i < numBdes; i++) {
20870                         /* Should already be byte swapped. */
20871                         sgl->addr_hi = bpl->addrHigh;
20872                         sgl->addr_lo = bpl->addrLow;
20873
20874                         sgl->word2 = le32_to_cpu(sgl->word2);
20875                         if ((i+1) == numBdes)
20876                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
20877                         else
20878                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
20879                         /* swap the size field back to the cpu so we
20880                          * can assign it to the sgl.
20881                          */
20882                         bde.tus.w = le32_to_cpu(bpl->tus.w);
20883                         sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
20884                         /* The offsets in the sgl need to be accumulated
20885                          * separately for the request and reply lists.
20886                          * The request is always first, the reply follows.
20887                          */
20888                         switch (cmd) {
20889                         case CMD_GEN_REQUEST64_WQE:
20890                                 /* add up the reply sg entries */
20891                                 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
20892                                         inbound++;
20893                                 /* first inbound? reset the offset */
20894                                 if (inbound == 1)
20895                                         offset = 0;
20896                                 bf_set(lpfc_sli4_sge_offset, sgl, offset);
20897                                 bf_set(lpfc_sli4_sge_type, sgl,
20898                                         LPFC_SGE_TYPE_DATA);
20899                                 offset += bde.tus.f.bdeSize;
20900                                 break;
20901                         case CMD_FCP_TRSP64_WQE:
20902                                 bf_set(lpfc_sli4_sge_offset, sgl, 0);
20903                                 bf_set(lpfc_sli4_sge_type, sgl,
20904                                         LPFC_SGE_TYPE_DATA);
20905                                 break;
20906                         case CMD_FCP_TSEND64_WQE:
20907                         case CMD_FCP_TRECEIVE64_WQE:
20908                                 bf_set(lpfc_sli4_sge_type, sgl,
20909                                         bpl->tus.f.bdeFlags);
20910                                 if (i < 3)
20911                                         offset = 0;
20912                                 else
20913                                         offset += bde.tus.f.bdeSize;
20914                                 bf_set(lpfc_sli4_sge_offset, sgl, offset);
20915                                 break;
20916                         }
20917                         sgl->word2 = cpu_to_le32(sgl->word2);
20918                         bpl++;
20919                         sgl++;
20920                 }
20921         } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
20922                 /* The addrHigh and addrLow fields of the BDE have not
20923                  * been byteswapped yet so they need to be swapped
20924                  * before putting them in the sgl.
20925                  */
20926                 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
20927                 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
20928                 sgl->word2 = le32_to_cpu(sgl->word2);
20929                 bf_set(lpfc_sli4_sge_last, sgl, 1);
20930                 sgl->word2 = cpu_to_le32(sgl->word2);
20931                 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
20932         }
20933         return sglq->sli4_xritag;
20934 }
20935
20936 /**
20937  * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
20938  * @phba: Pointer to HBA context object.
20939  * @qp: Pointer to HDW queue.
20940  * @pwqe: Pointer to command WQE.
20941  **/
20942 int
20943 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20944                     struct lpfc_iocbq *pwqe)
20945 {
20946         union lpfc_wqe128 *wqe = &pwqe->wqe;
20947         struct lpfc_async_xchg_ctx *ctxp;
20948         struct lpfc_queue *wq;
20949         struct lpfc_sglq *sglq;
20950         struct lpfc_sli_ring *pring;
20951         unsigned long iflags;
20952         uint32_t ret = 0;
20953
20954         /* NVME_LS and NVME_LS ABTS requests. */
20955         if (pwqe->cmd_flag & LPFC_IO_NVME_LS) {
20956                 pring =  phba->sli4_hba.nvmels_wq->pring;
20957                 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20958                                           qp, wq_access);
20959                 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
20960                 if (!sglq) {
20961                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
20962                         return WQE_BUSY;
20963                 }
20964                 pwqe->sli4_lxritag = sglq->sli4_lxritag;
20965                 pwqe->sli4_xritag = sglq->sli4_xritag;
20966                 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
20967                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
20968                         return WQE_ERROR;
20969                 }
20970                 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
20971                        pwqe->sli4_xritag);
20972                 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
20973                 if (ret) {
20974                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
20975                         return ret;
20976                 }
20977
20978                 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20979                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20980
20981                 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20982                 return 0;
20983         }
20984
20985         /* NVME_FCREQ and NVME_ABTS requests */
20986         if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
20987                 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
20988                 wq = qp->io_wq;
20989                 pring = wq->pring;
20990
20991                 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
20992
20993                 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20994                                           qp, wq_access);
20995                 ret = lpfc_sli4_wq_put(wq, wqe);
20996                 if (ret) {
20997                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
20998                         return ret;
20999                 }
21000                 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21001                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21002
21003                 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
21004                 return 0;
21005         }
21006
21007         /* NVMET requests */
21008         if (pwqe->cmd_flag & LPFC_IO_NVMET) {
21009                 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
21010                 wq = qp->io_wq;
21011                 pring = wq->pring;
21012
21013                 ctxp = pwqe->context2;
21014                 sglq = ctxp->ctxbuf->sglq;
21015                 if (pwqe->sli4_xritag ==  NO_XRI) {
21016                         pwqe->sli4_lxritag = sglq->sli4_lxritag;
21017                         pwqe->sli4_xritag = sglq->sli4_xritag;
21018                 }
21019                 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21020                        pwqe->sli4_xritag);
21021                 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21022
21023                 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21024                                           qp, wq_access);
21025                 ret = lpfc_sli4_wq_put(wq, wqe);
21026                 if (ret) {
21027                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
21028                         return ret;
21029                 }
21030                 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21031                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21032
21033                 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
21034                 return 0;
21035         }
21036         return WQE_ERROR;
21037 }
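
/*
 * Illustrative sketch (not compiled): a minimal fast-path caller of
 * lpfc_sli4_issue_wqe(). "idx" and "pwqeq" are hypothetical; the cmd_flag
 * class decides which work queue the request is routed to.
 */
#if 0
	struct lpfc_sli4_hdw_queue *qp = &phba->sli4_hba.hdwq[idx];
	int rc;

	pwqeq->cmd_flag |= LPFC_IO_FCP;	/* routed to qp->io_wq above */
	rc = lpfc_sli4_issue_wqe(phba, qp, pwqeq);
	if (rc == WQE_BUSY) {
		/* No WQE slot (or sglq) available: back off and retry */
	} else if (rc) {
		/* WQE_ERROR or other failure: fail the IO back up */
	}
#endif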
21038
21039 /**
21040  * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
21041  * @phba: Pointer to HBA context object.
21042  * @cmdiocb: Pointer to driver command iocb object.
21043  * @cmpl: completion function.
21044  *
21045  * Fill in the appropriate fields for the abort WQE and call the
21046  * internal routine lpfc_sli4_issue_wqe to send the WQE.
21047  * This function is called with the hbalock held and no ring_lock held.
21048  *
21049  * RETURNS 0 - SUCCESS
21050  **/
21051
21052 int
21053 lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
21054                             void *cmpl)
21055 {
21056         struct lpfc_vport *vport = cmdiocb->vport;
21057         struct lpfc_iocbq *abtsiocb = NULL;
21058         union lpfc_wqe128 *abtswqe;
21059         struct lpfc_io_buf *lpfc_cmd;
21060         int retval = IOCB_ERROR;
21061         u16 xritag = cmdiocb->sli4_xritag;
21062
21063         /*
21064          * The scsi command can not be in txq and it is in flight because the
21065          * The SCSI command cannot be in the txq; it is in flight because
21066          * pCmd is still pointing at the SCSI command we have to abort. There
21067          */
21068
21069         abtsiocb = __lpfc_sli_get_iocbq(phba);
21070         if (!abtsiocb)
21071                 return WQE_NORESOURCE;
21072
21073         /* Indicate the IO is being aborted by the driver. */
21074         cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
21075
21076         abtswqe = &abtsiocb->wqe;
21077         memset(abtswqe, 0, sizeof(*abtswqe));
21078
21079         if (!lpfc_is_link_up(phba))
21080                 bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
21081         bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
21082         abtswqe->abort_cmd.rsrvd5 = 0;
21083         abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
21084         bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
21085         bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
21086         bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
21087         bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
21088         bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
21089         bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
21090
21091         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
21092         abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
21093         abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX;
21094         if (cmdiocb->cmd_flag & LPFC_IO_FCP)
21095                 abtsiocb->cmd_flag |= LPFC_IO_FCP;
21096         if (cmdiocb->cmd_flag & LPFC_IO_NVME)
21097                 abtsiocb->cmd_flag |= LPFC_IO_NVME;
21098         if (cmdiocb->cmd_flag & LPFC_IO_FOF)
21099                 abtsiocb->cmd_flag |= LPFC_IO_FOF;
21100         abtsiocb->vport = vport;
21101         abtsiocb->cmd_cmpl = cmpl;
21102
21103         lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
21104         retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
21105
21106         lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21107                          "0359 Abort xri x%x, original iotag x%x, "
21108                          "abort cmd iotag x%x retval x%x\n",
21109                          xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
21110
21111         if (retval) {
21112                 cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
21113                 __lpfc_sli_release_iocbq(phba, abtsiocb);
21114         }
21115
21116         return retval;
21117 }
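
/*
 * Illustrative sketch (not compiled): issuing an abort for an in-flight IO.
 * "iocb" and "example_abort_cmpl" are hypothetical; the completion handler
 * runs when the abort WQE completes.
 */
#if 0
	int rc;

	/* Called with phba->hbalock held and no ring_lock held */
	rc = lpfc_sli4_issue_abort_iotag(phba, iocb, example_abort_cmpl);
	if (rc == WQE_NORESOURCE) {
		/* No iocbq was available for the abort WQE */
	}
#endif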
21118
21119 #ifdef LPFC_MXP_STAT
21120 /**
21121  * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
21122  * @phba: pointer to lpfc hba data structure.
21123  * @hwqid: index of the HWQ to snapshot.
21124  *
21125  * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
21126  * 15 seconds after a test case starts running.
21127  *
21128  * The user should call lpfc_debugfs_multixripools_write before running a test
21129  * case to clear stat_snapshot_taken, then start the test case. While the test
21130  * case is running, stat_snapshot_taken is incremented by 1 each time this
21131  * routine is called from the heartbeat timer. When stat_snapshot_taken equals
21132  * LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
21133  **/
21134 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
21135 {
21136         struct lpfc_sli4_hdw_queue *qp;
21137         struct lpfc_multixri_pool *multixri_pool;
21138         struct lpfc_pvt_pool *pvt_pool;
21139         struct lpfc_pbl_pool *pbl_pool;
21140         u32 txcmplq_cnt;
21141
21142         qp = &phba->sli4_hba.hdwq[hwqid];
21143         multixri_pool = qp->p_multixri_pool;
21144         if (!multixri_pool)
21145                 return;
21146
21147         if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
21148                 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21149                 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21150                 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21151
21152                 multixri_pool->stat_pbl_count = pbl_pool->count;
21153                 multixri_pool->stat_pvt_count = pvt_pool->count;
21154                 multixri_pool->stat_busy_count = txcmplq_cnt;
21155         }
21156
21157         multixri_pool->stat_snapshot_taken++;
21158 }
21159 #endif
21160
21161 /**
21162  * lpfc_adjust_pvt_pool_count - Adjust private pool count
21163  * @phba: pointer to lpfc hba data structure.
21164  * @hwqid: index of the HWQ to operate on.
21165  *
21166  * This routine moves some XRIs from the private to the public pool when the
21167  * private pool is not busy.
21168  **/
21169 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
21170 {
21171         struct lpfc_multixri_pool *multixri_pool;
21172         u32 io_req_count;
21173         u32 prev_io_req_count;
21174
21175         multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21176         if (!multixri_pool)
21177                 return;
21178         io_req_count = multixri_pool->io_req_count;
21179         prev_io_req_count = multixri_pool->prev_io_req_count;
21180
21181         if (prev_io_req_count != io_req_count) {
21182                 /* Private pool is busy */
21183                 multixri_pool->prev_io_req_count = io_req_count;
21184         } else {
21185                 /* Private pool is not busy.
21186                  * Move XRIs from private to public pool.
21187                  */
21188                 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
21189         }
21190 }
21191
21192 /**
21193  * lpfc_adjust_high_watermark - Adjust high watermark
21194  * @phba: pointer to lpfc hba data structure.
21195  * @hwqid: index of the HWQ to operate on.
21196  *
21197  * This routine sets the high watermark to the number of outstanding XRIs,
21198  * but makes sure the new value stays between xri_limit/2 and xri_limit.
21199  **/
21200 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
21201 {
21202         u32 new_watermark;
21203         u32 watermark_max;
21204         u32 watermark_min;
21205         u32 xri_limit;
21206         u32 txcmplq_cnt;
21207         u32 abts_io_bufs;
21208         struct lpfc_multixri_pool *multixri_pool;
21209         struct lpfc_sli4_hdw_queue *qp;
21210
21211         qp = &phba->sli4_hba.hdwq[hwqid];
21212         multixri_pool = qp->p_multixri_pool;
21213         if (!multixri_pool)
21214                 return;
21215         xri_limit = multixri_pool->xri_limit;
21216
21217         watermark_max = xri_limit;
21218         watermark_min = xri_limit / 2;
21219
21220         txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21221         abts_io_bufs = qp->abts_scsi_io_bufs;
21222         abts_io_bufs += qp->abts_nvme_io_bufs;
21223
21224         new_watermark = txcmplq_cnt + abts_io_bufs;
21225         new_watermark = min(watermark_max, new_watermark);
21226         new_watermark = max(watermark_min, new_watermark);
21227         multixri_pool->pvt_pool.high_watermark = new_watermark;
21228
21229 #ifdef LPFC_MXP_STAT
21230         multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
21231                                           new_watermark);
21232 #endif
21233 }
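
/*
 * The watermark update above is equivalent to the kernel clamp() helper; a
 * minimal sketch. With xri_limit = 512, txcmplq_cnt = 100 and
 * abts_io_bufs = 20, this yields clamp(120, 256, 512) = 256.
 */
#if 0
	new_watermark = clamp(txcmplq_cnt + abts_io_bufs,
			      xri_limit / 2, xri_limit);
#endif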
21234
21235 /**
21236  * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
21237  * @phba: pointer to lpfc hba data structure.
21238  * @hwqid: index of the HWQ to operate on.
21239  *
21240  * This routine is called from the heartbeat timer when pvt_pool is idle.
21241  * All free XRIs are moved from the private to the public pool on hwqid in
21242  * two steps. The first step moves (all - low_watermark) XRIs.
21243  * The second step moves the rest of the XRIs.
21244  **/
21245 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
21246 {
21247         struct lpfc_pbl_pool *pbl_pool;
21248         struct lpfc_pvt_pool *pvt_pool;
21249         struct lpfc_sli4_hdw_queue *qp;
21250         struct lpfc_io_buf *lpfc_ncmd;
21251         struct lpfc_io_buf *lpfc_ncmd_next;
21252         unsigned long iflag;
21253         struct list_head tmp_list;
21254         u32 tmp_count;
21255
21256         qp = &phba->sli4_hba.hdwq[hwqid];
21257         pbl_pool = &qp->p_multixri_pool->pbl_pool;
21258         pvt_pool = &qp->p_multixri_pool->pvt_pool;
21259         tmp_count = 0;
21260
21261         lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
21262         lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
21263
21264         if (pvt_pool->count > pvt_pool->low_watermark) {
21265                 /* Step 1: move (all - low_watermark) from pvt_pool
21266                  * to pbl_pool
21267                  */
21268
21269                 /* Move low watermark of bufs from pvt_pool to tmp_list */
21270                 INIT_LIST_HEAD(&tmp_list);
21271                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21272                                          &pvt_pool->list, list) {
21273                         list_move_tail(&lpfc_ncmd->list, &tmp_list);
21274                         tmp_count++;
21275                         if (tmp_count >= pvt_pool->low_watermark)
21276                                 break;
21277                 }
21278
21279                 /* Move all bufs from pvt_pool to pbl_pool */
21280                 list_splice_init(&pvt_pool->list, &pbl_pool->list);
21281
21282                 /* Move all bufs from tmp_list to pvt_pool */
21283                 list_splice(&tmp_list, &pvt_pool->list);
21284
21285                 pbl_pool->count += (pvt_pool->count - tmp_count);
21286                 pvt_pool->count = tmp_count;
21287         } else {
21288                 /* Step 2: move the rest from pvt_pool to pbl_pool */
21289                 list_splice_init(&pvt_pool->list, &pbl_pool->list);
21290                 pbl_pool->count += pvt_pool->count;
21291                 pvt_pool->count = 0;
21292         }
21293
21294         spin_unlock(&pvt_pool->lock);
21295         spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21296 }
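
/*
 * Worked example (illustrative): with pvt_pool->count = 40 and
 * low_watermark = 16, step 1 keeps 16 bufs in pvt_pool and moves the other
 * 24 to pbl_pool. On a later idle heartbeat, step 2 moves the remaining 16,
 * leaving pvt_pool empty.
 */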
21297
21298 /**
21299  * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21300  * @phba: pointer to lpfc hba data structure
21301  * @qp: pointer to HDW queue
21302  * @pbl_pool: specified public free XRI pool
21303  * @pvt_pool: specified private free XRI pool
21304  * @count: number of XRIs to move
21305  *
21306  * This routine tries to move some free common bufs from the specified pbl_pool
21307  * to the specified pvt_pool. It might move fewer than count XRIs if there
21308  * are not enough in the public pool.
21309  *
21310  * Return:
21311  *   true - if XRIs are successfully moved from the specified pbl_pool to the
21312  *          specified pvt_pool
21313  *   false - if the specified pbl_pool is empty or locked by someone else
21314  **/
21315 static bool
21316 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
21317                           struct lpfc_pbl_pool *pbl_pool,
21318                           struct lpfc_pvt_pool *pvt_pool, u32 count)
21319 {
21320         struct lpfc_io_buf *lpfc_ncmd;
21321         struct lpfc_io_buf *lpfc_ncmd_next;
21322         unsigned long iflag;
21323         int ret;
21324
21325         ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
21326         if (ret) {
21327                 if (pbl_pool->count) {
21328                         /* Move a batch of XRIs from public to private pool */
21329                         lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
21330                         list_for_each_entry_safe(lpfc_ncmd,
21331                                                  lpfc_ncmd_next,
21332                                                  &pbl_pool->list,
21333                                                  list) {
21334                                 list_move_tail(&lpfc_ncmd->list,
21335                                                &pvt_pool->list);
21336                                 pvt_pool->count++;
21337                                 pbl_pool->count--;
21338                                 count--;
21339                                 if (count == 0)
21340                                         break;
21341                         }
21342
21343                         spin_unlock(&pvt_pool->lock);
21344                         spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21345                         return true;
21346                 }
21347                 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21348         }
21349
21350         return false;
21351 }
21352
21353 /**
21354  * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21355  * @phba: pointer to lpfc hba data structure.
21356  * @hwqid: index of the HWQ to operate on.
21357  * @count: number of XRIs to move
21358  *
21359  * This routine tries to find some free common bufs in one of the public pools
21360  * with a round-robin method. The search always starts from the local hwqid,
21361  * then continues with the HWQ found last time (rrb_next_hwqid). Once a public
21362  * pool is found, a batch of free common bufs is moved to the private pool on
21363  * hwqid. It might move fewer than count XRIs if the public pools lack enough.
21364  **/
21365 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
21366 {
21367         struct lpfc_multixri_pool *multixri_pool;
21368         struct lpfc_multixri_pool *next_multixri_pool;
21369         struct lpfc_pvt_pool *pvt_pool;
21370         struct lpfc_pbl_pool *pbl_pool;
21371         struct lpfc_sli4_hdw_queue *qp;
21372         u32 next_hwqid;
21373         u32 hwq_count;
21374         int ret;
21375
21376         qp = &phba->sli4_hba.hdwq[hwqid];
21377         multixri_pool = qp->p_multixri_pool;
21378         pvt_pool = &multixri_pool->pvt_pool;
21379         pbl_pool = &multixri_pool->pbl_pool;
21380
21381         /* Check if local pbl_pool is available */
21382         ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
21383         if (ret) {
21384 #ifdef LPFC_MXP_STAT
21385                 multixri_pool->local_pbl_hit_count++;
21386 #endif
21387                 return;
21388         }
21389
21390         hwq_count = phba->cfg_hdw_queue;
21391
21392         /* Get the next hwqid which was found last time */
21393         next_hwqid = multixri_pool->rrb_next_hwqid;
21394
21395         do {
21396                 /* Go to next hwq */
21397                 next_hwqid = (next_hwqid + 1) % hwq_count;
21398
21399                 next_multixri_pool =
21400                         phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
21401                 pbl_pool = &next_multixri_pool->pbl_pool;
21402
21403                 /* Check if the public free xri pool is available */
21404                 ret = _lpfc_move_xri_pbl_to_pvt(
21405                         phba, qp, pbl_pool, pvt_pool, count);
21406
21407                 /* Exit while-loop on success or when all hwqids are checked */
21408         } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
21409
21410         /* Starting point for the next time */
21411         multixri_pool->rrb_next_hwqid = next_hwqid;
21412
21413         if (!ret) {
21414                 /* stats: all public pools are empty */
21415                 multixri_pool->pbl_empty_count++;
21416         }
21417
21418 #ifdef LPFC_MXP_STAT
21419         if (ret) {
21420                 if (next_hwqid == hwqid)
21421                         multixri_pool->local_pbl_hit_count++;
21422                 else
21423                         multixri_pool->other_pbl_hit_count++;
21424         }
21425 #endif
21426 }
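
/*
 * Worked example (illustrative): with cfg_hdw_queue = 4 and
 * rrb_next_hwqid = 2, the loop above probes the public pools on HWQs
 * 3, 0, 1 and 2 in that order, stopping early once a probe succeeds.
 */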
21427
21428 /**
21429  * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
21430  * @phba: pointer to lpfc hba data structure.
21431  * @hwqid: index of the HWQ to operate on.
21432  *
21433  * This routine gets a batch of XRIs from the pbl_pool if the pvt_pool count
21434  * is below the low watermark.
21435  **/
21436 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
21437 {
21438         struct lpfc_multixri_pool *multixri_pool;
21439         struct lpfc_pvt_pool *pvt_pool;
21440
21441         multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21442         pvt_pool = &multixri_pool->pvt_pool;
21443
21444         if (pvt_pool->count < pvt_pool->low_watermark)
21445                 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21446 }
21447
21448 /**
21449  * lpfc_release_io_buf - Return one IO buf back to free pool
21450  * @phba: pointer to lpfc hba data structure.
21451  * @lpfc_ncmd: IO buf to be returned.
21452  * @qp: the HWQ the IO buf belongs to.
21453  *
21454  * This routine returns one IO buf back to the free pool. If this is an urgent
21455  * IO, the IO buf is returned to the expedite pool. If cfg_xri_rebalancing==1,
21456  * the IO buf is returned to the pbl_pool or pvt_pool based on the watermark
21457  * and xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
21458  * lpfc_io_buf_list_put.
21459  **/
21460 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
21461                          struct lpfc_sli4_hdw_queue *qp)
21462 {
21463         unsigned long iflag;
21464         struct lpfc_pbl_pool *pbl_pool;
21465         struct lpfc_pvt_pool *pvt_pool;
21466         struct lpfc_epd_pool *epd_pool;
21467         u32 txcmplq_cnt;
21468         u32 xri_owned;
21469         u32 xri_limit;
21470         u32 abts_io_bufs;
21471
21472         /* MUST zero fields if buffer is reused by another protocol */
21473         lpfc_ncmd->nvmeCmd = NULL;
21474         lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL;
21475
21476         if (phba->cfg_xpsgl && !phba->nvmet_support &&
21477             !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
21478                 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
21479
21480         if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
21481                 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
21482
21483         if (phba->cfg_xri_rebalancing) {
21484                 if (lpfc_ncmd->expedite) {
21485                         /* Return to expedite pool */
21486                         epd_pool = &phba->epd_pool;
21487                         spin_lock_irqsave(&epd_pool->lock, iflag);
21488                         list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
21489                         epd_pool->count++;
21490                         spin_unlock_irqrestore(&epd_pool->lock, iflag);
21491                         return;
21492                 }
21493
21494                 /* Avoid invalid access if an IO sneaks in and is being rejected
21495                  * just _after_ xri pools are destroyed in lpfc_offline.
21496                  * Nothing much can be done at this point.
21497                  */
21498                 if (!qp->p_multixri_pool)
21499                         return;
21500
21501                 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21502                 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21503
21504                 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21505                 abts_io_bufs = qp->abts_scsi_io_bufs;
21506                 abts_io_bufs += qp->abts_nvme_io_bufs;
21507
21508                 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
21509                 xri_limit = qp->p_multixri_pool->xri_limit;
21510
21511 #ifdef LPFC_MXP_STAT
21512                 if (xri_owned <= xri_limit)
21513                         qp->p_multixri_pool->below_limit_count++;
21514                 else
21515                         qp->p_multixri_pool->above_limit_count++;
21516 #endif
21517
21518                 /* XRI goes to either public or private free xri pool
21519                  *     based on watermark and xri_limit
21520                  */
21521                 if ((pvt_pool->count < pvt_pool->low_watermark) ||
21522                     (xri_owned < xri_limit &&
21523                      pvt_pool->count < pvt_pool->high_watermark)) {
21524                         lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
21525                                                   qp, free_pvt_pool);
21526                         list_add_tail(&lpfc_ncmd->list,
21527                                       &pvt_pool->list);
21528                         pvt_pool->count++;
21529                         spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21530                 } else {
21531                         lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
21532                                                   qp, free_pub_pool);
21533                         list_add_tail(&lpfc_ncmd->list,
21534                                       &pbl_pool->list);
21535                         pbl_pool->count++;
21536                         spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21537                 }
21538         } else {
21539                 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
21540                                           qp, free_xri);
21541                 list_add_tail(&lpfc_ncmd->list,
21542                               &qp->lpfc_io_buf_list_put);
21543                 qp->put_io_bufs++;
21544                 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
21545                                        iflag);
21546         }
21547 }
21548
21549 /**
21550  * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
21551  * @phba: pointer to lpfc hba data structure.
21552  * @qp: pointer to HDW queue
21553  * @pvt_pool: pointer to private pool data structure.
21554  * @ndlp: pointer to lpfc nodelist data structure.
21555  *
21556  * This routine tries to get one free IO buf from private pool.
21557  *
21558  * Return:
21559  *   pointer to one free IO buf - if private pool is not empty
21560  *   NULL - if private pool is empty
21561  **/
21562 static struct lpfc_io_buf *
21563 lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
21564                                   struct lpfc_sli4_hdw_queue *qp,
21565                                   struct lpfc_pvt_pool *pvt_pool,
21566                                   struct lpfc_nodelist *ndlp)
21567 {
21568         struct lpfc_io_buf *lpfc_ncmd;
21569         struct lpfc_io_buf *lpfc_ncmd_next;
21570         unsigned long iflag;
21571
21572         lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
21573         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21574                                  &pvt_pool->list, list) {
21575                 if (lpfc_test_rrq_active(
21576                         phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
21577                         continue;
21578                 list_del(&lpfc_ncmd->list);
21579                 pvt_pool->count--;
21580                 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21581                 return lpfc_ncmd;
21582         }
21583         spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21584
21585         return NULL;
21586 }
21587
21588 /**
21589  * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
21590  * @phba: pointer to lpfc hba data structure.
21591  *
21592  * This routine tries to get one free IO buf from expedite pool.
21593  *
21594  * Return:
21595  *   pointer to one free IO buf - if expedite pool is not empty
21596  *   NULL - if expedite pool is empty
21597  **/
21598 static struct lpfc_io_buf *
21599 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
21600 {
21601         struct lpfc_io_buf *lpfc_ncmd;
21602         struct lpfc_io_buf *lpfc_ncmd_next;
21603         unsigned long iflag;
21604         struct lpfc_epd_pool *epd_pool;
21605
21606         epd_pool = &phba->epd_pool;
21607         lpfc_ncmd = NULL;
21608
21609         spin_lock_irqsave(&epd_pool->lock, iflag);
21610         if (epd_pool->count > 0) {
21611                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21612                                          &epd_pool->list, list) {
21613                         list_del(&lpfc_ncmd->list);
21614                         epd_pool->count--;
21615                         break;
21616                 }
21617         }
21618         spin_unlock_irqrestore(&epd_pool->lock, iflag);
21619
21620         return lpfc_ncmd;
21621 }
21622
21623 /**
21624  * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
21625  * @phba: pointer to lpfc hba data structure.
21626  * @ndlp: pointer to lpfc nodelist data structure.
21627  * @hwqid: index of the HWQ to operate on
21628  * @expedite: 1 means this request is urgent.
21629  *
21630  * This routine will do the following actions and then return a pointer to
21631  * one free IO buf.
21632  *
21633  * 1. If the private free xri pool is empty, move some XRIs from the public
21634  *    to the private pool.
21635  * 2. Get one XRI from private free xri pool.
21636  * 3. If we fail to get one from pvt_pool and this is an expedite request,
21637  *    get one free xri from expedite pool.
21638  *
21639  * Note: ndlp is only used on SCSI side for RRQ testing.
21640  *       The caller should pass NULL for ndlp on NVME side.
21641  *
21642  * Return:
21643  *   pointer to one free IO buf - if private pool is not empty
21644  *   NULL - if private pool is empty
21645  **/
21646 static struct lpfc_io_buf *
21647 lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
21648                                     struct lpfc_nodelist *ndlp,
21649                                     int hwqid, int expedite)
21650 {
21651         struct lpfc_sli4_hdw_queue *qp;
21652         struct lpfc_multixri_pool *multixri_pool;
21653         struct lpfc_pvt_pool *pvt_pool;
21654         struct lpfc_io_buf *lpfc_ncmd;
21655
21656         qp = &phba->sli4_hba.hdwq[hwqid];
21657         lpfc_ncmd = NULL;
21658         if (!qp) {
21659                 lpfc_printf_log(phba, KERN_INFO,
21660                                 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21661                                 "5556 NULL qp for hwqid  x%x\n", hwqid);
21662                 return lpfc_ncmd;
21663         }
21664         multixri_pool = qp->p_multixri_pool;
21665         if (!multixri_pool) {
21666                 lpfc_printf_log(phba, KERN_INFO,
21667                                 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21668                                 "5557 NULL multixri for hwqid  x%x\n", hwqid);
21669                 return lpfc_ncmd;
21670         }
21671         pvt_pool = &multixri_pool->pvt_pool;
21672         if (!pvt_pool) {
21673                 lpfc_printf_log(phba, KERN_INFO,
21674                                 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21675                                 "5558 NULL pvt_pool for hwqid  x%x\n", hwqid);
21676                 return lpfc_ncmd;
21677         }
21678         multixri_pool->io_req_count++;
21679
21680         /* If pvt_pool is empty, move some XRIs from public to private pool */
21681         if (pvt_pool->count == 0)
21682                 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21683
21684         /* Get one XRI from private free xri pool */
21685         lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
21686
21687         if (lpfc_ncmd) {
21688                 lpfc_ncmd->hdwq = qp;
21689                 lpfc_ncmd->hdwq_no = hwqid;
21690         } else if (expedite) {
21691                 /* If we fail to get one from pvt_pool and this is an expedite
21692                  * request, get one free xri from expedite pool.
21693                  */
21694                 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
21695         }
21696
21697         return lpfc_ncmd;
21698 }
21699
21700 static inline struct lpfc_io_buf *
21701 lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
21702 {
21703         struct lpfc_sli4_hdw_queue *qp;
21704         struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
21705
21706         qp = &phba->sli4_hba.hdwq[idx];
21707         list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
21708                                  &qp->lpfc_io_buf_list_get, list) {
21709                 if (lpfc_test_rrq_active(phba, ndlp,
21710                                          lpfc_cmd->cur_iocbq.sli4_lxritag))
21711                         continue;
21712
21713                 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
21714                         continue;
21715
21716                 list_del_init(&lpfc_cmd->list);
21717                 qp->get_io_bufs--;
21718                 lpfc_cmd->hdwq = qp;
21719                 lpfc_cmd->hdwq_no = idx;
21720                 return lpfc_cmd;
21721         }
21722         return NULL;
21723 }
21724
21725 /**
21726  * lpfc_get_io_buf - Get one IO buffer from free pool
21727  * @phba: The HBA for which this call is being executed.
21728  * @ndlp: pointer to lpfc nodelist data structure.
21729  * @hwqid: index of the HWQ to operate on
21730  * @expedite: 1 means this request is urgent.
21731  *
21732  * This routine gets one IO buffer from the free pool. If cfg_xri_rebalancing==1,
21733  * it removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0, it
21734  * removes an IO buffer from the head of the @hdwq io_buf_list and returns it.
21735  *
21736  * Note: ndlp is only used on SCSI side for RRQ testing.
21737  *       The caller should pass NULL for ndlp on NVME side.
21738  *
21739  * Return codes:
21740  *   NULL - Error
21741  *   Pointer to lpfc_io_buf - Success
21742  **/
21743 struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
21744                                     struct lpfc_nodelist *ndlp,
21745                                     u32 hwqid, int expedite)
21746 {
21747         struct lpfc_sli4_hdw_queue *qp;
21748         unsigned long iflag;
21749         struct lpfc_io_buf *lpfc_cmd;
21750
21751         qp = &phba->sli4_hba.hdwq[hwqid];
21752         lpfc_cmd = NULL;
21753         if (!qp) {
21754                 lpfc_printf_log(phba, KERN_WARNING,
21755                                 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21756                                 "5555 NULL qp for hwqid  x%x\n", hwqid);
21757                 return lpfc_cmd;
21758         }
21759
21760         if (phba->cfg_xri_rebalancing)
21761                 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
21762                         phba, ndlp, hwqid, expedite);
21763         else {
21764                 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
21765                                           qp, alloc_xri_get);
21766                 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
21767                         lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
21768                 if (!lpfc_cmd) {
21769                         lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
21770                                           qp, alloc_xri_put);
21771                         list_splice(&qp->lpfc_io_buf_list_put,
21772                                     &qp->lpfc_io_buf_list_get);
21773                         qp->get_io_bufs += qp->put_io_bufs;
21774                         INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
21775                         qp->put_io_bufs = 0;
21776                         spin_unlock(&qp->io_buf_list_put_lock);
21777                         if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
21778                             expedite)
21779                                 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
21780                 }
21781                 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
21782         }
21783
21784         return lpfc_cmd;
21785 }
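
/*
 * Illustrative sketch (not compiled): pairing lpfc_get_io_buf() with
 * lpfc_release_io_buf(). ndlp is only used on the SCSI side for RRQ
 * testing; NVME callers pass NULL. "iobuf" is hypothetical.
 */
#if 0
	struct lpfc_io_buf *iobuf;

	iobuf = lpfc_get_io_buf(phba, ndlp, hwqid, 0);
	if (!iobuf)
		return -EBUSY;	/* pools exhausted, not an expedite request */

	/* ... build and issue the IO ... */

	/* On completion (or failure to issue), return it to its HWQ pools */
	lpfc_release_io_buf(phba, iobuf, iobuf->hdwq);
#endif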
21786
21787 /**
21788  * lpfc_read_object - Retrieve object data from HBA
21789  * @phba: The HBA for which this call is being executed.
21790  * @rdobject: Pathname of object data we want to read.
21791  * @datap: Pointer to where data will be copied to.
21792  * @datasz: size of data area
21793  *
21794  * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less.
21795  * The data will be truncated if datasz is not large enough.
21796  * Version 1 is not supported with Embedded mbox cmd, so we must use version 0.
21797  * Returns the actual bytes read from the object.
21798  */
21799 int
21800 lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
21801                  uint32_t datasz)
21802 {
21803         struct lpfc_mbx_read_object *read_object;
21804         LPFC_MBOXQ_t *mbox;
21805         int rc, length, eof, j, byte_cnt = 0;
21806         uint32_t shdr_status, shdr_add_status;
21807         union lpfc_sli4_cfg_shdr *shdr;
21808         struct lpfc_dmabuf *pcmd;
21809         u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
21810
21811         /* sanity check on input parameters */
21812         if (!datap)
21813                 return -ENODEV;
21814
21815         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
21816         if (!mbox)
21817                 return -ENOMEM;
21818         length = (sizeof(struct lpfc_mbx_read_object) -
21819                   sizeof(struct lpfc_sli4_cfg_mhdr));
21820         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
21821                          LPFC_MBOX_OPCODE_READ_OBJECT,
21822                          length, LPFC_SLI4_MBX_EMBED);
21823         read_object = &mbox->u.mqe.un.read_object;
21824         shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr;
21825
21826         bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0);
21827         bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz);
21828         read_object->u.request.rd_object_offset = 0;
21829         read_object->u.request.rd_object_cnt = 1;
21830
21831         memset((void *)read_object->u.request.rd_object_name, 0,
21832                LPFC_OBJ_NAME_SZ);
21833         scnprintf((char *)rd_object_name, sizeof(rd_object_name), "%s", rdobject);
21834         for (j = 0; j < strlen(rdobject); j++)
21835                 read_object->u.request.rd_object_name[j] =
21836                         cpu_to_le32(rd_object_name[j]);
21837
21838         pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
21839         if (pcmd)
21840                 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
21841         if (!pcmd || !pcmd->virt) {
21842                 kfree(pcmd);
21843                 mempool_free(mbox, phba->mbox_mem_pool);
21844                 return -ENOMEM;
21845         }
21846         memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE);
21847         read_object->u.request.rd_object_hbuf[0].pa_lo =
21848                 putPaddrLow(pcmd->phys);
21849         read_object->u.request.rd_object_hbuf[0].pa_hi =
21850                 putPaddrHigh(pcmd->phys);
21851         read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE;
21852
21853         mbox->vport = phba->pport;
21854         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
21855         mbox->ctx_buf = NULL;
21856         mbox->ctx_ndlp = NULL;
21857
21858         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
21859         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
21860         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
21861
21862         if (shdr_status == STATUS_FAILED &&
21863             shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) {
21864                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
21865                                 "4674 No port cfg file in FW.\n");
21866                 byte_cnt = -ENOENT;
21867         } else if (shdr_status || shdr_add_status || rc) {
21868                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
21869                                 "2625 READ_OBJECT mailbox failed with "
21870                                 "status x%x add_status x%x, mbx status x%x\n",
21871                                 shdr_status, shdr_add_status, rc);
21872                 byte_cnt = -ENXIO;
21873         } else {
21874                 /* Success */
21875                 length = read_object->u.response.rd_object_actual_rlen;
21876                 eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response);
21877                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT,
21878                                 "2626 READ_OBJECT Success len %d:%d, EOF %d\n",
21879                                 length, datasz, eof);
21880
21881                 /* Detect the port config file exists but is empty */
21882                 /* Detect if the port config file exists but is empty */
21883                         byte_cnt = 0;
21884                         goto exit;
21885                 }
21886
21887                 byte_cnt = length;
21888                 lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt);
21889         }
21890
21891  exit:
21892         lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
21893         kfree(pcmd);
21894         mempool_free(mbox, phba->mbox_mem_pool);
21895         return byte_cnt;
21896 }
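
/*
 * Illustrative sketch (not compiled): reading a small object from the HBA.
 * The object name is hypothetical; real callers pass a firmware-defined
 * path. Reads are capped at LPFC_BPL_SIZE bytes and return the byte count
 * or a negative errno (-ENOENT if the object does not exist in FW).
 */
#if 0
	u32 *data;
	int len;

	data = kzalloc(LPFC_BPL_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	len = lpfc_read_object(phba, "/driver/example.cfg", data,
			       LPFC_BPL_SIZE);
	if (len >= 0) {
		/* len bytes of object data are now in data[] */
	}
	kfree(data);
#endif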
21897
21898 /**
21899  * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
21900  * @phba: The HBA for which this call is being executed.
21901  * @lpfc_buf: IO buf structure to append the SGL chunk
21902  *
21903  * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
21904  * and will allocate an SGL chunk if the pool is empty.
21905  *
21906  * Return codes:
21907  *   NULL - Error
21908  *   Pointer to sli4_hybrid_sgl - Success
21909  **/
21910 struct sli4_hybrid_sgl *
21911 lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
21912 {
21913         struct sli4_hybrid_sgl *list_entry = NULL;
21914         struct sli4_hybrid_sgl *tmp = NULL;
21915         struct sli4_hybrid_sgl *allocated_sgl = NULL;
21916         struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21917         struct list_head *buf_list = &hdwq->sgl_list;
21918         unsigned long iflags;
21919
21920         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21921
21922         if (likely(!list_empty(buf_list))) {
21923                 /* break off 1 chunk from the sgl_list */
21924                 list_for_each_entry_safe(list_entry, tmp,
21925                                          buf_list, list_node) {
21926                         list_move_tail(&list_entry->list_node,
21927                                        &lpfc_buf->dma_sgl_xtra_list);
21928                         break;
21929                 }
21930         } else {
21931                 /* allocate more */
21932                 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21933                 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
21934                                    cpu_to_node(hdwq->io_wq->chann));
21935                 if (!tmp) {
21936                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21937                                         "8353 error kmalloc memory for HDWQ "
21938                                         "%d %s\n",
21939                                         lpfc_buf->hdwq_no, __func__);
21940                         return NULL;
21941                 }
21942
21943                 tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
21944                                               GFP_ATOMIC, &tmp->dma_phys_sgl);
21945                 if (!tmp->dma_sgl) {
21946                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21947                                         "8354 error pool_alloc memory for HDWQ "
21948                                         "%d %s\n",
21949                                         lpfc_buf->hdwq_no, __func__);
21950                         kfree(tmp);
21951                         return NULL;
21952                 }
21953
21954                 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21955                 list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
21956         }
21957
21958         allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
21959                                         struct sli4_hybrid_sgl,
21960                                         list_node);
21961
21962         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21963
21964         return allocated_sgl;
21965 }
21966
21967 /**
21968  * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
21969  * @phba: The HBA for which this call is being executed.
21970  * @lpfc_buf: IO buf structure with the SGL chunk
21971  *
21972  * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
21973  *
21974  * Return codes:
21975  *   0 - Success
21976  *   -EINVAL - Error
21977  **/
21978 int
21979 lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
21980 {
21981         int rc = 0;
21982         struct sli4_hybrid_sgl *list_entry = NULL;
21983         struct sli4_hybrid_sgl *tmp = NULL;
21984         struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21985         struct list_head *buf_list = &hdwq->sgl_list;
21986         unsigned long iflags;
21987
21988         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21989
21990         if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
21991                 list_for_each_entry_safe(list_entry, tmp,
21992                                          &lpfc_buf->dma_sgl_xtra_list,
21993                                          list_node) {
21994                         list_move_tail(&list_entry->list_node,
21995                                        buf_list);
21996                 }
21997         } else {
21998                 rc = -EINVAL;
21999         }
22000
22001         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22002         return rc;
22003 }
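
/*
 * Illustrative sketch (not compiled): borrowing an extra SGL chunk for a
 * large IO and returning it on completion. The chunk's kernel virtual and
 * DMA addresses come back in dma_sgl and dma_phys_sgl.
 */
#if 0
	struct sli4_hybrid_sgl *sgl_xtra;

	sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_buf);
	if (!sgl_xtra)
		return -ENOMEM;
	/* ... chain sgl_xtra->dma_sgl / sgl_xtra->dma_phys_sgl into the IO ... */

	/* On IO completion, all chunks on dma_sgl_xtra_list go back at once */
	lpfc_put_sgl_per_hdwq(phba, lpfc_buf);
#endif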
22004
22005 /**
22006  * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
22007  * @phba: phba object
22008  * @hdwq: hdwq to cleanup sgl buff resources on
22009  *
22010  * This routine frees all SGL chunks of hdwq SGL chunk pool.
22011  *
22012  * Return codes:
22013  *   None
22014  **/
22015 void
22016 lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
22017                        struct lpfc_sli4_hdw_queue *hdwq)
22018 {
22019         struct list_head *buf_list = &hdwq->sgl_list;
22020         struct sli4_hybrid_sgl *list_entry = NULL;
22021         struct sli4_hybrid_sgl *tmp = NULL;
22022         unsigned long iflags;
22023
22024         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22025
22026         /* Free sgl pool */
22027         list_for_each_entry_safe(list_entry, tmp,
22028                                  buf_list, list_node) {
22029                 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
22030                               list_entry->dma_sgl,
22031                               list_entry->dma_phys_sgl);
22032                 list_del(&list_entry->list_node);
22033                 kfree(list_entry);
22034         }
22035
22036         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22037 }
22038
22039 /**
22040  * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
22041  * @phba: The HBA for which this call is being executed.
22042  * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
22043  *
22044  * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
22045  * and will allocate a CMD/RSP buffer if the pool is empty.
22046  *
22047  * Return codes:
22048  *   NULL - Error
22049  *   Pointer to fcp_cmd_rsp_buf - Success
22050  **/
22051 struct fcp_cmd_rsp_buf *
22052 lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22053                               struct lpfc_io_buf *lpfc_buf)
22054 {
22055         struct fcp_cmd_rsp_buf *list_entry = NULL;
22056         struct fcp_cmd_rsp_buf *tmp = NULL;
22057         struct fcp_cmd_rsp_buf *allocated_buf = NULL;
22058         struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22059         struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22060         unsigned long iflags;
22061
22062         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22063
22064         if (likely(!list_empty(buf_list))) {
22065                 /* break off 1 chunk from the list */
22066                 list_for_each_entry_safe(list_entry, tmp,
22067                                          buf_list,
22068                                          list_node) {
22069                         list_move_tail(&list_entry->list_node,
22070                                        &lpfc_buf->dma_cmd_rsp_list);
22071                         break;
22072                 }
22073         } else {
22074                 /* allocate more */
22075                 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22076                 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
22077                                    cpu_to_node(hdwq->io_wq->chann));
22078                 if (!tmp) {
22079                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22080                                         "8355 error kmalloc memory for HDWQ "
22081                                         "%d %s\n",
22082                                         lpfc_buf->hdwq_no, __func__);
22083                         return NULL;
22084                 }
22085
22086                 tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
22087                                                 GFP_ATOMIC,
22088                                                 &tmp->fcp_cmd_rsp_dma_handle);
22089
22090                 if (!tmp->fcp_cmnd) {
22091                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22092                                         "8356 error pool_alloc memory for HDWQ "
22093                                         "%d %s\n",
22094                                         lpfc_buf->hdwq_no, __func__);
22095                         kfree(tmp);
22096                         return NULL;
22097                 }
22098
22099                 tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
22100                                 sizeof(struct fcp_cmnd));
22101
22102                 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22103                 list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
22104         }
22105
22106         allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
22107                                         struct fcp_cmd_rsp_buf,
22108                                         list_node);
22109
22110         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22111
22112         return allocated_buf;
22113 }
22114
22115 /**
22116  * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
22117  * @phba: The HBA for which this call is being executed.
22118  * @lpfc_buf: IO buf structure with the CMD/RSP buf
22119  *
22120  * This routine puts one CMD/RSP buffer back into the hdwq's CMD/RSP pool.
22121  *
22122  * Return codes:
22123  *   0 - Success
22124  *   -EINVAL - Error
22125  **/
22126 int
22127 lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22128                               struct lpfc_io_buf *lpfc_buf)
22129 {
22130         int rc = 0;
22131         struct fcp_cmd_rsp_buf *list_entry = NULL;
22132         struct fcp_cmd_rsp_buf *tmp = NULL;
22133         struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22134         struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22135         unsigned long iflags;
22136
22137         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22138
22139         if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
22140                 list_for_each_entry_safe(list_entry, tmp,
22141                                          &lpfc_buf->dma_cmd_rsp_list,
22142                                          list_node) {
22143                         list_move_tail(&list_entry->list_node,
22144                                        buf_list);
22145                 }
22146         } else {
22147                 rc = -EINVAL;
22148         }
22149
22150         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22151         return rc;
22152 }
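
/*
 * Illustrative sketch (not compiled): the FCP response buffer lives in the
 * same DMA allocation as the command, immediately after it, so one get
 * yields both.
 */
#if 0
	struct fcp_cmd_rsp_buf *buf;

	buf = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_buf);
	if (!buf)
		return -ENOMEM;
	/* buf->fcp_cmnd is DMA-able; buf->fcp_rsp follows it in memory */

	lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_buf);
#endif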
22153
22154 /**
22155  * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
22156  * @phba: phba object
22157  * @hdwq: hdwq to cleanup cmd rsp buff resources on
22158  *
22159  * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
22160  *
22161  * Return codes:
22162  *   None
22163  **/
22164 void
22165 lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22166                                struct lpfc_sli4_hdw_queue *hdwq)
22167 {
22168         struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22169         struct fcp_cmd_rsp_buf *list_entry = NULL;
22170         struct fcp_cmd_rsp_buf *tmp = NULL;
22171         unsigned long iflags;
22172
22173         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22174
22175         /* Free cmd_rsp buf pool */
22176         list_for_each_entry_safe(list_entry, tmp,
22177                                  buf_list,
22178                                  list_node) {
22179                 dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
22180                               list_entry->fcp_cmnd,
22181                               list_entry->fcp_cmd_rsp_dma_handle);
22182                 list_del(&list_entry->list_node);
22183                 kfree(list_entry);
22184         }
22185
22186         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22187 }
22188
22189 /**
22190  * lpfc_sli_prep_wqe - Prepare WQE for the command to be posted
22191  * @phba: phba object
22192  * @job: job entry of the command to be posted.
22193  *
22194  * Fill in the common fields of the WQE for each command type.
22195  *
22196  * Return codes:
22197  *      None
22198  **/
22199 void
22200 lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
22201 {
22202         u8 cmnd;
22203         u32 *pcmd;
22204         u32 if_type = 0;
22205         u32 fip, abort_tag;
22206         struct lpfc_nodelist *ndlp = NULL;
22207         union lpfc_wqe128 *wqe = &job->wqe;
22208         struct lpfc_dmabuf *context2;
22209         u32 els_id = LPFC_ELS_ID_DEFAULT;
22210         u8 command_type = ELS_COMMAND_NON_FIP;
22211
22212         fip = phba->hba_flag & HBA_FIP_SUPPORT;
22213         /* The fcp commands will set command type */
22214         if (job->cmd_flag &  LPFC_IO_FCP)
22215                 command_type = FCP_COMMAND;
22216         else if (fip && (job->cmd_flag & LPFC_FIP_ELS_ID_MASK))
22217                 command_type = ELS_COMMAND_FIP;
22218         else
22219                 command_type = ELS_COMMAND_NON_FIP;
22220
22221         abort_tag = job->iotag;
22222         cmnd = bf_get(wqe_cmnd, &wqe->els_req.wqe_com);
22223
22224         switch (cmnd) {
22225         case CMD_ELS_REQUEST64_WQE:
22226                 if (job->cmd_flag & LPFC_IO_LIBDFC)
22227                         ndlp = job->context_un.ndlp;
22228                 else
22229                         ndlp = (struct lpfc_nodelist *)job->context1;
22230
22231                 /* CCP CCPE PV PRI in word10 were set in the memcpy */
22232                 if (command_type == ELS_COMMAND_FIP)
22233                         els_id = ((job->cmd_flag & LPFC_FIP_ELS_ID_MASK)
22234                                   >> LPFC_FIP_ELS_ID_SHIFT);

                if_type = bf_get(lpfc_sli_intf_if_type,
                                 &phba->sli4_hba.sli_intf);
                if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
                        context2 = (struct lpfc_dmabuf *)job->context2;
                        pcmd = (u32 *)context2->virt;
                        if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
                                     *pcmd == ELS_CMD_SCR ||
                                     *pcmd == ELS_CMD_RDF ||
                                     *pcmd == ELS_CMD_EDC ||
                                     *pcmd == ELS_CMD_RSCN_XMT ||
                                     *pcmd == ELS_CMD_FDISC ||
                                     *pcmd == ELS_CMD_LOGO ||
                                     *pcmd == ELS_CMD_QFPA ||
                                     *pcmd == ELS_CMD_UVEM ||
                                     *pcmd == ELS_CMD_PLOGI)) {
                                bf_set(els_req64_sp, &wqe->els_req, 1);
                                bf_set(els_req64_sid, &wqe->els_req,
                                       job->vport->fc_myDID);

                                if (*pcmd == ELS_CMD_FLOGI &&
                                    phba->fc_topology != LPFC_TOPOLOGY_LOOP)
                                        bf_set(els_req64_sid, &wqe->els_req, 0);

                                bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
                                bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
                                       phba->vpi_ids[job->vport->vpi]);
                        } else if (pcmd) {
                                bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
                                bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
                                       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
                        }
                }

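                /* Fields common to every ELS request WQE */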
                bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
                       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);

                bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
                bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
                bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
                bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
                bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
                bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
                break;
        case CMD_XMIT_ELS_RSP64_WQE:
                ndlp = (struct lpfc_nodelist *)job->context1;

                /* word4 */
                wqe->xmit_els_rsp.word4 = 0;

                if_type = bf_get(lpfc_sli_intf_if_type,
                                 &phba->sli4_hba.sli_intf);
                if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
                        if (job->vport->fc_flag & FC_PT2PT) {
                                bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
                                bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
                                       job->vport->fc_myDID);
                                if (job->vport->fc_myDID == Fabric_DID) {
                                        bf_set(wqe_els_did,
                                               &wqe->xmit_els_rsp.wqe_dest, 0);
                                }
                        }
                }
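                /*
                 * Note: in point-to-point mode the response carries our
                 * S_ID explicitly, and a response sent while we still hold
                 * the fabric DID goes out with a zeroed destination id.
                 */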

                bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
                bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
                bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
                bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
                       LPFC_WQE_LENLOC_WORD3);
                bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);

                if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                        bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
                        bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
                               job->vport->fc_myDID);
                        bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
                }

                if (phba->sli_rev == LPFC_SLI_REV4) {
                        bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
                               phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);

                        if (bf_get(wqe_ct, &wqe->xmit_els_rsp.wqe_com))
                                bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
                                       phba->vpi_ids[job->vport->vpi]);
                }
                command_type = OTHER_COMMAND;
                break;
        case CMD_GEN_REQUEST64_WQE:
                /* Word 10 */
                bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
                bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
                bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
                bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
                bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
                command_type = OTHER_COMMAND;
                break;
        case CMD_XMIT_SEQUENCE64_WQE:
                if (phba->link_flag & LS_LOOPBACK_MODE)
                        bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);

                wqe->xmit_sequence.rsvd3 = 0;
                bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
                bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
                bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
                       LPFC_WQE_IOD_WRITE);
                bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
                       LPFC_WQE_LENLOC_WORD12);
                bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
                command_type = OTHER_COMMAND;
                break;
        case CMD_XMIT_BLS_RSP64_WQE:
                bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
                bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
                bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
                bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
                       phba->vpi_ids[phba->pport->vpi]);
                bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
                bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
                       LPFC_WQE_LENLOC_NONE);
                /* Overwrite the pre-set command type with OTHER_COMMAND */
                command_type = OTHER_COMMAND;
                break;
        case CMD_FCP_ICMND64_WQE:       /* task mgmt commands */
        case CMD_ABORT_XRI_WQE:         /* abort iotag */
        case CMD_SEND_FRAME:            /* mds loopback */
                /* cases already formatted for sli4 wqe - no changes necessary */
                return;
        default:
                dump_stack();
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "6207 Invalid command 0x%x\n",
                                cmnd);
                break;
        }

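        /* Finally, stamp the fields shared by every WQE type */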
        wqe->generic.wqe_com.abort_tag = abort_tag;
        bf_set(wqe_reqtag, &wqe->generic.wqe_com, job->iotag);
        bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
        bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
}
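
/*
 * Illustrative usage sketch (not driver code): a command-specific prep
 * routine typically fills the per-command words of the WQE, selects the
 * command via the wqe_cmnd field, and then calls lpfc_sli_prep_wqe() to
 * stamp the common fields.  The function below is a hypothetical example.
 */
static void lpfc_example_prep_gen_req_wqe(struct lpfc_hba *phba,
                                          struct lpfc_iocbq *job)
{
        union lpfc_wqe128 *wqe = &job->wqe;

        /* Select the WQE command; lpfc_sli_prep_wqe() switches on this */
        bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);

        /* Fill the common fields (reqtag, command type, CQ id, etc.) */
        lpfc_sli_prep_wqe(phba, job);
}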