scsi: lpfc: Add embedded data pointers for enhanced performance
drivers/scsi/lpfc/lpfc_sli.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
        LPFC_UNKNOWN_IOCB,
        LPFC_UNSOL_IOCB,
        LPFC_SOL_IOCB,
        LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
                                  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
                              uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
                                                         struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
                                          struct hbq_dmabuf *dmabuf);
static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
                                    struct lpfc_cqe *);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
                                       int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
                                     struct lpfc_eqe *eqe, uint32_t qidx);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
                                   struct lpfc_sli_ring *pring,
                                   struct lpfc_iocbq *cmdiocb);

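/**
 * lpfc_get_iocb_from_iocbq - Get the IOCB embedded in an iocbq
 * @iocbq: Pointer to driver iocb object.
 *
 * This function returns a pointer to the IOCB command structure
 * embedded in the driver iocb object @iocbq.
 **/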
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
        return &iocbq->iocb;
}

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful, -ENOMEM if the queue memory is invalid, or -EBUSY if no entries
 * are available on @q.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
        union lpfc_wqe *temp_wqe;
        struct lpfc_register doorbell;
        uint32_t host_index;
        uint32_t idx;
        uint32_t i = 0;
        uint8_t *tmp;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
        temp_wqe = q->qe[q->host_index].wqe;

        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->host_index + 1) % q->entry_count);
        if (idx == q->hba_index) {
                q->WQ_overflow++;
                return -EBUSY;
        }
        q->WQ_posted++;
        /* set consumption flag every once in a while */
        if (!((q->host_index + 1) % q->entry_repost))
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
        else
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
        if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
                bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
        lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
        if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                /* write to DPP aperture taking advantage of Combined Writes */
                tmp = (uint8_t *)wqe;
                for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
                        writeq(*((uint64_t *)(tmp + i)), q->dpp_regaddr + i);
        }
        /* ensure WQE bcopy and DPP flushed before doorbell write */
        wmb();

        /* Update the host index before invoking device */
        host_index = q->host_index;

        q->host_index = idx;

        /* Ring Doorbell */
        doorbell.word0 = 0;
        if (q->db_format == LPFC_DB_LIST_FORMAT) {
                if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                        bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
                               q->dpp_id);
                        bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
                               q->queue_id);
                } else {
                        bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
                        bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
                }
        } else if (q->db_format == LPFC_DB_RING_FORMAT) {
                bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
                bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
        } else {
                return -EINVAL;
        }
        writel(doorbell.word0, q->db_regaddr);

        return 0;
}

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
        uint32_t released = 0;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        if (q->hba_index == index)
                return 0;
        do {
                q->hba_index = ((q->hba_index + 1) % q->entry_count);
                released++;
        } while (q->hba_index != index);
        return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
        struct lpfc_mqe *temp_mqe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
        temp_mqe = q->qe[q->host_index].mqe;

        /* If the host has not yet processed the next entry then we are done */
        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
                return -ENOMEM;
        lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
        /* Save off the mailbox pointer for completion */
        q->phba->mbox = (MAILBOX_t *)temp_mqe;

        /* Update the host index before invoking device */
        q->host_index = ((q->host_index + 1) % q->entry_count);

        /* Ring Doorbell */
        doorbell.word0 = 0;
        bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
        bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
        return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        /* Clear the mailbox pointer for completion */
        q->phba->mbox = NULL;
        q->hba_index = ((q->hba_index + 1) % q->entry_count);
        return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
        struct lpfc_hba *phba;
        struct lpfc_eqe *eqe;
        uint32_t idx;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        phba = q->phba;
        eqe = q->qe[q->hba_index].eqe;

        /* If the next EQE is not valid then we are done */
        if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
                return NULL;
        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->hba_index + 1) % q->entry_count);
        if (idx == q->host_index)
                return NULL;

        q->hba_index = idx;
        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.eqav && !q->hba_index)
                q->qe_valid = (q->qe_valid) ? 0 : 1;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Speculative instructions were allowing a bcopy at the start
         * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
         * after our return, to copy data before the valid bit check above
         * was done. As such, some of the copied data was stale. The barrier
         * ensures the check is before any data is copied.
         */
        mb();
        return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts on.
 *
 **/
inline void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
                (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts on.
 *
 **/
inline void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
                (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each event queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
        uint32_t released = 0;
        struct lpfc_hba *phba;
        struct lpfc_eqe *temp_eqe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;
        phba = q->phba;

        /* while there are valid entries */
        while (q->hba_index != q->host_index) {
                if (!phba->sli4_hba.pc_sli4_params.eqav) {
                        temp_eqe = q->qe[q->host_index].eqe;
                        bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
                }
                released++;
                q->host_index = ((q->host_index + 1) % q->entry_count);
        }
        if (unlikely(released == 0 && !arm))
                return 0;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm) {
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
                bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        }
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
                        (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
        return released;
}

/**
 * lpfc_sli4_if6_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each event queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_if6_eq_release(struct lpfc_queue *q, bool arm)
{
        uint32_t released = 0;
        struct lpfc_hba *phba;
        struct lpfc_eqe *temp_eqe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;
        phba = q->phba;

        /* while there are valid entries */
        while (q->hba_index != q->host_index) {
                if (!phba->sli4_hba.pc_sli4_params.eqav) {
                        temp_eqe = q->qe[q->host_index].eqe;
                        bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
                }
                released++;
                q->host_index = ((q->host_index + 1) % q->entry_count);
        }
        if (unlikely(released == 0 && !arm))
                return 0;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, released);
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
        return released;
}

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
        struct lpfc_hba *phba;
        struct lpfc_cqe *cqe;
        uint32_t idx;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        phba = q->phba;
        cqe = q->qe[q->hba_index].cqe;

        /* If the next CQE is not valid then we are done */
        if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
                return NULL;
        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->hba_index + 1) % q->entry_count);
        if (idx == q->host_index)
                return NULL;

        q->hba_index = idx;
        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.cqav && !q->hba_index)
                q->qe_valid = (q->qe_valid) ? 0 : 1;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Given what was seen in lpfc_sli4_eq_get() of speculative
         * instructions allowing action on content before valid bit checked,
         * add barrier here as well. May not be needed as "content" is a
         * single 32-bit entity here (vs multi word structure for cq's).
         */
        mb();
        return cqe;
}

/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
        uint32_t released = 0;
        struct lpfc_hba *phba;
        struct lpfc_cqe *temp_qe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;
        phba = q->phba;

        /* while there are valid entries */
        while (q->hba_index != q->host_index) {
                if (!phba->sli4_hba.pc_sli4_params.cqav) {
                        temp_qe = q->qe[q->host_index].cqe;
                        bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
                }
                released++;
                q->host_index = ((q->host_index + 1) % q->entry_count);
        }
        if (unlikely(released == 0 && !arm))
                return 0;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
        bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
                        (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
        return released;
}

/**
 * lpfc_sli4_if6_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_if6_cq_release(struct lpfc_queue *q, bool arm)
{
        uint32_t released = 0;
        struct lpfc_hba *phba;
        struct lpfc_cqe *temp_qe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;
        phba = q->phba;

        /* while there are valid entries */
        while (q->hba_index != q->host_index) {
                if (!phba->sli4_hba.pc_sli4_params.cqav) {
                        temp_qe = q->qe[q->host_index].cqe;
                        bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
                }
                released++;
                q->host_index = ((q->host_index + 1) % q->entry_count);
        }
        if (unlikely(released == 0 && !arm))
                return 0;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, released);
        bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
        return released;
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next available
 * entries on @hq and @dq. This function will then ring the Receive Queue
 * Doorbell to signal the HBA to start processing the Receive Queue Entries.
 * This function returns the index that the rqe was copied to if successful.
 * If no entries are available on the queues then this function will return
 * -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
        struct lpfc_rqe *temp_hrqe;
        struct lpfc_rqe *temp_drqe;
        struct lpfc_register doorbell;
        int hq_put_index;
        int dq_put_index;

        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return -ENOMEM;
        hq_put_index = hq->host_index;
        dq_put_index = dq->host_index;
        temp_hrqe = hq->qe[hq_put_index].rqe;
        temp_drqe = dq->qe[dq_put_index].rqe;

        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
        if (hq_put_index != dq_put_index)
                return -EINVAL;
        /* If the host has not yet processed the next entry then we are done */
        if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
                return -EBUSY;
        lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
        lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

        /* Update the host index to point to the next slot */
        hq->host_index = ((hq_put_index + 1) % hq->entry_count);
        dq->host_index = ((dq_put_index + 1) % dq->entry_count);
        hq->RQ_buf_posted++;

        /* Ring The Header Receive Queue Doorbell */
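        /* The doorbell write is batched: it is only rung once for every
         * entry_repost RQEs posted, to cut down on MMIO traffic.
         */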
        if (!(hq->host_index % hq->entry_repost)) {
                doorbell.word0 = 0;
                if (hq->db_format == LPFC_DB_RING_FORMAT) {
                        bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
                               hq->entry_repost);
                        bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
                } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
                        bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
                               hq->entry_repost);
                        bf_set(lpfc_rq_db_list_fm_index, &doorbell,
                               hq->host_index);
                        bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
                } else {
                        return -EINVAL;
                }
                writel(doorbell.word0, hq->db_regaddr);
        }
        return hq_put_index;
}

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return 0;

        if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
                return 0;
        hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
        dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
        return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
                           pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
                           pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        struct lpfc_iocbq *iocbq = NULL;

        lockdep_assert_held(&phba->hbalock);

        list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
        if (iocbq)
                phba->iocb_cnt++;
        if (phba->iocb_cnt > phba->iocb_max)
                phba->iocb_max = phba->iocb_cnt;
        return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
        return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
                    uint16_t xritag,
                    struct lpfc_node_rrq *rrq)
{
        struct lpfc_nodelist *ndlp = NULL;

        if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
                ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

        /* The target DID could have been swapped (cable swap)
         * we should use the ndlp from the findnode if it is
         * available.
         */
        if ((!ndlp) && rrq->ndlp)
                ndlp = rrq->ndlp;

        if (!ndlp)
                goto out;

        if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
                rrq->send_rrq = 0;
                rrq->xritag = 0;
                rrq->rrq_stop_time = 0;
        }
out:
        mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function takes the hbalock. It checks if stop_time
 * (ratov from setting rrq active) has been reached, and if it
 * has and the send_rrq flag is set then it will call
 * lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long next_time;
        unsigned long iflags;
        LIST_HEAD(send_rrq);

        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
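        /* default the next timer pop to (ratov + 1) seconds from now */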
        next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        list_for_each_entry_safe(rrq, nextrrq,
                                 &phba->active_rrq_list, list) {
                if (time_after(jiffies, rrq->rrq_stop_time))
                        list_move(&rrq->list, &send_rrq);
                else if (time_before(rrq->rrq_stop_time, next_time))
                        next_time = rrq->rrq_stop_time;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        if ((!list_empty(&phba->active_rrq_list)) &&
            (!(phba->pport->load_flag & FC_UNLOADING)))
                mod_timer(&phba->rrq_tmr, next_time);
        list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
                list_del(&rrq->list);
                if (!rrq->send_rrq) {
                        /* this call will free the rrq */
                        lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
                } else if (lpfc_send_rrq(phba, rrq)) {
                        /* if we send the rrq then the completion handler
                         * will clear the bit in the xribitmap.
                         */
                        lpfc_clr_rrq_active(phba, rrq->xritag,
                                            rrq);
                }
        }
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;

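        /* RRQs are only tracked on SLI4 adapters */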
        if (phba->sli_rev != LPFC_SLI_REV4)
                return NULL;
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport == vport && rrq->xritag == xri &&
                    rrq->nlp_DID == did) {
                        list_del(&rrq->list);
                        spin_unlock_irqrestore(&phba->hbalock, iflags);
                        return rrq;
                }
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQs for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If @ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If @ndlp is not NULL, then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;
        LIST_HEAD(rrq_list);

        if (phba->sli_rev != LPFC_SLI_REV4)
                return;
        if (!ndlp) {
                lpfc_sli4_vport_delete_els_xri_aborted(vport);
                lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
        }
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
                if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
                        list_move(&rrq->list, &rrq_list);
        spin_unlock_irqrestore(&phba->hbalock, iflags);

        list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
                list_del(&rrq->list);
                lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
        }
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: The xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                        uint16_t xritag)
{
        lockdep_assert_held(&phba->hbalock);
        if (!ndlp)
                return 0;
        if (!ndlp->active_rrqs_xri_bitmap)
                return 0;
        if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                return 1;
        else
                return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 = rrq active bit was set for this xri
 *         < 0 = No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
        unsigned long iflags;
        struct lpfc_node_rrq *rrq;
        int empty;

        if (!ndlp)
                return -EINVAL;

        if (!phba->cfg_enable_rrq)
                return -EINVAL;

        spin_lock_irqsave(&phba->hbalock, iflags);
        if (phba->pport->load_flag & FC_UNLOADING) {
                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
                goto out;
        }

        /*
         * set the active bit even if there is no mem available.
         */
        if (NLP_CHK_FREE_REQ(ndlp))
                goto out;

        if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
                goto out;

        if (!ndlp->active_rrqs_xri_bitmap)
                goto out;

        if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                goto out;

        spin_unlock_irqrestore(&phba->hbalock, iflags);
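        /* the lock is dropped above: mempool_alloc with GFP_KERNEL may sleep */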
        rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
        if (!rrq) {
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
                                " DID:0x%x Send:%d\n",
                                xritag, rxid, ndlp->nlp_DID, send_rrq);
                return -EINVAL;
        }
        if (phba->cfg_enable_rrq == 1)
                rrq->send_rrq = send_rrq;
        else
                rrq->send_rrq = 0;
        rrq->xritag = xritag;
        rrq->rrq_stop_time = jiffies +
                                msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        rrq->ndlp = ndlp;
        rrq->nlp_DID = ndlp->nlp_DID;
        rrq->vport = ndlp->vport;
        rrq->rxid = rxid;
        spin_lock_irqsave(&phba->hbalock, iflags);
        empty = list_empty(&phba->active_rrq_list);
        list_add_tail(&rrq->list, &phba->active_rrq_list);
        phba->hba_flag |= HBA_RRQ_ACTIVE;
        if (empty)
                lpfc_worker_wake_up(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return 0;
out:
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "2921 Can't set rrq active xri:0x%x rxid:0x%x"
                        " DID:0x%x Send:%d\n",
                        xritag, rxid, ndlp->nlp_DID, send_rrq);
        return -EINVAL;
}

/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the hbalock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, and it returns a pointer to
 * the newly allocated sglq object; otherwise it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
        struct lpfc_sglq *sglq = NULL;
        struct lpfc_sglq *start_sglq = NULL;
        struct lpfc_scsi_buf *lpfc_cmd;
        struct lpfc_nodelist *ndlp;
        int found = 0;

        lockdep_assert_held(&phba->hbalock);

        if (piocbq->iocb_flag & LPFC_IO_FCP) {
                lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
                ndlp = lpfc_cmd->rdata->pnode;
        } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
                        !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
                ndlp = piocbq->context_un.ndlp;
        } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
                if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
                        ndlp = NULL;
                else
                        ndlp = piocbq->context_un.ndlp;
        } else {
                ndlp = piocbq->context1;
        }

        spin_lock(&phba->sli4_hba.sgl_list_lock);
        list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
        start_sglq = sglq;
        while (!found) {
                if (!sglq)
                        break;
                if (ndlp && ndlp->active_rrqs_xri_bitmap &&
                    test_bit(sglq->sli4_lxritag,
                    ndlp->active_rrqs_xri_bitmap)) {
                        /* This xri has an rrq outstanding for this DID.
                         * put it back in the list and get another xri.
                         */
                        list_add_tail(&sglq->list, lpfc_els_sgl_list);
                        sglq = NULL;
                        list_remove_head(lpfc_els_sgl_list, sglq,
                                                struct lpfc_sglq, list);
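                        /* Wrapped back to the first sglq we tried: every
                         * xri on the list has an outstanding RRQ, so give up.
                         */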
                        if (sglq == start_sglq) {
                                list_add_tail(&sglq->list, lpfc_els_sgl_list);
                                sglq = NULL;
                                break;
                        } else
                                continue;
                }
                sglq->ndlp = ndlp;
                found = 1;
                phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
                sglq->state = SGL_ALLOCATED;
        }
        spin_unlock(&phba->sli4_hba.sgl_list_lock);
        return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, and it returns a pointer to
 * the newly allocated sglq object; otherwise it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_nvmet_sgl_list;
        struct lpfc_sglq *sglq = NULL;

        lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

        lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

        list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
        if (!sglq)
                return NULL;
        phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
        sglq->state = SGL_ALLOCATED;
        return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *iocbq = NULL;
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        iocbq = __lpfc_sli_get_iocbq(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        struct lpfc_sglq *sglq;
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
        unsigned long iflag = 0;
        struct lpfc_sli_ring *pring;

        lockdep_assert_held(&phba->hbalock);

        if (iocbq->sli4_xritag == NO_XRI)
                sglq = NULL;
        else
                sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

        if (sglq) {
                if (iocbq->iocb_flag & LPFC_IO_NVMET) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_nvmet_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                        goto out;
                }

                pring = phba->sli4_hba.els_wq->pring;
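                /* An LPFC_EXCHANGE_BUSY completion means the port still
                 * owns the XRI: park the sglq on the aborted list until
                 * the CQ_ABORTED_XRI is received.
                 */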
                if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
                        (sglq->state != SGL_XRI_ABORTED)) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        list_add(&sglq->list,
                                 &phba->sli4_hba.lpfc_abts_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                } else {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);

                        /* Check if TXQ queue needs to be serviced */
                        if (!list_empty(&pring->txq))
                                lpfc_worker_wake_up(phba);
                }
        }

out:
        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_lxritag = NO_XRI;
        iocbq->sli4_xritag = NO_XRI;
        iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
                              LPFC_IO_NVME_LS);
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

        lockdep_assert_held(&phba->hbalock);

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_xritag = NO_XRI;
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        lockdep_assert_held(&phba->hbalock);

        phba->__lpfc_sli_release_iocbq(phba, iocbq);
        phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        unsigned long iflags;

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        spin_lock_irqsave(&phba->hbalock, iflags);
        __lpfc_sli_release_iocbq(phba, iocbq);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
}

1350 /**
1351  * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1352  * @phba: Pointer to HBA context object.
1353  * @iocblist: List of IOCBs.
1354  * @ulpstatus: ULP status in IOCB command field.
1355  * @ulpWord4: ULP word-4 in IOCB command field.
1356  *
1357  * This function is called with a list of IOCBs to cancel. It cancels each
1358  * IOCB on the list by invoking the completion callback associated with the
1359  * IOCB, with the provided @ulpstatus and @ulpWord4 copied into the IOCB
1360  * command fields.
1361  **/
1362 void
1363 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1364                       uint32_t ulpstatus, uint32_t ulpWord4)
1365 {
1366         struct lpfc_iocbq *piocb;
1367
1368         while (!list_empty(iocblist)) {
1369                 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1370                 if (!piocb->iocb_cmpl)
1371                         lpfc_sli_release_iocbq(phba, piocb);
1372                 else {
1373                         piocb->iocb.ulpStatus = ulpstatus;
1374                         piocb->iocb.un.ulpWord[4] = ulpWord4;
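                             /*
                              * A cancelled command has no separate response
                              * iocb; pass the command iocb as its own
                              * response to the completion handler.
                              */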
1375                         (piocb->iocb_cmpl) (phba, piocb, piocb);
1376                 }
1377         }
1378         return;
1379 }
1380
1381 /**
1382  * lpfc_sli_iocb_cmd_type - Get the iocb type
1383  * @iocb_cmnd: iocb command code.
1384  *
1385  * This function is called by ring event handler function to get the iocb type.
1386  * This function translates the iocb command to an iocb command type used to
1387  * decide the final disposition of each completed IOCB.
1388  * The function returns
1389  * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1390  * LPFC_SOL_IOCB     if it is a solicited iocb completion
1391  * LPFC_ABORT_IOCB   if it is an abort iocb
1392  * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
1393  *
1394  * The caller is not required to hold any lock.
1395  **/
1396 static lpfc_iocb_type
1397 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1398 {
1399         lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1400
1401         if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1402                 return LPFC_UNKNOWN_IOCB;
1403
1404         switch (iocb_cmnd) {
1405         case CMD_XMIT_SEQUENCE_CR:
1406         case CMD_XMIT_SEQUENCE_CX:
1407         case CMD_XMIT_BCAST_CN:
1408         case CMD_XMIT_BCAST_CX:
1409         case CMD_ELS_REQUEST_CR:
1410         case CMD_ELS_REQUEST_CX:
1411         case CMD_CREATE_XRI_CR:
1412         case CMD_CREATE_XRI_CX:
1413         case CMD_GET_RPI_CN:
1414         case CMD_XMIT_ELS_RSP_CX:
1415         case CMD_GET_RPI_CR:
1416         case CMD_FCP_IWRITE_CR:
1417         case CMD_FCP_IWRITE_CX:
1418         case CMD_FCP_IREAD_CR:
1419         case CMD_FCP_IREAD_CX:
1420         case CMD_FCP_ICMND_CR:
1421         case CMD_FCP_ICMND_CX:
1422         case CMD_FCP_TSEND_CX:
1423         case CMD_FCP_TRSP_CX:
1424         case CMD_FCP_TRECEIVE_CX:
1425         case CMD_FCP_AUTO_TRSP_CX:
1426         case CMD_ADAPTER_MSG:
1427         case CMD_ADAPTER_DUMP:
1428         case CMD_XMIT_SEQUENCE64_CR:
1429         case CMD_XMIT_SEQUENCE64_CX:
1430         case CMD_XMIT_BCAST64_CN:
1431         case CMD_XMIT_BCAST64_CX:
1432         case CMD_ELS_REQUEST64_CR:
1433         case CMD_ELS_REQUEST64_CX:
1434         case CMD_FCP_IWRITE64_CR:
1435         case CMD_FCP_IWRITE64_CX:
1436         case CMD_FCP_IREAD64_CR:
1437         case CMD_FCP_IREAD64_CX:
1438         case CMD_FCP_ICMND64_CR:
1439         case CMD_FCP_ICMND64_CX:
1440         case CMD_FCP_TSEND64_CX:
1441         case CMD_FCP_TRSP64_CX:
1442         case CMD_FCP_TRECEIVE64_CX:
1443         case CMD_GEN_REQUEST64_CR:
1444         case CMD_GEN_REQUEST64_CX:
1445         case CMD_XMIT_ELS_RSP64_CX:
1446         case DSSCMD_IWRITE64_CR:
1447         case DSSCMD_IWRITE64_CX:
1448         case DSSCMD_IREAD64_CR:
1449         case DSSCMD_IREAD64_CX:
1450                 type = LPFC_SOL_IOCB;
1451                 break;
1452         case CMD_ABORT_XRI_CN:
1453         case CMD_ABORT_XRI_CX:
1454         case CMD_CLOSE_XRI_CN:
1455         case CMD_CLOSE_XRI_CX:
1456         case CMD_XRI_ABORTED_CX:
1457         case CMD_ABORT_MXRI64_CN:
1458         case CMD_XMIT_BLS_RSP64_CX:
1459                 type = LPFC_ABORT_IOCB;
1460                 break;
1461         case CMD_RCV_SEQUENCE_CX:
1462         case CMD_RCV_ELS_REQ_CX:
1463         case CMD_RCV_SEQUENCE64_CX:
1464         case CMD_RCV_ELS_REQ64_CX:
1465         case CMD_ASYNC_STATUS:
1466         case CMD_IOCB_RCV_SEQ64_CX:
1467         case CMD_IOCB_RCV_ELS64_CX:
1468         case CMD_IOCB_RCV_CONT64_CX:
1469         case CMD_IOCB_RET_XRI64_CX:
1470                 type = LPFC_UNSOL_IOCB;
1471                 break;
1472         case CMD_IOCB_XMIT_MSEQ64_CR:
1473         case CMD_IOCB_XMIT_MSEQ64_CX:
1474         case CMD_IOCB_RCV_SEQ_LIST64_CX:
1475         case CMD_IOCB_RCV_ELS_LIST64_CX:
1476         case CMD_IOCB_CLOSE_EXTENDED_CN:
1477         case CMD_IOCB_ABORT_EXTENDED_CN:
1478         case CMD_IOCB_RET_HBQE64_CN:
1479         case CMD_IOCB_FCP_IBIDIR64_CR:
1480         case CMD_IOCB_FCP_IBIDIR64_CX:
1481         case CMD_IOCB_FCP_ITASKMGT64_CX:
1482         case CMD_IOCB_LOGENTRY_CN:
1483         case CMD_IOCB_LOGENTRY_ASYNC_CN:
1484                 printk(KERN_ERR "%s - Unhandled SLI-3 Command x%x\n",
1485                                 __func__, iocb_cmnd);
1486                 type = LPFC_UNKNOWN_IOCB;
1487                 break;
1488         default:
1489                 type = LPFC_UNKNOWN_IOCB;
1490                 break;
1491         }
1492
1493         return type;
1494 }
1495
1496 /**
1497  * lpfc_sli_ring_map - Issue config_ring mbox for all rings
1498  * @phba: Pointer to HBA context object.
1499  *
1500  * This function is called from SLI initialization code
1501  * to configure every ring of the HBA's SLI interface. The
1502  * caller is not required to hold any lock. This function issues
1503  * a config_ring mailbox command for each ring.
1504  * This function returns zero if successful else returns a negative
1505  * error code.
1506  **/
1507 static int
1508 lpfc_sli_ring_map(struct lpfc_hba *phba)
1509 {
1510         struct lpfc_sli *psli = &phba->sli;
1511         LPFC_MBOXQ_t *pmb;
1512         MAILBOX_t *pmbox;
1513         int i, rc, ret = 0;
1514
1515         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1516         if (!pmb)
1517                 return -ENOMEM;
1518         pmbox = &pmb->u.mb;
1519         phba->link_state = LPFC_INIT_MBX_CMDS;
1520         for (i = 0; i < psli->num_rings; i++) {
1521                 lpfc_config_ring(phba, i, pmb);
1522                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1523                 if (rc != MBX_SUCCESS) {
1524                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1525                                         "0446 Adapter failed to init (%d), "
1526                                         "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1527                                         "ring %d\n",
1528                                         rc, pmbox->mbxCommand,
1529                                         pmbox->mbxStatus, i);
1530                         phba->link_state = LPFC_HBA_ERROR;
1531                         ret = -ENXIO;
1532                         break;
1533                 }
1534         }
1535         mempool_free(pmb, phba->mbox_mem_pool);
1536         return ret;
1537 }
1538
1539 /**
1540  * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1541  * @phba: Pointer to HBA context object.
1542  * @pring: Pointer to driver SLI ring object.
1543  * @piocb: Pointer to the driver iocb object.
1544  *
1545  * This function is called with hbalock held. It adds the new iocb to
1546  * the txcmplq of the given ring and always returns 0. For iocbs on the
1547  * ELS ring (other than aborts and closes) it verifies that a vport is
1548  * associated with the command and, unless the vport is unloading,
1549  * (re)arms the els_tmofunc timer.
1550  **/
1551 static int
1552 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1553                         struct lpfc_iocbq *piocb)
1554 {
1555         lockdep_assert_held(&phba->hbalock);
1556
1557         BUG_ON(!piocb);
1558
1559         list_add_tail(&piocb->list, &pring->txcmplq);
1560         piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
1561
1562         if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1563            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
1564            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1565                 BUG_ON(!piocb->vport);
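                     /*
                      * Arm the ELS timeout for twice R_A_TOV; fc_ratov is
                      * kept in seconds, hence msecs_to_jiffies(1000 * ...)
                      * below.
                      */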
1566                 if (!(piocb->vport->load_flag & FC_UNLOADING))
1567                         mod_timer(&piocb->vport->els_tmofunc,
1568                                   jiffies +
1569                                   msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1570         }
1571
1572         return 0;
1573 }
1574
1575 /**
1576  * lpfc_sli_ringtx_get - Get first element of the txq
1577  * @phba: Pointer to HBA context object.
1578  * @pring: Pointer to driver SLI ring object.
1579  *
1580  * This function is called with hbalock held to get next
1581  * iocb in txq of the given ring. If there is any iocb in
1582  * the txq, the function returns first iocb in the list after
1583  * removing the iocb from the list, else it returns NULL.
1584  **/
1585 struct lpfc_iocbq *
1586 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1587 {
1588         struct lpfc_iocbq *cmd_iocb;
1589
1590         lockdep_assert_held(&phba->hbalock);
1591
1592         list_remove_head(&pring->txq, cmd_iocb, struct lpfc_iocbq, list);
1593         return cmd_iocb;
1594 }
1595
1596 /**
1597  * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
1598  * @phba: Pointer to HBA context object.
1599  * @pring: Pointer to driver SLI ring object.
1600  *
1601  * This function is called with hbalock held and the caller must post the
1602  * iocb without releasing the lock. If the caller releases the lock,
1603  * iocb slot returned by the function is not guaranteed to be available.
1604  * The function returns a pointer to the next available iocb slot if
1605  * there is an available slot in the ring, else it returns NULL.
1606  * If the get index of the ring is ahead of the put index, the function
1607  * will post an error attention event to the worker thread to take the
1608  * HBA to offline state.
1609  **/
1610 static IOCB_t *
1611 lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1612 {
1613         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
1614         uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;
1615
1616         lockdep_assert_held(&phba->hbalock);
1617
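             /*
              * Keep next_cmdidx one slot ahead of cmdidx, wrapping to the
              * start of the ring once it reaches numCiocb entries.
              */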
1618         if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
1619            (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
1620                 pring->sli.sli3.next_cmdidx = 0;
1621
1622         if (unlikely(pring->sli.sli3.local_getidx ==
1623                 pring->sli.sli3.next_cmdidx)) {
1624
1625                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
1626
1627                 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
1628                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1629                                         "0315 Ring %d issue: portCmdGet %d "
1630                                         "is bigger than cmd ring %d\n",
1631                                         pring->ringno,
1632                                         pring->sli.sli3.local_getidx,
1633                                         max_cmd_idx);
1634
1635                         phba->link_state = LPFC_HBA_ERROR;
1636                         /*
1637                          * All error attention handlers are posted to
1638                          * worker thread
1639                          */
1640                         phba->work_ha |= HA_ERATT;
1641                         phba->work_hs = HS_FFER3;
1642
1643                         lpfc_worker_wake_up(phba);
1644
1645                         return NULL;
1646                 }
1647
1648                 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
1649                         return NULL;
1650         }
1651
1652         return lpfc_cmd_iocb(phba, pring);
1653 }
1654
1655 /**
1656  * lpfc_sli_next_iotag - Get an iotag for the iocb
1657  * @phba: Pointer to HBA context object.
1658  * @iocbq: Pointer to driver iocb object.
1659  *
1660  * This function gets an iotag for the iocb. If there is no unused iotag and
1661  * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
1662  * array and assigns a new iotag.
1663  * The function returns the allocated iotag if successful, else returns zero.
1664  * Zero is not a valid iotag.
1665  * The caller is not required to hold any lock.
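      *
      * A minimal usage sketch (hypothetical caller):
      *
      *   uint16_t iotag = lpfc_sli_next_iotag(phba, iocbq);
      *
      *   if (!iotag)
      *           return -ENOMEM;
      *
      * where a zero return means the iocbq_lookup table could not be grown.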
1666  **/
1667 uint16_t
1668 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1669 {
1670         struct lpfc_iocbq **new_arr;
1671         struct lpfc_iocbq **old_arr;
1672         size_t new_len;
1673         struct lpfc_sli *psli = &phba->sli;
1674         uint16_t iotag;
1675
1676         spin_lock_irq(&phba->hbalock);
1677         iotag = psli->last_iotag;
1678         if (++iotag < psli->iocbq_lookup_len) {
1679                 psli->last_iotag = iotag;
1680                 psli->iocbq_lookup[iotag] = iocbq;
1681                 spin_unlock_irq(&phba->hbalock);
1682                 iocbq->iotag = iotag;
1683                 return iotag;
1684         } else if (psli->iocbq_lookup_len < (0xffff
1685                                            - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1686                 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
1687                 spin_unlock_irq(&phba->hbalock);
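                     /*
                      * Drop hbalock across the allocation since GFP_KERNEL
                      * may sleep; the table length is re-checked once the
                      * lock is re-taken in case another thread grew it.
                      */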
1688                 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
1689                                   GFP_KERNEL);
1690                 if (new_arr) {
1691                         spin_lock_irq(&phba->hbalock);
1692                         old_arr = psli->iocbq_lookup;
1693                         if (new_len <= psli->iocbq_lookup_len) {
1694                                 /* highly improbable case */
1695                                 kfree(new_arr);
1696                                 iotag = psli->last_iotag;
1697                                 if (++iotag < psli->iocbq_lookup_len) {
1698                                         psli->last_iotag = iotag;
1699                                         psli->iocbq_lookup[iotag] = iocbq;
1700                                         spin_unlock_irq(&phba->hbalock);
1701                                         iocbq->iotag = iotag;
1702                                         return iotag;
1703                                 }
1704                                 spin_unlock_irq(&phba->hbalock);
1705                                 return 0;
1706                         }
1707                         if (psli->iocbq_lookup)
1708                                 memcpy(new_arr, old_arr,
1709                                        ((psli->last_iotag + 1) *
1710                                         sizeof(struct lpfc_iocbq *)));
1711                         psli->iocbq_lookup = new_arr;
1712                         psli->iocbq_lookup_len = new_len;
1713                         psli->last_iotag = iotag;
1714                         psli->iocbq_lookup[iotag] = iocbq;
1715                         spin_unlock_irq(&phba->hbalock);
1716                         iocbq->iotag = iotag;
1717                         kfree(old_arr);
1718                         return iotag;
1719                 }
1720         } else
1721                 spin_unlock_irq(&phba->hbalock);
1722
1723         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1724                         "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1725                         psli->last_iotag);
1726
1727         return 0;
1728 }
1729
1730 /**
1731  * lpfc_sli_submit_iocb - Submit an iocb to the firmware
1732  * @phba: Pointer to HBA context object.
1733  * @pring: Pointer to driver SLI ring object.
1734  * @iocb: Pointer to iocb slot in the ring.
1735  * @nextiocb: Pointer to driver iocb object which needs to be
1736  *            posted to firmware.
1737  *
1738  * This function is called with hbalock held to post a new iocb to
1739  * the firmware. This function copies the new iocb into the ring iocb
1740  * slot and updates the ring pointers. It adds the new iocb to the
1741  * txcmplq if there is a completion callback for this iocb; otherwise
1742  * the function frees the iocb object.
1743  **/
1744 static void
1745 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1746                 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1747 {
1748         lockdep_assert_held(&phba->hbalock);
1749         /*
1750          * Set up an iotag: post 0 when no completion callback is
1751          * registered, since completions are matched by iotag.
1752          */
1753         nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
1754
1755         if (pring->ringno == LPFC_ELS_RING) {
1756                 lpfc_debugfs_slow_ring_trc(phba,
1757                         "IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
1758                         *(((uint32_t *) &nextiocb->iocb) + 4),
1759                         *(((uint32_t *) &nextiocb->iocb) + 6),
1760                         *(((uint32_t *) &nextiocb->iocb) + 7));
1761         }
1762
1763         /*
1764          * Issue iocb command to adapter
1765          */
1766         lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
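             /* Make the copied IOCB visible before the put index update. */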
1767         wmb();
1768         pring->stats.iocb_cmd++;
1769
1770         /*
1771          * If there is no completion routine to call, we can release the
1772          * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1773          * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1774          */
1775         if (nextiocb->iocb_cmpl)
1776                 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
1777         else
1778                 __lpfc_sli_release_iocbq(phba, nextiocb);
1779
1780         /*
1781          * Let the HBA know what IOCB slot will be the next one the
1782          * driver will put a command into.
1783          */
1784         pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1785         writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
1786 }
1787
1788 /**
1789  * lpfc_sli_update_full_ring - Update the chip attention register
1790  * @phba: Pointer to HBA context object.
1791  * @pring: Pointer to driver SLI ring object.
1792  *
1793  * The caller is not required to hold any lock for calling this function.
1794  * This function updates the chip attention bits for the ring to inform
1795  * firmware that there is pending work to be done for this ring and requests
1796  * an interrupt when there is space available in the ring. This function is
1797  * called when the driver is unable to post more iocbs to the ring due
1798  * to unavailability of space in the ring.
1799  **/
1800 static void
1801 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1802 {
1803         int ringno = pring->ringno;
1804
1805         pring->flag |= LPFC_CALL_RING_AVAILABLE;
1806
1807         wmb();
1808
1809         /*
1810          * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1811          * The HBA will tell us when an IOCB entry is available.
1812          */
1813         writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1814         readl(phba->CAregaddr); /* flush */
1815
1816         pring->stats.iocb_cmd_full++;
1817 }
1818
1819 /**
1820  * lpfc_sli_update_ring - Update chip attention register
1821  * @phba: Pointer to HBA context object.
1822  * @pring: Pointer to driver SLI ring object.
1823  *
1824  * This function updates the chip attention register bit for the
1825  * given ring to inform HBA that there is more work to be done
1826  * in this ring. The caller is not required to hold any lock.
1827  **/
1828 static void
1829 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1830 {
1831         int ringno = pring->ringno;
1832
1833         /*
1834          * Tell the HBA that there is work to do in this ring.
1835          */
1836         if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1837                 wmb();
1838                 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1839                 readl(phba->CAregaddr); /* flush */
1840         }
1841 }
1842
1843 /**
1844  * lpfc_sli_resume_iocb - Process iocbs in the txq
1845  * @phba: Pointer to HBA context object.
1846  * @pring: Pointer to driver SLI ring object.
1847  *
1848  * This function is called with hbalock held to post pending iocbs
1849  * in the txq to the firmware. This function is called when driver
1850  * detects space available in the ring.
1851  **/
1852 static void
1853 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1854 {
1855         IOCB_t *iocb;
1856         struct lpfc_iocbq *nextiocb;
1857
1858         lockdep_assert_held(&phba->hbalock);
1859
1860         /*
1861          * Check to see if:
1862          *  (a) there is anything on the txq to send
1863          *  (b) link is up
1864          *  (c) link attention events can be processed (fcp ring only)
1865          *  (d) IOCB processing is not blocked by the outstanding mbox command.
1866          */
1867
1868         if (lpfc_is_link_up(phba) &&
1869             (!list_empty(&pring->txq)) &&
1870             (pring->ringno != LPFC_FCP_RING ||
1871              phba->sli.sli_flag & LPFC_PROCESS_LA)) {
1872
1873                 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1874                        (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1875                         lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1876
1877                 if (iocb)
1878                         lpfc_sli_update_ring(phba, pring);
1879                 else
1880                         lpfc_sli_update_full_ring(phba, pring);
1881         }
1882
1883         return;
1884 }
1885
1886 /**
1887  * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1888  * @phba: Pointer to HBA context object.
1889  * @hbqno: HBQ number.
1890  *
1891  * This function is called with hbalock held to get the next
1892  * available slot for the given HBQ. If there is a free slot
1893  * available for the HBQ it will return a pointer to the next available
1894  * HBQ entry, else it will return NULL.
1895  **/
1896 static struct lpfc_hbq_entry *
1897 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1898 {
1899         struct hbq_s *hbqp = &phba->hbqs[hbqno];
1900
1901         lockdep_assert_held(&phba->hbalock);
1902
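             /*
              * Keep next_hbqPutIdx one slot ahead of hbqPutIdx, wrapping at
              * entry_count; the firmware get index is re-read from SLIM
              * only when the ring looks full.
              */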
1903         if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1904             ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1905                 hbqp->next_hbqPutIdx = 0;
1906
1907         if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1908                 uint32_t raw_index = phba->hbq_get[hbqno];
1909                 uint32_t getidx = le32_to_cpu(raw_index);
1910
1911                 hbqp->local_hbqGetIdx = getidx;
1912
1913                 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1914                         lpfc_printf_log(phba, KERN_ERR,
1915                                         LOG_SLI | LOG_VPORT,
1916                                         "1802 HBQ %d: local_hbqGetIdx "
1917                                         "%u is > than hbqp->entry_count %u\n",
1918                                         hbqno, hbqp->local_hbqGetIdx,
1919                                         hbqp->entry_count);
1920
1921                         phba->link_state = LPFC_HBA_ERROR;
1922                         return NULL;
1923                 }
1924
1925                 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1926                         return NULL;
1927         }
1928
1929         return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1930                         hbqp->hbqPutIdx;
1931 }
1932
1933 /**
1934  * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
1935  * @phba: Pointer to HBA context object.
1936  *
1937  * This function is called with no lock held to free all the
1938  * hbq buffers while uninitializing the SLI interface. It also
1939  * frees the HBQ buffers returned by the firmware but not yet
1940  * processed by the upper layers.
1941  **/
1942 void
1943 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1944 {
1945         struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1946         struct hbq_dmabuf *hbq_buf;
1947         unsigned long flags;
1948         int i, hbq_count;
1949
1950         hbq_count = lpfc_sli_hbq_count();
1951         /* Return all memory used by all HBQs */
1952         spin_lock_irqsave(&phba->hbalock, flags);
1953         for (i = 0; i < hbq_count; ++i) {
1954                 list_for_each_entry_safe(dmabuf, next_dmabuf,
1955                                 &phba->hbqs[i].hbq_buffer_list, list) {
1956                         hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1957                         list_del(&hbq_buf->dbuf.list);
1958                         (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1959                 }
1960                 phba->hbqs[i].buffer_count = 0;
1961         }
1962
1963         /* Mark the HBQs not in use */
1964         phba->hbq_in_use = 0;
1965         spin_unlock_irqrestore(&phba->hbalock, flags);
1966 }
1967
1968 /**
1969  * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
1970  * @phba: Pointer to HBA context object.
1971  * @hbqno: HBQ number.
1972  * @hbq_buf: Pointer to HBQ buffer.
1973  *
1974  * This function is called with the hbalock held to post a
1975  * hbq buffer to the firmware. It dispatches to the SLI revision
1976  * specific handler, which posts the buffer if an empty HBQ slot
1977  * is available. The function returns zero if it successfully
1978  * posts the buffer, else it returns an error.
1979  **/
1980 static int
1981 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
1982                          struct hbq_dmabuf *hbq_buf)
1983 {
1984         lockdep_assert_held(&phba->hbalock);
1985         return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1986 }
1987
1988 /**
1989  * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1990  * @phba: Pointer to HBA context object.
1991  * @hbqno: HBQ number.
1992  * @hbq_buf: Pointer to HBQ buffer.
1993  *
1994  * This function is called with the hbalock held to post a hbq buffer to the
1995  * firmware. If the function finds an empty slot in the HBQ, it will post the
1996  * buffer and place it on the hbq_buffer_list. The function will return zero if
1997  * it successfully posts the buffer, else it will return an error.
1998  **/
1999 static int
2000 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2001                             struct hbq_dmabuf *hbq_buf)
2002 {
2003         struct lpfc_hbq_entry *hbqe;
2004         dma_addr_t physaddr = hbq_buf->dbuf.phys;
2005
2006         lockdep_assert_held(&phba->hbalock);
2007         /* Get next HBQ entry slot to use */
2008         hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2009         if (hbqe) {
2010                 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2011
2012                 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2013                 hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
2014                 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2015                 hbqe->bde.tus.f.bdeFlags = 0;
2016                 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2017                 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2018                 /* Sync SLIM */
2019                 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2020                 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2021                 /* flush */
2022                 readl(phba->hbq_put + hbqno);
2023                 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2024                 return 0;
2025         }
2026         return -ENOMEM;
2027 }
2028
2029 /**
2030  * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2031  * @phba: Pointer to HBA context object.
2032  * @hbqno: HBQ number.
2033  * @hbq_buf: Pointer to HBQ buffer.
2034  *
2035  * This function is called with the hbalock held to post an RQE to the SLI4
2036  * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2037  * the hbq_buffer_list and return zero, otherwise it will return an error.
2038  **/
2039 static int
2040 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2041                             struct hbq_dmabuf *hbq_buf)
2042 {
2043         int rc;
2044         struct lpfc_rqe hrqe;
2045         struct lpfc_rqe drqe;
2046         struct lpfc_queue *hrq;
2047         struct lpfc_queue *drq;
2048
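             /* Only the ELS HBQ is backed by the SLI4 hdr/dat RQ pair. */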
2049         if (hbqno != LPFC_ELS_HBQ)
2050                 return 1;
2051         hrq = phba->sli4_hba.hdr_rq;
2052         drq = phba->sli4_hba.dat_rq;
2053
2054         lockdep_assert_held(&phba->hbalock);
2055         hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2056         hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2057         drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2058         drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2059         rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2060         if (rc < 0)
2061                 return rc;
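             /*
              * Tag layout: the index returned by lpfc_sli4_rq_put in the
              * low 16 bits, the HBQ number in the high 16 bits.
              */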
2062         hbq_buf->tag = (rc | (hbqno << 16));
2063         list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2064         return 0;
2065 }
2066
2067 /* HBQ for ELS and CT traffic. */
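     /*
      * entry_count caps how many buffers the HBQ can hold; init_count
      * buffers are posted at SLI3 init time (SLI4 fills to entry_count)
      * and add_count more are posted on each replenish (see
      * lpfc_sli_hbqbuf_add_hbqs below).
      */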
2068 static struct lpfc_hbq_init lpfc_els_hbq = {
2069         .rn = 1,
2070         .entry_count = 256,
2071         .mask_count = 0,
2072         .profile = 0,
2073         .ring_mask = (1 << LPFC_ELS_RING),
2074         .buffer_count = 0,
2075         .init_count = 40,
2076         .add_count = 40,
2077 };
2078
2079 /* Array of HBQs */
2080 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2081         &lpfc_els_hbq,
2082 };
2083
2084 /**
2085  * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2086  * @phba: Pointer to HBA context object.
2087  * @hbqno: HBQ number.
2088  * @count: Number of HBQ buffers to be posted.
2089  *
2090  * This function is called with no lock held to post more hbq buffers to the
2091  * given HBQ. The function returns the number of HBQ buffers successfully
2092  * posted.
2093  **/
2094 static int
2095 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2096 {
2097         uint32_t i, posted = 0;
2098         unsigned long flags;
2099         struct hbq_dmabuf *hbq_buffer;
2100         LIST_HEAD(hbq_buf_list);
2101         if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2102                 return 0;
2103
2104         if ((phba->hbqs[hbqno].buffer_count + count) >
2105             lpfc_hbq_defs[hbqno]->entry_count)
2106                 count = lpfc_hbq_defs[hbqno]->entry_count -
2107                                         phba->hbqs[hbqno].buffer_count;
2108         if (!count)
2109                 return 0;
2110         /* Allocate HBQ entries */
2111         for (i = 0; i < count; i++) {
2112                 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2113                 if (!hbq_buffer)
2114                         break;
2115                 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2116         }
2117         /* Check whether HBQ is still in use */
2118         spin_lock_irqsave(&phba->hbalock, flags);
2119         if (!phba->hbq_in_use)
2120                 goto err;
2121         while (!list_empty(&hbq_buf_list)) {
2122                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2123                                  dbuf.list);
2124                 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2125                                       (hbqno << 16));
2126                 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2127                         phba->hbqs[hbqno].buffer_count++;
2128                         posted++;
2129                 } else
2130                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2131         }
2132         spin_unlock_irqrestore(&phba->hbalock, flags);
2133         return posted;
2134 err:
2135         spin_unlock_irqrestore(&phba->hbalock, flags);
2136         while (!list_empty(&hbq_buf_list)) {
2137                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2138                                  dbuf.list);
2139                 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2140         }
2141         return 0;
2142 }
2143
2144 /**
2145  * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2146  * @phba: Pointer to HBA context object.
2147  * @qno: HBQ number.
2148  *
2149  * This function posts more buffers to the HBQ. This function
2150  * is called with no lock held. The function returns the number of HBQ
2151  * buffers successfully posted.
2152  **/
2153 int
2154 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2155 {
2156         if (phba->sli_rev == LPFC_SLI_REV4)
2157                 return 0;
2158         else
2159                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2160                                          lpfc_hbq_defs[qno]->add_count);
2161 }
2162
2163 /**
2164  * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2165  * @phba: Pointer to HBA context object.
2166  * @qno:  HBQ queue number.
2167  *
2168  * This function is called from SLI initialization code path with
2169  * no lock held to post initial HBQ buffers to firmware. The
2170  * function returns the number of HBQ buffers successfully posted.
2171  **/
2172 static int
2173 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2174 {
2175         if (phba->sli_rev == LPFC_SLI_REV4)
2176                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2177                                         lpfc_hbq_defs[qno]->entry_count);
2178         else
2179                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2180                                          lpfc_hbq_defs[qno]->init_count);
2181 }
2182
2183 /**
2184  * lpfc_sli_hbqbuf_get - Remove the first hbq buffer from an hbq list
2185  * @rb_list: Pointer to the hbq buffer list to take the buffer from.
2186  *
2187  * This function removes the first hbq buffer on an hbq list and returns a
2188  * pointer to that buffer. If it finds no buffers on the list it returns
2189  * NULL.
2190  **/
2191 static struct hbq_dmabuf *
2192 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2193 {
2194         struct lpfc_dmabuf *d_buf;
2195
2196         list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2197         if (!d_buf)
2198                 return NULL;
2199         return container_of(d_buf, struct hbq_dmabuf, dbuf);
2200 }
2201
2202 /**
2203  * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2204  * @phba: Pointer to HBA context object.
2205  * @hbqno: HBQ number.
2206  *
2207  * This function removes the first RQ buffer on an RQ buffer list and returns a
2208  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2209  **/
2210 static struct rqb_dmabuf *
2211 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2212 {
2213         struct lpfc_dmabuf *h_buf;
2214         struct lpfc_rqb *rqbp;
2215
2216         rqbp = hrq->rqbp;
2217         list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2218                          struct lpfc_dmabuf, list);
2219         if (!h_buf)
2220                 return NULL;
2221         rqbp->buffer_count--;
2222         return container_of(h_buf, struct rqb_dmabuf, hbuf);
2223 }
2224
2225 /**
2226  * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2227  * @phba: Pointer to HBA context object.
2228  * @tag: Tag of the hbq buffer.
2229  *
2230  * This function searches for the hbq buffer associated with the given tag in
2231  * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2232  * otherwise it returns NULL.
2233  **/
2234 static struct hbq_dmabuf *
2235 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2236 {
2237         struct lpfc_dmabuf *d_buf;
2238         struct hbq_dmabuf *hbq_buf;
2239         uint32_t hbqno;
2240
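             /* The upper 16 bits of the tag carry the HBQ number. */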
2241         hbqno = tag >> 16;
2242         if (hbqno >= LPFC_MAX_HBQS)
2243                 return NULL;
2244
2245         spin_lock_irq(&phba->hbalock);
2246         list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2247                 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2248                 if (hbq_buf->tag == tag) {
2249                         spin_unlock_irq(&phba->hbalock);
2250                         return hbq_buf;
2251                 }
2252         }
2253         spin_unlock_irq(&phba->hbalock);
2254         lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
2255                         "1803 Bad hbq tag. Data: x%x x%x\n",
2256                         tag, phba->hbqs[hbqno].buffer_count);
2257         return NULL;
2258 }
2259
2260 /**
2261  * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2262  * @phba: Pointer to HBA context object.
2263  * @hbq_buffer: Pointer to HBQ buffer.
2264  *
2265  * This function is called with hbalock held. This function gives back
2266  * the hbq buffer to firmware. If the HBQ does not have space to
2267  * post the buffer, it will free the buffer.
2268  **/
2269 void
2270 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2271 {
2272         uint32_t hbqno;
2273
2274         if (hbq_buffer) {
2275                 hbqno = hbq_buffer->tag >> 16;
2276                 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2277                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2278         }
2279 }
2280
2281 /**
2282  * lpfc_sli_chk_mbx_command - Check for a legitimate mailbox command
2283  * @mbxCommand: mailbox command code.
2284  *
2285  * This function is called by the mailbox event handler function to verify
2286  * that the completed mailbox command is a legitimate mailbox command. If the
2287  * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2288  * and the mailbox event handler will take the HBA offline.
2289  **/
2290 static int
2291 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2292 {
2293         uint8_t ret;
2294
2295         switch (mbxCommand) {
2296         case MBX_LOAD_SM:
2297         case MBX_READ_NV:
2298         case MBX_WRITE_NV:
2299         case MBX_WRITE_VPARMS:
2300         case MBX_RUN_BIU_DIAG:
2301         case MBX_INIT_LINK:
2302         case MBX_DOWN_LINK:
2303         case MBX_CONFIG_LINK:
2304         case MBX_CONFIG_RING:
2305         case MBX_RESET_RING:
2306         case MBX_READ_CONFIG:
2307         case MBX_READ_RCONFIG:
2308         case MBX_READ_SPARM:
2309         case MBX_READ_STATUS:
2310         case MBX_READ_RPI:
2311         case MBX_READ_XRI:
2312         case MBX_READ_REV:
2313         case MBX_READ_LNK_STAT:
2314         case MBX_REG_LOGIN:
2315         case MBX_UNREG_LOGIN:
2316         case MBX_CLEAR_LA:
2317         case MBX_DUMP_MEMORY:
2318         case MBX_DUMP_CONTEXT:
2319         case MBX_RUN_DIAGS:
2320         case MBX_RESTART:
2321         case MBX_UPDATE_CFG:
2322         case MBX_DOWN_LOAD:
2323         case MBX_DEL_LD_ENTRY:
2324         case MBX_RUN_PROGRAM:
2325         case MBX_SET_MASK:
2326         case MBX_SET_VARIABLE:
2327         case MBX_UNREG_D_ID:
2328         case MBX_KILL_BOARD:
2329         case MBX_CONFIG_FARP:
2330         case MBX_BEACON:
2331         case MBX_LOAD_AREA:
2332         case MBX_RUN_BIU_DIAG64:
2333         case MBX_CONFIG_PORT:
2334         case MBX_READ_SPARM64:
2335         case MBX_READ_RPI64:
2336         case MBX_REG_LOGIN64:
2337         case MBX_READ_TOPOLOGY:
2338         case MBX_WRITE_WWN:
2339         case MBX_SET_DEBUG:
2340         case MBX_LOAD_EXP_ROM:
2341         case MBX_ASYNCEVT_ENABLE:
2342         case MBX_REG_VPI:
2343         case MBX_UNREG_VPI:
2344         case MBX_HEARTBEAT:
2345         case MBX_PORT_CAPABILITIES:
2346         case MBX_PORT_IOV_CONTROL:
2347         case MBX_SLI4_CONFIG:
2348         case MBX_SLI4_REQ_FTRS:
2349         case MBX_REG_FCFI:
2350         case MBX_UNREG_FCFI:
2351         case MBX_REG_VFI:
2352         case MBX_UNREG_VFI:
2353         case MBX_INIT_VPI:
2354         case MBX_INIT_VFI:
2355         case MBX_RESUME_RPI:
2356         case MBX_READ_EVENT_LOG_STATUS:
2357         case MBX_READ_EVENT_LOG:
2358         case MBX_SECURITY_MGMT:
2359         case MBX_AUTH_PORT:
2360         case MBX_ACCESS_VDATA:
2361                 ret = mbxCommand;
2362                 break;
2363         default:
2364                 ret = MBX_SHUTDOWN;
2365                 break;
2366         }
2367         return ret;
2368 }
2369
2370 /**
2371  * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2372  * @phba: Pointer to HBA context object.
2373  * @pmboxq: Pointer to mailbox command.
2374  *
2375  * This is completion handler function for mailbox commands issued from
2376  * lpfc_sli_issue_mbox_wait function. This function is called by the
2377  * mailbox event handler function with no lock held. This function
2378  * will wake up the thread waiting on the wait queue pointed to by
2379  * context1 of the mailbox.
2380  **/
2381 void
2382 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2383 {
2384         wait_queue_head_t *pdone_q;
2385         unsigned long drvr_flag;
2386
2387         /*
2388          * If pdone_q is empty, the driver thread gave up waiting and
2389          * continued running.
2390          */
2391         pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2392         spin_lock_irqsave(&phba->hbalock, drvr_flag);
2393         pdone_q = (wait_queue_head_t *) pmboxq->context1;
2394         if (pdone_q)
2395                 wake_up_interruptible(pdone_q);
2396         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2397         return;
2398 }
2399
2400
2401 /**
2402  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2403  * @phba: Pointer to HBA context object.
2404  * @pmb: Pointer to mailbox object.
2405  *
2406  * This function is the default mailbox completion handler. It
2407  * frees the memory resources associated with the completed mailbox
2408  * command. If the completed command is a REG_LOGIN mailbox command,
2409  * this function will issue an UNREG_LOGIN to reclaim the RPI.
2410  **/
2411 void
2412 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2413 {
2414         struct lpfc_vport  *vport = pmb->vport;
2415         struct lpfc_dmabuf *mp;
2416         struct lpfc_nodelist *ndlp;
2417         struct Scsi_Host *shost;
2418         uint16_t rpi, vpi;
2419         int rc;
2420
2421         mp = (struct lpfc_dmabuf *) (pmb->context1);
2422
2423         if (mp) {
2424                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2425                 kfree(mp);
2426         }
2427
2428         /*
2429          * If a REG_LOGIN succeeded after the node was destroyed or the
2430          * node is in re-discovery, the driver needs to clean up the RPI.
2431          */
2432         if (!(phba->pport->load_flag & FC_UNLOADING) &&
2433             pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2434             !pmb->u.mb.mbxStatus) {
2435                 rpi = pmb->u.mb.un.varWords[0];
2436                 vpi = pmb->u.mb.un.varRegLogin.vpi;
2437                 lpfc_unreg_login(phba, vpi, rpi, pmb);
2438                 pmb->vport = vport;
2439                 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2440                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2441                 if (rc != MBX_NOT_FINISHED)
2442                         return;
2443         }
2444
2445         if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2446                 !(phba->pport->load_flag & FC_UNLOADING) &&
2447                 !pmb->u.mb.mbxStatus) {
2448                 shost = lpfc_shost_from_vport(vport);
2449                 spin_lock_irq(shost->host_lock);
2450                 vport->vpi_state |= LPFC_VPI_REGISTERED;
2451                 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2452                 spin_unlock_irq(shost->host_lock);
2453         }
2454
2455         if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2456                 ndlp = (struct lpfc_nodelist *)pmb->context2;
2457                 lpfc_nlp_put(ndlp);
2458                 pmb->context2 = NULL;
2459         }
2460
2461         /* Check security permission status on INIT_LINK mailbox command */
2462         if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2463             (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2464                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2465                                 "2860 SLI authentication is required "
2466                                 "for INIT_LINK but has not done yet\n");
2467
2468         if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2469                 lpfc_sli4_mbox_cmd_free(phba, pmb);
2470         else
2471                 mempool_free(pmb, phba->mbox_mem_pool);
2472 }
2473 /**
2474  * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2475  * @phba: Pointer to HBA context object.
2476  * @pmb: Pointer to mailbox object.
2477  *
2478  * This function is the unreg rpi mailbox completion handler. It
2479  * frees the memory resources associated with the completed mailbox
2480  * command. An additional reference is put on the ndlp to prevent
2481  * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2482  * the unreg mailbox command completes; this routine puts that
2483  * reference back.
2484  *
2485  **/
2486 void
2487 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2488 {
2489         struct lpfc_vport  *vport = pmb->vport;
2490         struct lpfc_nodelist *ndlp;
2491
2492         ndlp = pmb->context1;
2493         if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2494                 if (phba->sli_rev == LPFC_SLI_REV4 &&
2495                     (bf_get(lpfc_sli_intf_if_type,
2496                      &phba->sli4_hba.sli_intf) >=
2497                      LPFC_SLI_INTF_IF_TYPE_2)) {
2498                         if (ndlp) {
2499                                 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
2500                                                  "0010 UNREG_LOGIN vpi:%x "
2501                                                  "rpi:%x DID:%x map:%x %p\n",
2502                                                  vport->vpi, ndlp->nlp_rpi,
2503                                                  ndlp->nlp_DID,
2504                                                  ndlp->nlp_usg_map, ndlp);
2505                                 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2506                                 lpfc_nlp_put(ndlp);
2507                         }
2508                 }
2509         }
2510
2511         mempool_free(pmb, phba->mbox_mem_pool);
2512 }
2513
2514 /**
2515  * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2516  * @phba: Pointer to HBA context object.
2517  *
2518  * This function is called with no lock held. This function processes all
2519  * the completed mailbox commands and gives them to the upper layers. The
2520  * interrupt service routine processes the mailbox completion interrupt and
2521  * adds completed mailbox commands to the mboxq_cmpl queue and signals the
2522  * worker thread. The worker thread calls lpfc_sli_handle_mb_event, which
2523  * returns the completed mailbox commands in the mboxq_cmpl queue to the
2524  * upper layers. This function returns the mailbox commands to the upper
2525  * layer by calling the completion handler function of each mailbox.
2526  **/
2527 int
2528 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2529 {
2530         MAILBOX_t *pmbox;
2531         LPFC_MBOXQ_t *pmb;
2532         int rc;
2533         LIST_HEAD(cmplq);
2534
2535         phba->sli.slistat.mbox_event++;
2536
2537         /* Get all completed mailbox buffers into the cmplq */
2538         spin_lock_irq(&phba->hbalock);
2539         list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2540         spin_unlock_irq(&phba->hbalock);
2541
2542         /* Get a mailbox buffer to set up mailbox commands for callback */
2543         do {
2544                 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2545                 if (pmb == NULL)
2546                         break;
2547
2548                 pmbox = &pmb->u.mb;
2549
2550                 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2551                         if (pmb->vport) {
2552                                 lpfc_debugfs_disc_trc(pmb->vport,
2553                                         LPFC_DISC_TRC_MBOX_VPORT,
2554                                         "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2555                                         (uint32_t)pmbox->mbxCommand,
2556                                         pmbox->un.varWords[0],
2557                                         pmbox->un.varWords[1]);
2558                         } else {
2560                                 lpfc_debugfs_disc_trc(phba->pport,
2561                                         LPFC_DISC_TRC_MBOX,
2562                                         "MBOX cmpl:       cmd:x%x mb:x%x x%x",
2563                                         (uint32_t)pmbox->mbxCommand,
2564                                         pmbox->un.varWords[0],
2565                                         pmbox->un.varWords[1]);
2566                         }
2567                 }
2568
2569                 /*
2570                  * It is a fatal error if an unknown mailbox command completes.
2571                  */
2572                 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2573                     MBX_SHUTDOWN) {
2574                         /* Unknown mailbox command compl */
2575                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2576                                         "(%d):0323 Unknown Mailbox command "
2577                                         "x%x (x%x/x%x) Cmpl\n",
2578                                         pmb->vport ? pmb->vport->vpi : 0,
2579                                         pmbox->mbxCommand,
2580                                         lpfc_sli_config_mbox_subsys_get(phba,
2581                                                                         pmb),
2582                                         lpfc_sli_config_mbox_opcode_get(phba,
2583                                                                         pmb));
2584                         phba->link_state = LPFC_HBA_ERROR;
2585                         phba->work_hs = HS_FFER3;
2586                         lpfc_handle_eratt(phba);
2587                         continue;
2588                 }
2589
2590                 if (pmbox->mbxStatus) {
2591                         phba->sli.slistat.mbox_stat_err++;
2592                         if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2593                                 /* Mbox cmd cmpl error - RETRYing */
2594                                 lpfc_printf_log(phba, KERN_INFO,
2595                                         LOG_MBOX | LOG_SLI,
2596                                         "(%d):0305 Mbox cmd cmpl "
2597                                         "error - RETRYing Data: x%x "
2598                                         "(x%x/x%x) x%x x%x x%x\n",
2599                                         pmb->vport ? pmb->vport->vpi : 0,
2600                                         pmbox->mbxCommand,
2601                                         lpfc_sli_config_mbox_subsys_get(phba,
2602                                                                         pmb),
2603                                         lpfc_sli_config_mbox_opcode_get(phba,
2604                                                                         pmb),
2605                                         pmbox->mbxStatus,
2606                                         pmbox->un.varWords[0],
2607                                         pmb->vport ? pmb->vport->port_state : 0);
2608                                 pmbox->mbxStatus = 0;
2609                                 pmbox->mbxOwner = OWN_HOST;
2610                                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2611                                 if (rc != MBX_NOT_FINISHED)
2612                                         continue;
2613                         }
2614                 }
2615
2616                 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2617                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2618                                 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2619                                 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2620                                 "x%x x%x x%x\n",
2621                                 pmb->vport ? pmb->vport->vpi : 0,
2622                                 pmbox->mbxCommand,
2623                                 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2624                                 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2625                                 pmb->mbox_cmpl,
2626                                 *((uint32_t *) pmbox),
2627                                 pmbox->un.varWords[0],
2628                                 pmbox->un.varWords[1],
2629                                 pmbox->un.varWords[2],
2630                                 pmbox->un.varWords[3],
2631                                 pmbox->un.varWords[4],
2632                                 pmbox->un.varWords[5],
2633                                 pmbox->un.varWords[6],
2634                                 pmbox->un.varWords[7],
2635                                 pmbox->un.varWords[8],
2636                                 pmbox->un.varWords[9],
2637                                 pmbox->un.varWords[10]);
2638
2639                 if (pmb->mbox_cmpl)
2640                         pmb->mbox_cmpl(phba, pmb);
2641         } while (1);
2642         return 0;
2643 }
2644
2645 /**
2646  * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2647  * @phba: Pointer to HBA context object.
2648  * @pring: Pointer to driver SLI ring object.
2649  * @tag: buffer tag.
2650  *
2651  * This function is called with no lock held. When the QUE_BUFTAG_BIT
2652  * is set in the tag, the buffer was posted for a particular exchange and
2653  * the function returns the buffer without replacing it.
2654  * If the buffer is for unsolicited ELS or CT traffic, this function
2655  * returns the buffer and also posts another buffer to the firmware.
2656  **/
2657 static struct lpfc_dmabuf *
2658 lpfc_sli_get_buff(struct lpfc_hba *phba,
2659                   struct lpfc_sli_ring *pring,
2660                   uint32_t tag)
2661 {
2662         struct hbq_dmabuf *hbq_entry;
2663
2664         if (tag & QUE_BUFTAG_BIT)
2665                 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2666         hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2667         if (!hbq_entry)
2668                 return NULL;
2669         return &hbq_entry->dbuf;
2670 }
2671
2672 /**
2673  * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2674  * @phba: Pointer to HBA context object.
2675  * @pring: Pointer to driver SLI ring object.
2676  * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2677  * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2678  * @fch_type: the type for the first frame of the sequence.
2679  *
2680  * This function is called with no lock held. This function uses the r_ctl and
2681  * type of the received sequence to find the correct callback function to call
2682  * to process the sequence.
2683  **/
2684 static int
2685 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2686                          struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2687                          uint32_t fch_type)
2688 {
2689         int i;
2690
2691         switch (fch_type) {
2692         case FC_TYPE_NVME:
2693                 lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
2694                 return 1;
2695         default:
2696                 break;
2697         }
2698
2699         /* Unsolicited Responses */
2700         if (pring->prt[0].profile) {
2701                 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2702                         (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2703                                                                         saveq);
2704                 return 1;
2705         }
2706         /* We must search, based on rctl / type,
2707          * for the right routine */
2708         for (i = 0; i < pring->num_mask; i++) {
2709                 if ((pring->prt[i].rctl == fch_r_ctl) &&
2710                     (pring->prt[i].type == fch_type)) {
2711                         if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2712                                 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2713                                                 (phba, pring, saveq);
2714                         return 1;
2715                 }
2716         }
2717         return 0;
2718 }
2719
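/*
 * The prt[] mask table consulted above is populated at ring setup time; a
 * condensed sketch of an ELS-ring registration, as done in lpfc_sli_setup()
 * elsewhere in this file (field values shown for illustration):
 *
 *	pring->prt[0].profile = 0;
 *	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
 *	pring->prt[0].type = FC_TYPE_ELS;
 *	pring->prt[0].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event;
 *
 * lpfc_complete_unsol_iocb() then matches the received rctl/type pair
 * against these entries to dispatch the sequence.
 */
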
2720 /**
2721  * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
2722  * @phba: Pointer to HBA context object.
2723  * @pring: Pointer to driver SLI ring object.
2724  * @saveq: Pointer to the unsolicited iocb.
2725  *
2726  * This function is called with no lock held by the ring event handler
2727  * when there is an unsolicited iocb posted to the response ring by the
2728  * firmware. This function gets the buffer associated with the iocbs
2729  * and calls the event handler for the ring. This function handles both
2730  * qring buffers and hbq buffers.
2731  * When the function returns 1, the caller can free the iocb object;
2732  * otherwise, upper layer functions will free the iocb objects.
2733  **/
2734 static int
2735 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2736                             struct lpfc_iocbq *saveq)
2737 {
2738         IOCB_t           * irsp;
2739         WORD5            * w5p;
2740         uint32_t           Rctl, Type;
2741         struct lpfc_iocbq *iocbq;
2742         struct lpfc_dmabuf *dmzbuf;
2743
2744         irsp = &(saveq->iocb);
2745
2746         if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2747                 if (pring->lpfc_sli_rcv_async_status)
2748                         pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2749                 else
2750                         lpfc_printf_log(phba,
2751                                         KERN_WARNING,
2752                                         LOG_SLI,
2753                                         "0316 Ring %d handler: unexpected "
2754                                         "ASYNC_STATUS iocb received evt_code "
2755                                         "0x%x\n",
2756                                         pring->ringno,
2757                                         irsp->un.asyncstat.evt_code);
2758                 return 1;
2759         }
2760
2761         if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2762                 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2763                 if (irsp->ulpBdeCount > 0) {
2764                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2765                                         irsp->un.ulpWord[3]);
2766                         lpfc_in_buf_free(phba, dmzbuf);
2767                 }
2768
2769                 if (irsp->ulpBdeCount > 1) {
2770                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2771                                         irsp->unsli3.sli3Words[3]);
2772                         lpfc_in_buf_free(phba, dmzbuf);
2773                 }
2774
2775                 if (irsp->ulpBdeCount > 2) {
2776                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2777                                 irsp->unsli3.sli3Words[7]);
2778                         lpfc_in_buf_free(phba, dmzbuf);
2779                 }
2780
2781                 return 1;
2782         }
2783
2784         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2785                 if (irsp->ulpBdeCount != 0) {
2786                         saveq->context2 = lpfc_sli_get_buff(phba, pring,
2787                                                 irsp->un.ulpWord[3]);
2788                         if (!saveq->context2)
2789                                 lpfc_printf_log(phba,
2790                                         KERN_ERR,
2791                                         LOG_SLI,
2792                                         "0341 Ring %d Cannot find buffer for "
2793                                         "an unsolicited iocb. tag 0x%x\n",
2794                                         pring->ringno,
2795                                         irsp->un.ulpWord[3]);
2796                 }
2797                 if (irsp->ulpBdeCount == 2) {
2798                         saveq->context3 = lpfc_sli_get_buff(phba, pring,
2799                                                 irsp->unsli3.sli3Words[7]);
2800                         if (!saveq->context3)
2801                                 lpfc_printf_log(phba,
2802                                         KERN_ERR,
2803                                         LOG_SLI,
2804                                         "0342 Ring %d Cannot find buffer for an"
2805                                         " unsolicited iocb. tag 0x%x\n",
2806                                         pring->ringno,
2807                                         irsp->unsli3.sli3Words[7]);
2808                 }
2809                 list_for_each_entry(iocbq, &saveq->list, list) {
2810                         irsp = &(iocbq->iocb);
2811                         if (irsp->ulpBdeCount != 0) {
2812                                 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2813                                                         irsp->un.ulpWord[3]);
2814                                 if (!iocbq->context2)
2815                                         lpfc_printf_log(phba,
2816                                                 KERN_ERR,
2817                                                 LOG_SLI,
2818                                                 "0343 Ring %d Cannot find "
2819                                                 "buffer for an unsolicited iocb"
2820                                                 ". tag 0x%x\n", pring->ringno,
2821                                                 irsp->un.ulpWord[3]);
2822                         }
2823                         if (irsp->ulpBdeCount == 2) {
2824                                 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2825                                                 irsp->unsli3.sli3Words[7]);
2826                                 if (!iocbq->context3)
2827                                         lpfc_printf_log(phba,
2828                                                 KERN_ERR,
2829                                                 LOG_SLI,
2830                                                 "0344 Ring %d Cannot find "
2831                                                 "buffer for an unsolicited "
2832                                                 "iocb. tag 0x%x\n",
2833                                                 pring->ringno,
2834                                                 irsp->unsli3.sli3Words[7]);
2835                         }
2836                 }
2837         }
2838         if (irsp->ulpBdeCount != 0 &&
2839             (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2840              irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2841                 int found = 0;
2842
2843                 /* search continue save q for same XRI */
2844                 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2845                         if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2846                                 saveq->iocb.unsli3.rcvsli3.ox_id) {
2847                                 list_add_tail(&saveq->list, &iocbq->list);
2848                                 found = 1;
2849                                 break;
2850                         }
2851                 }
2852                 if (!found)
2853                         list_add_tail(&saveq->clist,
2854                                       &pring->iocb_continue_saveq);
2855                 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2856                         list_del_init(&iocbq->clist);
2857                         saveq = iocbq;
2858                         irsp = &(saveq->iocb);
2859                 } else
2860                         return 0;
2861         }
2862         if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2863             (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2864             (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2865                 Rctl = FC_RCTL_ELS_REQ;
2866                 Type = FC_TYPE_ELS;
2867         } else {
2868                 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2869                 Rctl = w5p->hcsw.Rctl;
2870                 Type = w5p->hcsw.Type;
2871
2872                 /* Firmware Workaround */
2873                 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2874                         (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2875                          irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2876                         Rctl = FC_RCTL_ELS_REQ;
2877                         Type = FC_TYPE_ELS;
2878                         w5p->hcsw.Rctl = Rctl;
2879                         w5p->hcsw.Type = Type;
2880                 }
2881         }
2882
2883         if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
2884                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2885                                 "0313 Ring %d handler: unexpected Rctl x%x "
2886                                 "Type x%x received\n",
2887                                 pring->ringno, Rctl, Type);
2888
2889         return 1;
2890 }
2891
2892 /**
2893  * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
2894  * @phba: Pointer to HBA context object.
2895  * @pring: Pointer to driver SLI ring object.
2896  * @prspiocb: Pointer to response iocb object.
2897  *
2898  * This function looks up the iocb_lookup table to get the command iocb
2899  * corresponding to the given response iocb using the iotag of the
2900  * response iocb. This function is called with the hbalock held
2901  * for sli3 devices or the ring_lock for sli4 devices.
2902  * This function returns the command iocb object if it finds the command
2903  * iocb; otherwise it returns NULL.
2904  **/
2905 static struct lpfc_iocbq *
2906 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2907                       struct lpfc_sli_ring *pring,
2908                       struct lpfc_iocbq *prspiocb)
2909 {
2910         struct lpfc_iocbq *cmd_iocb = NULL;
2911         uint16_t iotag;
2912         lockdep_assert_held(&phba->hbalock);
2913
2914         iotag = prspiocb->iocb.ulpIoTag;
2915
2916         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2917                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2918                 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2919                         /* remove from txcmpl queue list */
2920                         list_del_init(&cmd_iocb->list);
2921                         cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2922                         return cmd_iocb;
2923                 }
2924         }
2925
2926         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2927                         "0317 iotag x%x is out of "
2928                         "range: max iotag x%x wd0 x%x\n",
2929                         iotag, phba->sli.last_iotag,
2930                         *(((uint32_t *) &prspiocb->iocb) + 7));
2931         return NULL;
2932 }
2933
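/*
 * The lookup above depends on the iotag contract established on the submit
 * side; a simplified sketch (see lpfc_sli_next_iotag() in this file for the
 * real allocator, which also grows the table as needed):
 *
 *	iotag = lpfc_sli_next_iotag(phba, piocb);
 *	// sets piocb->iotag and phba->sli.iocbq_lookup[iotag] = piocb
 *	piocb->iocb.ulpIoTag = iotag;
 *
 * The firmware echoes ulpIoTag in the response iocb, which is what the
 * lookup uses to recover the original command iocb.
 */
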
2934 /**
2935  * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2936  * @phba: Pointer to HBA context object.
2937  * @pring: Pointer to driver SLI ring object.
2938  * @iotag: IOCB tag.
2939  *
2940  * This function looks up the iocb_lookup table to get the command iocb
2941  * corresponding to the given iotag. This function is called with the
2942  * hbalock held.
2943  * This function returns the command iocb object if it finds the command
2944  * iocb; otherwise it returns NULL.
2945  **/
2946 static struct lpfc_iocbq *
2947 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2948                              struct lpfc_sli_ring *pring, uint16_t iotag)
2949 {
2950         struct lpfc_iocbq *cmd_iocb = NULL;
2951
2952         lockdep_assert_held(&phba->hbalock);
2953         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2954                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2955                 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2956                         /* remove from txcmpl queue list */
2957                         list_del_init(&cmd_iocb->list);
2958                         cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2959                         return cmd_iocb;
2960                 }
2961         }
2962
2963         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2964                         "0372 iotag x%x lookup error: max iotag (x%x) "
2965                         "iocb_flag x%x\n",
2966                         iotag, phba->sli.last_iotag,
2967                         cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
2968         return NULL;
2969 }
2970
2971 /**
2972  * lpfc_sli_process_sol_iocb - process solicited iocb completion
2973  * @phba: Pointer to HBA context object.
2974  * @pring: Pointer to driver SLI ring object.
2975  * @saveq: Pointer to the response iocb to be processed.
2976  *
2977  * This function is called by the ring event handler for non-fcp
2978  * rings when there is a new response iocb in the response ring.
2979  * The caller is not required to hold any locks. This function
2980  * gets the command iocb associated with the response iocb and
2981  * calls the completion handler for the command iocb. If there
2982  * is no completion handler, the function will free the resources
2983  * associated with command iocb. If the response iocb is for
2984  * an already aborted command iocb, the status of the completion
2985  * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
2986  * This function always returns 1.
2987  **/
2988 static int
2989 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2990                           struct lpfc_iocbq *saveq)
2991 {
2992         struct lpfc_iocbq *cmdiocbp;
2993         int rc = 1;
2994         unsigned long iflag;
2995
2996         /* Based on the iotag field, get the cmd IOCB from the txcmplq */
2997         if (phba->sli_rev == LPFC_SLI_REV4)
2998                 spin_lock_irqsave(&pring->ring_lock, iflag);
2999         else
3000                 spin_lock_irqsave(&phba->hbalock, iflag);
3001         cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3002         if (phba->sli_rev == LPFC_SLI_REV4)
3003                 spin_unlock_irqrestore(&pring->ring_lock, iflag);
3004         else
3005                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3006
3007         if (cmdiocbp) {
3008                 if (cmdiocbp->iocb_cmpl) {
3009                         /*
3010                          * If an ELS command failed send an event to mgmt
3011                          * application.
3012                          */
3013                         if (saveq->iocb.ulpStatus &&
3014                              (pring->ringno == LPFC_ELS_RING) &&
3015                              (cmdiocbp->iocb.ulpCommand ==
3016                                 CMD_ELS_REQUEST64_CR))
3017                                 lpfc_send_els_failure_event(phba,
3018                                         cmdiocbp, saveq);
3019
3020                         /*
3021                          * Post all ELS completions to the worker thread.
3022                          * All other are passed to the completion callback.
3023                          */
3024                         if (pring->ringno == LPFC_ELS_RING) {
3025                                 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3026                                     (cmdiocbp->iocb_flag &
3027                                                         LPFC_DRIVER_ABORTED)) {
3028                                         spin_lock_irqsave(&phba->hbalock,
3029                                                           iflag);
3030                                         cmdiocbp->iocb_flag &=
3031                                                 ~LPFC_DRIVER_ABORTED;
3032                                         spin_unlock_irqrestore(&phba->hbalock,
3033                                                                iflag);
3034                                         saveq->iocb.ulpStatus =
3035                                                 IOSTAT_LOCAL_REJECT;
3036                                         saveq->iocb.un.ulpWord[4] =
3037                                                 IOERR_SLI_ABORTED;
3038
3039                                         /* Firmware may still be DMAing
3040                                          * the payload, so don't free the
3041                                          * data buffer until after a heartbeat.
3042                                          */
3043                                         spin_lock_irqsave(&phba->hbalock,
3044                                                           iflag);
3045                                         saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
3046                                         spin_unlock_irqrestore(&phba->hbalock,
3047                                                                iflag);
3048                                 }
3049                                 if (phba->sli_rev == LPFC_SLI_REV4) {
3050                                         if (saveq->iocb_flag &
3051                                             LPFC_EXCHANGE_BUSY) {
3052                                                 /* Set cmdiocb flag for the
3053                                                  * exchange busy so sgl (xri)
3054                                                  * will not be released until
3055                                                  * the abort xri is received
3056                                                  * from hba.
3057                                                  */
3058                                                 spin_lock_irqsave(
3059                                                         &phba->hbalock, iflag);
3060                                                 cmdiocbp->iocb_flag |=
3061                                                         LPFC_EXCHANGE_BUSY;
3062                                                 spin_unlock_irqrestore(
3063                                                         &phba->hbalock, iflag);
3064                                         }
3065                                         if (cmdiocbp->iocb_flag &
3066                                             LPFC_DRIVER_ABORTED) {
3067                                                 /*
3068                                                  * Clear LPFC_DRIVER_ABORTED
3069                                                  * bit in case it was driver
3070                                                  * initiated abort.
3071                                                  */
3072                                                 spin_lock_irqsave(
3073                                                         &phba->hbalock, iflag);
3074                                                 cmdiocbp->iocb_flag &=
3075                                                         ~LPFC_DRIVER_ABORTED;
3076                                                 spin_unlock_irqrestore(
3077                                                         &phba->hbalock, iflag);
3078                                                 cmdiocbp->iocb.ulpStatus =
3079                                                         IOSTAT_LOCAL_REJECT;
3080                                                 cmdiocbp->iocb.un.ulpWord[4] =
3081                                                         IOERR_ABORT_REQUESTED;
3082                                                 /*
3083                                                  * For SLI4, irspiocb contains
3084                                                  * NO_XRI in sli_xritag, so it
3085                                                  * does not affect the sgl (xri)
3086                                                  * release process.
3087                                                  */
3088                                                 saveq->iocb.ulpStatus =
3089                                                         IOSTAT_LOCAL_REJECT;
3090                                                 saveq->iocb.un.ulpWord[4] =
3091                                                         IOERR_SLI_ABORTED;
3092                                                 spin_lock_irqsave(
3093                                                         &phba->hbalock, iflag);
3094                                                 saveq->iocb_flag |=
3095                                                         LPFC_DELAY_MEM_FREE;
3096                                                 spin_unlock_irqrestore(
3097                                                         &phba->hbalock, iflag);
3098                                         }
3099                                 }
3100                         }
3101                         (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
3102                 } else
3103                         lpfc_sli_release_iocbq(phba, cmdiocbp);
3104         } else {
3105                 /*
3106                  * Unknown initiating command based on the response iotag.
3107                  * This could be the case on the ELS ring because of
3108                  * lpfc_els_abort().
3109                  */
3110                 if (pring->ringno != LPFC_ELS_RING) {
3111                         /*
3112                          * Ring <ringno> handler: unexpected completion IoTag
3113                          * <IoTag>
3114                          */
3115                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3116                                          "0322 Ring %d handler: "
3117                                          "unexpected completion IoTag x%x "
3118                                          "Data: x%x x%x x%x x%x\n",
3119                                          pring->ringno,
3120                                          saveq->iocb.ulpIoTag,
3121                                          saveq->iocb.ulpStatus,
3122                                          saveq->iocb.un.ulpWord[4],
3123                                          saveq->iocb.ulpCommand,
3124                                          saveq->iocb.ulpContext);
3125                 }
3126         }
3127
3128         return rc;
3129 }
3130
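/*
 * The status rewrites above mean a driver-initiated ELS abort is reported to
 * the completion handler exactly like a local reject; a hypothetical handler
 * could therefore treat both cases uniformly:
 *
 *	if (rspiocb->iocb.ulpStatus == IOSTAT_LOCAL_REJECT &&
 *	    rspiocb->iocb.un.ulpWord[4] == IOERR_SLI_ABORTED) {
 *		// exchange was aborted; release resources, skip retries
 *	}
 */
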
3131 /**
3132  * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3133  * @phba: Pointer to HBA context object.
3134  * @pring: Pointer to driver SLI ring object.
3135  *
3136  * This function is called from the iocb ring event handlers when the
3137  * put pointer is ahead of the get pointer for a ring. This function signals
3138  * an error attention condition to the worker thread, and the worker
3139  * thread will transition the HBA to the offline state.
3140  **/
3141 static void
3142 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3143 {
3144         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3145         /*
3146          * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3147          * rsp ring <portRspMax>
3148          */
3149         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3150                         "0312 Ring %d handler: portRspPut %d "
3151                         "is bigger than rsp ring %d\n",
3152                         pring->ringno, le32_to_cpu(pgp->rspPutInx),
3153                         pring->sli.sli3.numRiocb);
3154
3155         phba->link_state = LPFC_HBA_ERROR;
3156
3157         /*
3158          * All error attention handlers are posted to
3159          * worker thread
3160          */
3161         phba->work_ha |= HA_ERATT;
3162         phba->work_hs = HS_FFER3;
3163
3164         lpfc_worker_wake_up(phba);
3165
3166         return;
3167 }
3168
3169 /**
3170  * lpfc_poll_eratt - Error attention polling timer timeout handler
3171  * @t: Pointer to the timer_list embedded in the HBA context object.
3172  *
3173  * This function is invoked by the Error Attention polling timer when the
3174  * timer times out. It will check the SLI Error Attention register for
3175  * possible attention events. If so, it will post an Error Attention event
3176  * and wake up worker thread to process it. Otherwise, it will set up the
3177  * Error Attention polling timer for the next poll.
3178  **/
3179 void lpfc_poll_eratt(struct timer_list *t)
3180 {
3181         struct lpfc_hba *phba;
3182         uint32_t eratt = 0;
3183         uint64_t sli_intr, cnt;
3184
3185         phba = from_timer(phba, t, eratt_poll);
3186
3187         /* Here we will also keep track of interrupts per sec of the hba */
3188         sli_intr = phba->sli.slistat.sli_intr;
3189
3190         if (phba->sli.slistat.sli_prev_intr > sli_intr)
3191                 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3192                         sli_intr);
3193         else
3194                 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3195
3196         /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3197         do_div(cnt, phba->eratt_poll_interval);
3198         phba->sli.slistat.sli_ips = cnt;
3199
3200         phba->sli.slistat.sli_prev_intr = sli_intr;
3201
3202         /* Check chip HA register for error event */
3203         eratt = lpfc_sli_check_eratt(phba);
3204
3205         if (eratt)
3206                 /* Tell the worker thread there is work to do */
3207                 lpfc_worker_wake_up(phba);
3208         else
3209                 /* Restart the timer for next eratt poll */
3210                 mod_timer(&phba->eratt_poll,
3211                           jiffies +
3212                           msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3213         return;
3214 }
3215
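/*
 * The delta computation above is wraparound safe. A worked example, assuming
 * eratt_poll_interval = 30 seconds and a counter that wrapped between polls:
 *
 *	sli_prev_intr = 0xfffffffffffffff0, sli_intr = 0x20
 *	cnt = ((uint64_t)(-1) - 0xfffffffffffffff0) + 0x20 = 0x2f (47)
 *	do_div(cnt, 30);	// cnt = 1 -> sli_ips ~ 1 interrupt/sec
 */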
3216
3217 /**
3218  * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3219  * @phba: Pointer to HBA context object.
3220  * @pring: Pointer to driver SLI ring object.
3221  * @mask: Host attention register mask for this ring.
3222  *
3223  * This function is called from the interrupt context when there is a ring
3224  * event for the fcp ring. The caller does not hold any lock.
3225  * The function processes each response iocb in the response ring until it
3226  * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
3227  * LE bit set. The function will call the completion handler of the command iocb
3228  * if the response iocb indicates a completion for a command iocb or it is
3229  * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3230  * function if this is an unsolicited iocb.
3231  * This routine presumes LPFC_FCP_RING handling and doesn't bother
3232  * to check it explicitly.
3233  **/
3234 int
3235 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3236                                 struct lpfc_sli_ring *pring, uint32_t mask)
3237 {
3238         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3239         IOCB_t *irsp = NULL;
3240         IOCB_t *entry = NULL;
3241         struct lpfc_iocbq *cmdiocbq = NULL;
3242         struct lpfc_iocbq rspiocbq;
3243         uint32_t status;
3244         uint32_t portRspPut, portRspMax;
3245         int rc = 1;
3246         lpfc_iocb_type type;
3247         unsigned long iflag;
3248         uint32_t rsp_cmpl = 0;
3249
3250         spin_lock_irqsave(&phba->hbalock, iflag);
3251         pring->stats.iocb_event++;
3252
3253         /*
3254          * The next available response entry should never exceed the maximum
3255          * entries.  If it does, treat it as an adapter hardware error.
3256          */
3257         portRspMax = pring->sli.sli3.numRiocb;
3258         portRspPut = le32_to_cpu(pgp->rspPutInx);
3259         if (unlikely(portRspPut >= portRspMax)) {
3260                 lpfc_sli_rsp_pointers_error(phba, pring);
3261                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3262                 return 1;
3263         }
3264         if (phba->fcp_ring_in_use) {
3265                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3266                 return 1;
3267         } else
3268                 phba->fcp_ring_in_use = 1;
3269
3270         rmb();
3271         while (pring->sli.sli3.rspidx != portRspPut) {
3272                 /*
3273                  * Fetch an entry off the ring and copy it into a local data
3274                  * structure.  The copy involves a byte-swap since the
3275                  * network byte order and pci byte orders are different.
3276                  */
3277                 entry = lpfc_resp_iocb(phba, pring);
3278                 phba->last_completion_time = jiffies;
3279
3280                 if (++pring->sli.sli3.rspidx >= portRspMax)
3281                         pring->sli.sli3.rspidx = 0;
3282
3283                 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3284                                       (uint32_t *) &rspiocbq.iocb,
3285                                       phba->iocb_rsp_size);
3286                 INIT_LIST_HEAD(&(rspiocbq.list));
3287                 irsp = &rspiocbq.iocb;
3288
3289                 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3290                 pring->stats.iocb_rsp++;
3291                 rsp_cmpl++;
3292
3293                 if (unlikely(irsp->ulpStatus)) {
3294                         /*
3295                          * If resource errors reported from HBA, reduce
3296                          * queuedepths of the SCSI device.
3297                          */
3298                         if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3299                             ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3300                              IOERR_NO_RESOURCES)) {
3301                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3302                                 phba->lpfc_rampdown_queue_depth(phba);
3303                                 spin_lock_irqsave(&phba->hbalock, iflag);
3304                         }
3305
3306                         /* Rsp ring <ringno> error: IOCB */
3307                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3308                                         "0336 Rsp Ring %d error: IOCB Data: "
3309                                         "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3310                                         pring->ringno,
3311                                         irsp->un.ulpWord[0],
3312                                         irsp->un.ulpWord[1],
3313                                         irsp->un.ulpWord[2],
3314                                         irsp->un.ulpWord[3],
3315                                         irsp->un.ulpWord[4],
3316                                         irsp->un.ulpWord[5],
3317                                         *(uint32_t *)&irsp->un1,
3318                                         *((uint32_t *)&irsp->un1 + 1));
3319                 }
3320
3321                 switch (type) {
3322                 case LPFC_ABORT_IOCB:
3323                 case LPFC_SOL_IOCB:
3324                         /*
3325                          * Idle exchange closed via ABTS from port.  No iocb
3326                          * resources need to be recovered.
3327                          */
3328                         if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3329                                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3330                                                 "0333 IOCB cmd 0x%x"
3331                                                 " processed. Skipping"
3332                                                 " completion\n",
3333                                                 irsp->ulpCommand);
3334                                 break;
3335                         }
3336
3337                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3338                                                          &rspiocbq);
3339                         if (unlikely(!cmdiocbq))
3340                                 break;
3341                         if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3342                                 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3343                         if (cmdiocbq->iocb_cmpl) {
3344                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3345                                 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3346                                                       &rspiocbq);
3347                                 spin_lock_irqsave(&phba->hbalock, iflag);
3348                         }
3349                         break;
3350                 case LPFC_UNSOL_IOCB:
3351                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3352                         lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3353                         spin_lock_irqsave(&phba->hbalock, iflag);
3354                         break;
3355                 default:
3356                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3357                                 char adaptermsg[LPFC_MAX_ADPTMSG];
3358                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3359                                 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3360                                        MAX_MSG_DATA);
3361                                 dev_warn(&((phba->pcidev)->dev),
3362                                          "lpfc%d: %s\n",
3363                                          phba->brd_no, adaptermsg);
3364                         } else {
3365                                 /* Unknown IOCB command */
3366                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3367                                                 "0334 Unknown IOCB command "
3368                                                 "Data: x%x, x%x x%x x%x x%x\n",
3369                                                 type, irsp->ulpCommand,
3370                                                 irsp->ulpStatus,
3371                                                 irsp->ulpIoTag,
3372                                                 irsp->ulpContext);
3373                         }
3374                         break;
3375                 }
3376
3377                 /*
3378                  * The response IOCB has been processed.  Update the ring
3379                  * pointer in SLIM.  If the port response put pointer has not
3380                  * been updated, sync the pgp->rspPutInx and fetch the new port
3381                  * response put pointer.
3382                  */
3383                 writel(pring->sli.sli3.rspidx,
3384                         &phba->host_gp[pring->ringno].rspGetInx);
3385
3386                 if (pring->sli.sli3.rspidx == portRspPut)
3387                         portRspPut = le32_to_cpu(pgp->rspPutInx);
3388         }
3389
3390         if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3391                 pring->stats.iocb_rsp_full++;
3392                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3393                 writel(status, phba->CAregaddr);
3394                 readl(phba->CAregaddr);
3395         }
3396         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3397                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3398                 pring->stats.iocb_cmd_empty++;
3399
3400                 /* Force update of the local copy of cmdGetInx */
3401                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3402                 lpfc_sli_resume_iocb(phba, pring);
3403
3404                 if ((pring->lpfc_sli_cmd_available))
3405                         (pring->lpfc_sli_cmd_available) (phba, pring);
3406
3407         }
3408
3409         phba->fcp_ring_in_use = 0;
3410         spin_unlock_irqrestore(&phba->hbalock, iflag);
3411         return rc;
3412 }
3413
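/*
 * The walk above is the usual producer/consumer index pattern: the port
 * advances rspPutInx, the driver advances rspidx and mirrors it to
 * rspGetInx in SLIM. For example, with portRspMax = 32:
 *
 *	rspidx = 30, portRspPut = 1
 *	-> entries 30 and 31 are consumed, rspidx wraps to 0,
 *	   entry 0 is consumed, and the loop stops at rspidx == 1
 */
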
3414 /**
3415  * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3416  * @phba: Pointer to HBA context object.
3417  * @pring: Pointer to driver SLI ring object.
3418  * @rspiocbp: Pointer to driver response IOCB object.
3419  *
3420  * This function is called from the worker thread when there is a slow-path
3421  * response IOCB to process. This function chains all the response iocbs until
3422  * seeing the iocb with the LE bit set. The function will call
3423  * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3424  * completion of a command iocb. The function will call the
3425  * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3426  * The function frees the resources or calls the completion handler if this
3427  * iocb is an abort completion. The function returns NULL when the response
3428  * iocb has the LE bit set and all the chained iocbs are processed; otherwise
3429  * it chains the iocb onto the iocb_continueq and returns the
3430  * response iocb passed in.
3431  **/
3432 static struct lpfc_iocbq *
3433 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3434                         struct lpfc_iocbq *rspiocbp)
3435 {
3436         struct lpfc_iocbq *saveq;
3437         struct lpfc_iocbq *cmdiocbp;
3438         struct lpfc_iocbq *next_iocb;
3439         IOCB_t *irsp = NULL;
3440         uint32_t free_saveq;
3441         uint8_t iocb_cmd_type;
3442         lpfc_iocb_type type;
3443         unsigned long iflag;
3444         int rc;
3445
3446         spin_lock_irqsave(&phba->hbalock, iflag);
3447         /* First add the response iocb to the continueq list */
3448         list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3449         pring->iocb_continueq_cnt++;
3450
3451         /* Now, determine whether the list is completed for processing */
3452         irsp = &rspiocbp->iocb;
3453         if (irsp->ulpLe) {
3454                 /*
3455                  * By default, the driver expects to free all resources
3456                  * associated with this iocb completion.
3457                  */
3458                 free_saveq = 1;
3459                 saveq = list_get_first(&pring->iocb_continueq,
3460                                        struct lpfc_iocbq, list);
3461                 irsp = &(saveq->iocb);
3462                 list_del_init(&pring->iocb_continueq);
3463                 pring->iocb_continueq_cnt = 0;
3464
3465                 pring->stats.iocb_rsp++;
3466
3467                 /*
3468                  * If resource errors reported from HBA, reduce
3469                  * queuedepths of the SCSI device.
3470                  */
3471                 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3472                     ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3473                      IOERR_NO_RESOURCES)) {
3474                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3475                         phba->lpfc_rampdown_queue_depth(phba);
3476                         spin_lock_irqsave(&phba->hbalock, iflag);
3477                 }
3478
3479                 if (irsp->ulpStatus) {
3480                         /* Rsp ring <ringno> error: IOCB */
3481                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3482                                         "0328 Rsp Ring %d error: "
3483                                         "IOCB Data: "
3484                                         "x%x x%x x%x x%x "
3485                                         "x%x x%x x%x x%x "
3486                                         "x%x x%x x%x x%x "
3487                                         "x%x x%x x%x x%x\n",
3488                                         pring->ringno,
3489                                         irsp->un.ulpWord[0],
3490                                         irsp->un.ulpWord[1],
3491                                         irsp->un.ulpWord[2],
3492                                         irsp->un.ulpWord[3],
3493                                         irsp->un.ulpWord[4],
3494                                         irsp->un.ulpWord[5],
3495                                         *(((uint32_t *) irsp) + 6),
3496                                         *(((uint32_t *) irsp) + 7),
3497                                         *(((uint32_t *) irsp) + 8),
3498                                         *(((uint32_t *) irsp) + 9),
3499                                         *(((uint32_t *) irsp) + 10),
3500                                         *(((uint32_t *) irsp) + 11),
3501                                         *(((uint32_t *) irsp) + 12),
3502                                         *(((uint32_t *) irsp) + 13),
3503                                         *(((uint32_t *) irsp) + 14),
3504                                         *(((uint32_t *) irsp) + 15));
3505                 }
3506
3507                 /*
3508                  * Fetch the IOCB command type and call the correct completion
3509                  * routine. Solicited and Unsolicited IOCBs on the ELS ring
3510                  * get freed back to the lpfc_iocb_list by the discovery
3511                  * kernel thread.
3512                  */
3513                 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3514                 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3515                 switch (type) {
3516                 case LPFC_SOL_IOCB:
3517                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3518                         rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3519                         spin_lock_irqsave(&phba->hbalock, iflag);
3520                         break;
3521
3522                 case LPFC_UNSOL_IOCB:
3523                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3524                         rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3525                         spin_lock_irqsave(&phba->hbalock, iflag);
3526                         if (!rc)
3527                                 free_saveq = 0;
3528                         break;
3529
3530                 case LPFC_ABORT_IOCB:
3531                         cmdiocbp = NULL;
3532                         if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3533                                 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3534                                                                  saveq);
3535                         if (cmdiocbp) {
3536                                 /* Call the specified completion routine */
3537                                 if (cmdiocbp->iocb_cmpl) {
3538                                         spin_unlock_irqrestore(&phba->hbalock,
3539                                                                iflag);
3540                                         (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3541                                                               saveq);
3542                                         spin_lock_irqsave(&phba->hbalock,
3543                                                           iflag);
3544                                 } else
3545                                         __lpfc_sli_release_iocbq(phba,
3546                                                                  cmdiocbp);
3547                         }
3548                         break;
3549
3550                 case LPFC_UNKNOWN_IOCB:
3551                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3552                                 char adaptermsg[LPFC_MAX_ADPTMSG];
3553                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3554                                 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3555                                        MAX_MSG_DATA);
3556                                 dev_warn(&((phba->pcidev)->dev),
3557                                          "lpfc%d: %s\n",
3558                                          phba->brd_no, adaptermsg);
3559                         } else {
3560                                 /* Unknown IOCB command */
3561                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3562                                                 "0335 Unknown IOCB "
3563                                                 "command Data: x%x "
3564                                                 "x%x x%x x%x\n",
3565                                                 irsp->ulpCommand,
3566                                                 irsp->ulpStatus,
3567                                                 irsp->ulpIoTag,
3568                                                 irsp->ulpContext);
3569                         }
3570                         break;
3571                 }
3572
3573                 if (free_saveq) {
3574                         list_for_each_entry_safe(rspiocbp, next_iocb,
3575                                                  &saveq->list, list) {
3576                                 list_del_init(&rspiocbp->list);
3577                                 __lpfc_sli_release_iocbq(phba, rspiocbp);
3578                         }
3579                         __lpfc_sli_release_iocbq(phba, saveq);
3580                 }
3581                 rspiocbp = NULL;
3582         }
3583         spin_unlock_irqrestore(&phba->hbalock, iflag);
3584         return rspiocbp;
3585 }
3586
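/*
 * A multi-entry response is accumulated on pring->iocb_continueq until the
 * entry with ulpLe set arrives; e.g. a three-entry response:
 *
 *	rsp #1 (ulpLe == 0) -> queued, rsp #1 returned to the caller
 *	rsp #2 (ulpLe == 0) -> queued, rsp #2 returned to the caller
 *	rsp #3 (ulpLe == 1) -> whole chain processed, NULL returned
 */
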
3587 /**
3588  * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3589  * @phba: Pointer to HBA context object.
3590  * @pring: Pointer to driver SLI ring object.
3591  * @mask: Host attention register mask for this ring.
3592  *
3593  * This routine wraps the actual slow_ring event process routine from the
3594  * API jump table function pointer from the lpfc_hba struct.
3595  **/
3596 void
3597 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3598                                 struct lpfc_sli_ring *pring, uint32_t mask)
3599 {
3600         phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3601 }
3602
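/*
 * The indirection above is resolved at attach time based on the PCI device
 * group / SLI revision; a condensed sketch of the assignment made in
 * lpfc_sli_api_table_setup():
 *
 *	case LPFC_PCI_DEV_LP:	// SLI-3 capable HBA
 *		phba->lpfc_sli_handle_slow_ring_event =
 *				lpfc_sli_handle_slow_ring_event_s3;
 *		break;
 *	case LPFC_PCI_DEV_OC:	// SLI-4 capable HBA
 *		phba->lpfc_sli_handle_slow_ring_event =
 *				lpfc_sli_handle_slow_ring_event_s4;
 *		break;
 */
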
3603 /**
3604  * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3605  * @phba: Pointer to HBA context object.
3606  * @pring: Pointer to driver SLI ring object.
3607  * @mask: Host attention register mask for this ring.
3608  *
3609  * This function is called from the worker thread when there is a ring event
3610  * for non-fcp rings. The caller does not hold any lock. The function
3611  * removes each response iocb from the response ring and calls the handle
3612  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3613  **/
3614 static void
3615 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3616                                    struct lpfc_sli_ring *pring, uint32_t mask)
3617 {
3618         struct lpfc_pgp *pgp;
3619         IOCB_t *entry;
3620         IOCB_t *irsp = NULL;
3621         struct lpfc_iocbq *rspiocbp = NULL;
3622         uint32_t portRspPut, portRspMax;
3623         unsigned long iflag;
3624         uint32_t status;
3625
3626         pgp = &phba->port_gp[pring->ringno];
3627         spin_lock_irqsave(&phba->hbalock, iflag);
3628         pring->stats.iocb_event++;
3629
3630         /*
3631          * The next available response entry should never exceed the maximum
3632          * entries.  If it does, treat it as an adapter hardware error.
3633          */
3634         portRspMax = pring->sli.sli3.numRiocb;
3635         portRspPut = le32_to_cpu(pgp->rspPutInx);
3636         if (portRspPut >= portRspMax) {
3637                 /*
3638                  * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3639                  * rsp ring <portRspMax>
3640                  */
3641                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3642                                 "0303 Ring %d handler: portRspPut %d "
3643                                 "is bigger than rsp ring %d\n",
3644                                 pring->ringno, portRspPut, portRspMax);
3645
3646                 phba->link_state = LPFC_HBA_ERROR;
3647                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3648
3649                 phba->work_hs = HS_FFER3;
3650                 lpfc_handle_eratt(phba);
3651
3652                 return;
3653         }
3654
3655         rmb();
3656         while (pring->sli.sli3.rspidx != portRspPut) {
3657                 /*
3658                  * Build a completion list and call the appropriate handler.
3659                  * The process is to get the next available response iocb, get
3660                  * a free iocb from the list, copy the response data into the
3661                  * free iocb, insert to the continuation list, and update the
3662                  * next response index to slim.  This process makes response
3663                  * iocb's in the ring available to DMA as fast as possible but
3664                  * pays a penalty for a copy operation.  Since the iocb is
3665                  * only 32 bytes, this penalty is considered small relative to
3666                  * the PCI reads for register values and a slim write.  When
3667                  * the ulpLe field is set, the entire Command has been
3668                  * received.
3669                  */
3670                 entry = lpfc_resp_iocb(phba, pring);
3671
3672                 phba->last_completion_time = jiffies;
3673                 rspiocbp = __lpfc_sli_get_iocbq(phba);
3674                 if (rspiocbp == NULL) {
3675                         printk(KERN_ERR "%s: out of buffers! Failing "
3676                                "completion.\n", __func__);
3677                         break;
3678                 }
3679
3680                 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3681                                       phba->iocb_rsp_size);
3682                 irsp = &rspiocbp->iocb;
3683
3684                 if (++pring->sli.sli3.rspidx >= portRspMax)
3685                         pring->sli.sli3.rspidx = 0;
3686
3687                 if (pring->ringno == LPFC_ELS_RING) {
3688                         lpfc_debugfs_slow_ring_trc(phba,
3689                         "IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
3690                                 *(((uint32_t *) irsp) + 4),
3691                                 *(((uint32_t *) irsp) + 6),
3692                                 *(((uint32_t *) irsp) + 7));
3693                 }
3694
3695                 writel(pring->sli.sli3.rspidx,
3696                         &phba->host_gp[pring->ringno].rspGetInx);
3697
3698                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3699                 /* Handle the response IOCB */
3700                 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3701                 spin_lock_irqsave(&phba->hbalock, iflag);
3702
3703                 /*
3704                  * If the port response put pointer has not been updated, sync
3705                  * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3706                  * response put pointer.
3707                  */
3708                 if (pring->sli.sli3.rspidx == portRspPut) {
3709                         portRspPut = le32_to_cpu(pgp->rspPutInx);
3710                 }
3711         } /* while (pring->sli.sli3.rspidx != portRspPut) */
3712
3713         if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3714                 /* At least one response entry has been freed */
3715                 pring->stats.iocb_rsp_full++;
3716                 /* SET RxRE_RSP in Chip Att register */
3717                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3718                 writel(status, phba->CAregaddr);
3719                 readl(phba->CAregaddr); /* flush */
3720         }
3721         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3722                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3723                 pring->stats.iocb_cmd_empty++;
3724
3725                 /* Force update of the local copy of cmdGetInx */
3726                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3727                 lpfc_sli_resume_iocb(phba, pring);
3728
3729                 if ((pring->lpfc_sli_cmd_available))
3730                         (pring->lpfc_sli_cmd_available) (phba, pring);
3731
3732         }
3733
3734         spin_unlock_irqrestore(&phba->hbalock, iflag);
3735         return;
3736 }
3737
3738 /**
3739  * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3740  * @phba: Pointer to HBA context object.
3741  * @pring: Pointer to driver SLI ring object.
3742  * @mask: Host attention register mask for this ring.
3743  *
3744  * This function is called from the worker thread when there is a pending
3745  * ELS response iocb on the driver internal slow-path response iocb worker
3746  * queue. The caller does not hold any lock. The function removes each
3747  * response iocb from the response worker queue and calls the handle
3748  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3749  **/
3750 static void
3751 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3752                                    struct lpfc_sli_ring *pring, uint32_t mask)
3753 {
3754         struct lpfc_iocbq *irspiocbq;
3755         struct hbq_dmabuf *dmabuf;
3756         struct lpfc_cq_event *cq_event;
3757         unsigned long iflag;
3758
3759         spin_lock_irqsave(&phba->hbalock, iflag);
3760         phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3761         spin_unlock_irqrestore(&phba->hbalock, iflag);
3762         while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3763                 /* Get the response iocb from the head of work queue */
3764                 spin_lock_irqsave(&phba->hbalock, iflag);
3765                 list_remove_head(&phba->sli4_hba.sp_queue_event,
3766                                  cq_event, struct lpfc_cq_event, list);
3767                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3768
3769                 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3770                 case CQE_CODE_COMPL_WQE:
3771                         irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3772                                                  cq_event);
3773                         /* Translate ELS WCQE to response IOCBQ */
3774                         irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3775                                                                    irspiocbq);
3776                         if (irspiocbq)
3777                                 lpfc_sli_sp_handle_rspiocb(phba, pring,
3778                                                            irspiocbq);
3779                         break;
3780                 case CQE_CODE_RECEIVE:
3781                 case CQE_CODE_RECEIVE_V1:
3782                         dmabuf = container_of(cq_event, struct hbq_dmabuf,
3783                                               cq_event);
3784                         lpfc_sli4_handle_received_buffer(phba, dmabuf);
3785                         break;
3786                 default:
3787                         break;
3788                 }
3789         }
3790 }
3791
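/*
 * The dispatch above recovers each full object from its embedded
 * lpfc_cq_event with container_of(); e.g. for CQE_CODE_COMPL_WQE the event
 * sits inside the iocbq (layout abbreviated):
 *
 *	struct lpfc_iocbq {
 *		...
 *		struct lpfc_cq_event cq_event;
 *		...
 *	};
 *
 *	irspiocbq = container_of(cq_event, struct lpfc_iocbq, cq_event);
 */
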
3792 /**
3793  * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
3794  * @phba: Pointer to HBA context object.
3795  * @pring: Pointer to driver SLI ring object.
3796  *
3797  * This function aborts all iocbs in the given ring and frees all the iocb
3798  * objects in txq. This function issues an abort iocb for all the iocb commands
3799  * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3800  * the return of this function. The caller is not required to hold any locks.
3801  **/
3802 void
3803 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3804 {
3805         LIST_HEAD(completions);
3806         struct lpfc_iocbq *iocb, *next_iocb;
3807
3808         if (pring->ringno == LPFC_ELS_RING) {
3809                 lpfc_fabric_abort_hba(phba);
3810         }
3811
3812         /* Error everything on txq and txcmplq
3813          * First do the txq.
3814          */
3815         if (phba->sli_rev >= LPFC_SLI_REV4) {
3816                 spin_lock_irq(&pring->ring_lock);
3817                 list_splice_init(&pring->txq, &completions);
3818                 pring->txq_cnt = 0;
3819                 spin_unlock_irq(&pring->ring_lock);
3820
3821                 spin_lock_irq(&phba->hbalock);
3822                 /* Next issue ABTS for everything on the txcmplq */
3823                 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3824                         lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3825                 spin_unlock_irq(&phba->hbalock);
3826         } else {
3827                 spin_lock_irq(&phba->hbalock);
3828                 list_splice_init(&pring->txq, &completions);
3829                 pring->txq_cnt = 0;
3830
3831                 /* Next issue ABTS for everything on the txcmplq */
3832                 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3833                         lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3834                 spin_unlock_irq(&phba->hbalock);
3835         }
3836
3837         /* Cancel all the IOCBs from the completions list */
3838         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3839                               IOERR_SLI_ABORTED);
3840 }
3841
3842 /**
3843  * lpfc_sli_abort_wqe_ring - Abort all wqes in the ring
3844  * @phba: Pointer to HBA context object.
3845  * @pring: Pointer to driver SLI ring object.
3846  *
3847  * This function issues an abort wqe for each outstanding NVME IO command
3848  * on the given ring's txcmplq. The IOs in the txcmplq are not guaranteed
3849  * to complete before the return of this function. The caller is not
3850  * required to hold any locks.
3851  **/
3852 void
3853 lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3854 {
3855         LIST_HEAD(completions);
3856         struct lpfc_iocbq *iocb, *next_iocb;
3857
3858         if (pring->ringno == LPFC_ELS_RING)
3859                 lpfc_fabric_abort_hba(phba);
3860
3861         spin_lock_irq(&phba->hbalock);
3862         /* Next issue ABTS for everything on the txcmplq */
3863         list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3864                 lpfc_sli4_abort_nvme_io(phba, pring, iocb);
3865         spin_unlock_irq(&phba->hbalock);
3866 }
3867
3868
3869 /**
3870  * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3871  * @phba: Pointer to HBA context object.
3873  *
3874  * This function aborts all iocbs in FCP rings and frees all the iocb
3875  * objects in txq. This function issues an abort iocb for all the iocb commands
3876  * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3877  * the return of this function. The caller is not required to hold any locks.
3878  **/
3879 void
3880 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3881 {
3882         struct lpfc_sli *psli = &phba->sli;
3883         struct lpfc_sli_ring  *pring;
3884         uint32_t i;
3885
3886         /* Abort all IO on each FCP ring */
3887         if (phba->sli_rev >= LPFC_SLI_REV4) {
3888                 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3889                         pring = phba->sli4_hba.fcp_wq[i]->pring;
3890                         lpfc_sli_abort_iocb_ring(phba, pring);
3891                 }
3892         } else {
3893                 pring = &psli->sli3_ring[LPFC_FCP_RING];
3894                 lpfc_sli_abort_iocb_ring(phba, pring);
3895         }
3896 }
3897
3898 /**
3899  * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
3900  * @phba: Pointer to HBA context object.
3901  *
3902  * This function aborts all wqes in NVME rings. This function issues an
3903  * abort wqe for all the outstanding IO commands in txcmplq. The wqes in
3904  * the txcmplq are not guaranteed to complete before the return of this
3905  * function. The caller is not required to hold any locks.
3906  **/
3907 void
3908 lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
3909 {
3910         struct lpfc_sli_ring  *pring;
3911         uint32_t i;
3912
3913         if (phba->sli_rev < LPFC_SLI_REV4)
3914                 return;
3915
3916         /* Abort all IO on each NVME ring. */
3917         for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
3918                 pring = phba->sli4_hba.nvme_wq[i]->pring;
3919                 lpfc_sli_abort_wqe_ring(phba, pring);
3920         }
3921 }
3922
3923
3924 /**
3925  * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
3926  * @phba: Pointer to HBA context object.
3927  *
3928  * This function flushes all iocbs in the FCP rings and frees all the iocb
3929  * objects in txq and txcmplq. This function does not issue abort iocbs
3930  * for the iocb commands in txcmplq; they are simply returned with
3931  * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
3932  * slot has been permanently disabled.
3933  **/
3934 void
3935 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3936 {
3937         LIST_HEAD(txq);
3938         LIST_HEAD(txcmplq);
3939         struct lpfc_sli *psli = &phba->sli;
3940         struct lpfc_sli_ring  *pring;
3941         uint32_t i;
3942         struct lpfc_iocbq *piocb, *next_iocb;
3943
3944         spin_lock_irq(&phba->hbalock);
3945         /* Indicate the I/O queues are flushed */
3946         phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
3947         spin_unlock_irq(&phba->hbalock);
3948
3949         /* Flush every FCP ring */
3950         if (phba->sli_rev >= LPFC_SLI_REV4) {
3951                 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3952                         pring = phba->sli4_hba.fcp_wq[i]->pring;
3953
3954                         spin_lock_irq(&pring->ring_lock);
3955                         /* Retrieve everything on txq */
3956                         list_splice_init(&pring->txq, &txq);
3957                         list_for_each_entry_safe(piocb, next_iocb,
3958                                                  &pring->txcmplq, list)
3959                                 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3960                         /* Retrieve everything on the txcmplq */
3961                         list_splice_init(&pring->txcmplq, &txcmplq);
3962                         pring->txq_cnt = 0;
3963                         pring->txcmplq_cnt = 0;
3964                         spin_unlock_irq(&pring->ring_lock);
3965
3966                         /* Flush the txq */
3967                         lpfc_sli_cancel_iocbs(phba, &txq,
3968                                               IOSTAT_LOCAL_REJECT,
3969                                               IOERR_SLI_DOWN);
3970                         /* Flush the txcmplq */
3971                         lpfc_sli_cancel_iocbs(phba, &txcmplq,
3972                                               IOSTAT_LOCAL_REJECT,
3973                                               IOERR_SLI_DOWN);
3974                 }
3975         } else {
3976                 pring = &psli->sli3_ring[LPFC_FCP_RING];
3977
3978                 spin_lock_irq(&phba->hbalock);
3979                 /* Retrieve everything on txq */
3980                 list_splice_init(&pring->txq, &txq);
3981                 list_for_each_entry_safe(piocb, next_iocb,
3982                                          &pring->txcmplq, list)
3983                         piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3984                 /* Retrieve everything on the txcmplq */
3985                 list_splice_init(&pring->txcmplq, &txcmplq);
3986                 pring->txq_cnt = 0;
3987                 pring->txcmplq_cnt = 0;
3988                 spin_unlock_irq(&phba->hbalock);
3989
3990                 /* Flush the txq */
3991                 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3992                                       IOERR_SLI_DOWN);
3993                 /* Flush the txcmplq */
3994                 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3995                                       IOERR_SLI_DOWN);
3996         }
3997 }
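
/*
 * Editor's note: a minimal sketch (not driver code) of the flush idiom
 * above -- splice both ring lists onto local heads while holding the
 * ring lock, then fail every iocb with the lock dropped so completion
 * handlers never run under the ring lock.  Names mirror the driver's;
 * the helper itself is illustrative only.
 */
static inline void lpfc_flush_one_ring_sketch(struct lpfc_hba *phba,
					      struct lpfc_sli_ring *pring)
{
	LIST_HEAD(txq);
	LIST_HEAD(txcmplq);
	struct lpfc_iocbq *piocb, *next_iocb;

	spin_lock_irq(&pring->ring_lock);
	list_splice_init(&pring->txq, &txq);
	list_for_each_entry_safe(piocb, next_iocb, &pring->txcmplq, list)
		piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
	list_splice_init(&pring->txcmplq, &txcmplq);
	pring->txq_cnt = 0;
	pring->txcmplq_cnt = 0;
	spin_unlock_irq(&pring->ring_lock);

	/* Completions run with no ring lock held. */
	lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, IOERR_SLI_DOWN);
	lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
}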
3998
3999 /**
4000  * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
4001  * @phba: Pointer to HBA context object.
4002  *
4003  * This function flushes all wqes in the NVME rings and frees all resources
4004  * in the txcmplq. This function does not issue abort wqes for the IO
4005  * commands in txcmplq; they are simply returned with IOERR_SLI_DOWN.
4006  * This function is invoked by EEH when the device's PCI slot has been
4007  * permanently disabled.
4008  **/
4009 void
4010 lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
4011 {
4012         LIST_HEAD(txcmplq);
4013         struct lpfc_sli_ring  *pring;
4014         uint32_t i;
4015         struct lpfc_iocbq *piocb, *next_iocb;
4016
4017         if (phba->sli_rev < LPFC_SLI_REV4)
4018                 return;
4019
4020         /* Hint to other driver operations that a flush is in progress. */
4021         spin_lock_irq(&phba->hbalock);
4022         phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
4023         spin_unlock_irq(&phba->hbalock);
4024
4025         /* Cycle through all NVME rings and complete each IO with
4026          * a local driver reason code.  This is a flush, so no
4027          * abort exchange is sent to the FW.
4028          */
4029         for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
4030                 pring = phba->sli4_hba.nvme_wq[i]->pring;
4031
4032                 spin_lock_irq(&pring->ring_lock);
4033                 list_for_each_entry_safe(piocb, next_iocb,
4034                                          &pring->txcmplq, list)
4035                         piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4036                 /* Retrieve everything on the txcmplq */
4037                 list_splice_init(&pring->txcmplq, &txcmplq);
4038                 pring->txcmplq_cnt = 0;
4039                 spin_unlock_irq(&pring->ring_lock);
4040
4041                 /* Flush the txcmplq */
4042                 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4043                                       IOSTAT_LOCAL_REJECT,
4044                                       IOERR_SLI_DOWN);
4045         }
4046 }
4047
4048 /**
4049  * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4050  * @phba: Pointer to HBA context object.
4051  * @mask: Bit mask to be checked.
4052  *
4053  * This function reads the host status register and compares
4054  * it with the provided bit mask to check if the HBA completed
4055  * the restart. This function will wait in a loop for the
4056  * HBA to complete the restart. If the HBA does not restart within
4057  * 15 iterations, the function will reset the HBA again. The
4058  * function returns 1 when the HBA fails to restart, otherwise
4059  * it returns zero.
4060  **/
4061 static int
4062 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4063 {
4064         uint32_t status;
4065         int i = 0;
4066         int retval = 0;
4067
4068         /* Read the HBA Host Status Register */
4069         if (lpfc_readl(phba->HSregaddr, &status))
4070                 return 1;
4071
4072         /*
4073          * Check the status register every 10ms for 5 retries, then every
4074          * 500ms for 5, then every 2.5 sec for 5, then reset the board and
4075          * check every 2.5 sec for 4 more retries.
4076          * Break out of the loop if errors occurred during init.
4077          */
4078         while (((status & mask) != mask) &&
4079                !(status & HS_FFERM) &&
4080                i++ < 20) {
4081
4082                 if (i <= 5)
4083                         msleep(10);
4084                 else if (i <= 10)
4085                         msleep(500);
4086                 else
4087                         msleep(2500);
4088
4089                 if (i == 15) {
4090                         /* Do post */
4091                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4092                         lpfc_sli_brdrestart(phba);
4093                 }
4094                 /* Read the HBA Host Status Register */
4095                 if (lpfc_readl(phba->HSregaddr, &status)) {
4096                         retval = 1;
4097                         break;
4098                 }
4099         }
4100
4101         /* Check to see if any errors occurred during init */
4102         if ((status & HS_FFERM) || (i >= 20)) {
4103                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4104                                 "2751 Adapter failed to restart, "
4105                                 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4106                                 status,
4107                                 readl(phba->MBslimaddr + 0xa8),
4108                                 readl(phba->MBslimaddr + 0xac));
4109                 phba->link_state = LPFC_HBA_ERROR;
4110                 retval = 1;
4111         }
4112
4113         return retval;
4114 }
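
/*
 * Editor's note: the wait schedule of lpfc_sli_brdready_s3() in one
 * place.  A sketch (not driver code) of the per-iteration delay, in
 * milliseconds, for iteration i in 1..20: five 10ms polls, five 500ms
 * polls, then 2.5s polls, with a board restart injected at i == 15.
 */
static inline unsigned int lpfc_brdready_s3_delay_ms_sketch(int i)
{
	if (i <= 5)
		return 10;
	if (i <= 10)
		return 500;
	return 2500;
}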
4115
4116 /**
4117  * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4118  * @phba: Pointer to HBA context object.
4119  * @mask: Bit mask to be checked.
4120  *
4121  * This function checks the host status register to determine if the
4122  * HBA is ready. If the HBA is not ready, the function resets the
4123  * HBA PCI function and checks the status again. The function
4124  * returns 1 when the HBA fails to become ready, otherwise it
4125  * returns zero.
4126  **/
4127 static int
4128 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4129 {
4130         uint32_t status;
4131         int retval = 0;
4132
4133         /* Read the HBA Host Status Register */
4134         status = lpfc_sli4_post_status_check(phba);
4135
4136         if (status) {
4137                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4138                 lpfc_sli_brdrestart(phba);
4139                 status = lpfc_sli4_post_status_check(phba);
4140         }
4141
4142         /* Check to see if any errors occurred during init */
4143         if (status) {
4144                 phba->link_state = LPFC_HBA_ERROR;
4145                 retval = 1;
4146         } else
4147                 phba->sli4_hba.intr_enable = 0;
4148
4149         return retval;
4150 }
4151
4152 /**
4153  * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4154  * @phba: Pointer to HBA context object.
4155  * @mask: Bit mask to be checked.
4156  *
4157  * This routine wraps the actual SLI3 or SLI4 hba readiness check routine,
4158  * invoked through the API jump table function pointer in the lpfc_hba struct.
4159  **/
4160 int
4161 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4162 {
4163         return phba->lpfc_sli_brdready(phba, mask);
4164 }
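
/*
 * Editor's note: illustrative sketch (not the driver's code) of how a
 * jump-table entry like phba->lpfc_sli_brdready is typically wired at
 * init time, once the PCI device group is known.  The LPFC_PCI_DEV_*
 * values follow the driver's convention for SLI-3 vs SLI-4 parts.
 */
static inline void lpfc_brdready_api_setup_sketch(struct lpfc_hba *phba,
						  int dev_grp)
{
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:	/* SLI-3 family */
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
		break;
	case LPFC_PCI_DEV_OC:	/* SLI-4 family */
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
		break;
	}
}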
4165
4166 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4167
4168 /**
4169  * lpfc_reset_barrier - Make HBA ready for HBA reset
4170  * @phba: Pointer to HBA context object.
4171  *
4172  * This function is called before resetting an HBA. This function is called
4173  * with hbalock held and requests HBA to quiesce DMAs before a reset.
4174  **/
4175 void lpfc_reset_barrier(struct lpfc_hba *phba)
4176 {
4177         uint32_t __iomem *resp_buf;
4178         uint32_t __iomem *mbox_buf;
4179         volatile uint32_t mbox;
4180         uint32_t hc_copy, ha_copy, resp_data;
4181         int  i;
4182         uint8_t hdrtype;
4183
4184         lockdep_assert_held(&phba->hbalock);
4185
4186         pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4187         if (hdrtype != 0x80 ||
4188             (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4189              FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4190                 return;
4191
4192         /*
4193          * Tell the other part of the chip to temporarily suspend all
4194          * its DMA activity.
4195          */
4196         resp_buf = phba->MBslimaddr;
4197
4198         /* Disable the error attention */
4199         if (lpfc_readl(phba->HCregaddr, &hc_copy))
4200                 return;
4201         writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4202         readl(phba->HCregaddr); /* flush */
4203         phba->link_flag |= LS_IGNORE_ERATT;
4204
4205         if (lpfc_readl(phba->HAregaddr, &ha_copy))
4206                 return;
4207         if (ha_copy & HA_ERATT) {
4208                 /* Clear Chip error bit */
4209                 writel(HA_ERATT, phba->HAregaddr);
4210                 phba->pport->stopped = 1;
4211         }
4212
4213         mbox = 0;
4214         ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4215         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4216
4217         writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4218         mbox_buf = phba->MBslimaddr;
4219         writel(mbox, mbox_buf);
4220
4221         for (i = 0; i < 50; i++) {
4222                 if (lpfc_readl((resp_buf + 1), &resp_data))
4223                         return;
4224                 if (resp_data != ~(BARRIER_TEST_PATTERN))
4225                         mdelay(1);
4226                 else
4227                         break;
4228         }
4229         resp_data = 0;
4230         if (lpfc_readl((resp_buf + 1), &resp_data))
4231                 return;
4232         if (resp_data  != ~(BARRIER_TEST_PATTERN)) {
4233                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4234                     phba->pport->stopped)
4235                         goto restore_hc;
4236                 else
4237                         goto clear_errat;
4238         }
4239
4240         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4241         resp_data = 0;
4242         for (i = 0; i < 500; i++) {
4243                 if (lpfc_readl(resp_buf, &resp_data))
4244                         return;
4245                 if (resp_data != mbox)
4246                         mdelay(1);
4247                 else
4248                         break;
4249         }
4250
4251 clear_errat:
4252
4253         while (++i < 500) {
4254                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4255                         return;
4256                 if (!(ha_copy & HA_ERATT))
4257                         mdelay(1);
4258                 else
4259                         break;
4260         }
4261
4262         if (readl(phba->HAregaddr) & HA_ERATT) {
4263                 writel(HA_ERATT, phba->HAregaddr);
4264                 phba->pport->stopped = 1;
4265         }
4266
4267 restore_hc:
4268         phba->link_flag &= ~LS_IGNORE_ERATT;
4269         writel(hc_copy, phba->HCregaddr);
4270         readl(phba->HCregaddr); /* flush */
4271 }
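
/*
 * Editor's note: a sketch (not driver code) of the bounded register
 * poll lpfc_reset_barrier() performs twice -- spin with mdelay(1)
 * until a mapped SLIM word reads the expected value or the attempt
 * budget is exhausted.  Returns 0 on match, -1 on read failure or
 * timeout.
 */
static inline int lpfc_poll_slim_sketch(uint32_t __iomem *reg,
					uint32_t want, int attempts)
{
	uint32_t val;
	int i;

	for (i = 0; i < attempts; i++) {
		if (lpfc_readl(reg, &val))
			return -1;	/* register read failed */
		if (val == want)
			return 0;
		mdelay(1);
	}
	return -1;			/* timed out */
}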
4272
4273 /**
4274  * lpfc_sli_brdkill - Issue a kill_board mailbox command
4275  * @phba: Pointer to HBA context object.
4276  *
4277  * This function issues a kill_board mailbox command and waits for
4278  * the error attention interrupt. This function is called for stopping
4279  * the firmware processing. The caller is not required to hold any
4280  * locks. This function calls the lpfc_hba_down_post function to free
4281  * any pending commands after the kill. The function returns 1 when it
4282  * fails to kill the board, otherwise it returns 0.
4283  **/
4284 int
4285 lpfc_sli_brdkill(struct lpfc_hba *phba)
4286 {
4287         struct lpfc_sli *psli;
4288         LPFC_MBOXQ_t *pmb;
4289         uint32_t status;
4290         uint32_t ha_copy;
4291         int retval;
4292         int i = 0;
4293
4294         psli = &phba->sli;
4295
4296         /* Kill HBA */
4297         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4298                         "0329 Kill HBA Data: x%x x%x\n",
4299                         phba->pport->port_state, psli->sli_flag);
4300
4301         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4302         if (!pmb)
4303                 return 1;
4304
4305         /* Disable the error attention */
4306         spin_lock_irq(&phba->hbalock);
4307         if (lpfc_readl(phba->HCregaddr, &status)) {
4308                 spin_unlock_irq(&phba->hbalock);
4309                 mempool_free(pmb, phba->mbox_mem_pool);
4310                 return 1;
4311         }
4312         status &= ~HC_ERINT_ENA;
4313         writel(status, phba->HCregaddr);
4314         readl(phba->HCregaddr); /* flush */
4315         phba->link_flag |= LS_IGNORE_ERATT;
4316         spin_unlock_irq(&phba->hbalock);
4317
4318         lpfc_kill_board(phba, pmb);
4319         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4320         retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4321
4322         if (retval != MBX_SUCCESS) {
4323                 if (retval != MBX_BUSY)
4324                         mempool_free(pmb, phba->mbox_mem_pool);
4325                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4326                                 "2752 KILL_BOARD command failed retval %d\n",
4327                                 retval);
4328                 spin_lock_irq(&phba->hbalock);
4329                 phba->link_flag &= ~LS_IGNORE_ERATT;
4330                 spin_unlock_irq(&phba->hbalock);
4331                 return 1;
4332         }
4333
4334         spin_lock_irq(&phba->hbalock);
4335         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4336         spin_unlock_irq(&phba->hbalock);
4337
4338         mempool_free(pmb, phba->mbox_mem_pool);
4339
4340         /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4341          * attention every 100ms for 3 seconds. If we don't get ERATT after
4342          * 3 seconds we still set HBA_ERROR state because the status of the
4343          * board is now undefined.
4344          */
4345         if (lpfc_readl(phba->HAregaddr, &ha_copy))
4346                 return 1;
4347         while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4348                 mdelay(100);
4349                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4350                         return 1;
4351         }
4352
4353         del_timer_sync(&psli->mbox_tmo);
4354         if (ha_copy & HA_ERATT) {
4355                 writel(HA_ERATT, phba->HAregaddr);
4356                 phba->pport->stopped = 1;
4357         }
4358         spin_lock_irq(&phba->hbalock);
4359         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4360         psli->mbox_active = NULL;
4361         phba->link_flag &= ~LS_IGNORE_ERATT;
4362         spin_unlock_irq(&phba->hbalock);
4363
4364         lpfc_hba_down_post(phba);
4365         phba->link_state = LPFC_HBA_ERROR;
4366
4367         return ha_copy & HA_ERATT ? 0 : 1;
4368 }
4369
4370 /**
4371  * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4372  * @phba: Pointer to HBA context object.
4373  *
4374  * This function resets the HBA by writing HC_INITFF to the control
4375  * register. After the HBA resets, this function resets all the iocb ring
4376  * indices. This function disables PCI layer parity checking during
4377  * the reset.
4378  * This function returns 0 always.
4379  * The caller is not required to hold any locks.
4380  **/
4381 int
4382 lpfc_sli_brdreset(struct lpfc_hba *phba)
4383 {
4384         struct lpfc_sli *psli;
4385         struct lpfc_sli_ring *pring;
4386         uint16_t cfg_value;
4387         int i;
4388
4389         psli = &phba->sli;
4390
4391         /* Reset HBA */
4392         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4393                         "0325 Reset HBA Data: x%x x%x\n",
4394                         (phba->pport) ? phba->pport->port_state : 0,
4395                         psli->sli_flag);
4396
4397         /* perform board reset */
4398         phba->fc_eventTag = 0;
4399         phba->link_events = 0;
4400         if (phba->pport) {
4401                 phba->pport->fc_myDID = 0;
4402                 phba->pport->fc_prevDID = 0;
4403         }
4404
4405         /* Turn off parity checking and serr during the physical reset */
4406         pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4407         pci_write_config_word(phba->pcidev, PCI_COMMAND,
4408                               (cfg_value &
4409                                ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4410
4411         psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4412
4413         /* Now toggle INITFF bit in the Host Control Register */
4414         writel(HC_INITFF, phba->HCregaddr);
4415         mdelay(1);
4416         readl(phba->HCregaddr); /* flush */
4417         writel(0, phba->HCregaddr);
4418         readl(phba->HCregaddr); /* flush */
4419
4420         /* Restore PCI cmd register */
4421         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4422
4423         /* Initialize relevant SLI info */
4424         for (i = 0; i < psli->num_rings; i++) {
4425                 pring = &psli->sli3_ring[i];
4426                 pring->flag = 0;
4427                 pring->sli.sli3.rspidx = 0;
4428                 pring->sli.sli3.next_cmdidx  = 0;
4429                 pring->sli.sli3.local_getidx = 0;
4430                 pring->sli.sli3.cmdidx = 0;
4431                 pring->missbufcnt = 0;
4432         }
4433
4434         phba->link_state = LPFC_WARM_START;
4435         return 0;
4436 }
4437
4438 /**
4439  * lpfc_sli4_brdreset - Reset a sli-4 HBA
4440  * @phba: Pointer to HBA context object.
4441  *
4442  * This function resets a SLI4 HBA. This function disables PCI-layer parity
4443  * checking while it resets the device. The caller is not required to hold
4444  * any locks.
4445  *
4446  * This function returns 0 always.
4447  **/
4448 int
4449 lpfc_sli4_brdreset(struct lpfc_hba *phba)
4450 {
4451         struct lpfc_sli *psli = &phba->sli;
4452         uint16_t cfg_value;
4453         int rc = 0;
4454
4455         /* Reset HBA */
4456         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4457                         "0295 Reset HBA Data: x%x x%x x%x\n",
4458                         phba->pport->port_state, psli->sli_flag,
4459                         phba->hba_flag);
4460
4461         /* perform board reset */
4462         phba->fc_eventTag = 0;
4463         phba->link_events = 0;
4464         phba->pport->fc_myDID = 0;
4465         phba->pport->fc_prevDID = 0;
4466
4467         spin_lock_irq(&phba->hbalock);
4468         psli->sli_flag &= ~(LPFC_PROCESS_LA);
4469         phba->fcf.fcf_flag = 0;
4470         spin_unlock_irq(&phba->hbalock);
4471
4472         /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4473         if (phba->hba_flag & HBA_FW_DUMP_OP) {
4474                 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4475                 return rc;
4476         }
4477
4478         /* Now physically reset the device */
4479         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4480                         "0389 Performing PCI function reset!\n");
4481
4482         /* Turn off parity checking and serr during the physical reset */
4483         pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4484         pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4485                               ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4486
4487         /* Perform FCoE PCI function reset before freeing queue memory */
4488         rc = lpfc_pci_function_reset(phba);
4489
4490         /* Restore PCI cmd register */
4491         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4492
4493         return rc;
4494 }
4495
4496 /**
4497  * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4498  * @phba: Pointer to HBA context object.
4499  *
4500  * This function is called in the SLI initialization code path to
4501  * restart the HBA. The caller is not required to hold any lock.
4502  * This function writes MBX_RESTART mailbox command to the SLIM and
4503  * resets the HBA. At the end of the function, it calls the lpfc_hba_down_post
4504  * function to free any pending commands. The function enables
4505  * POST only during the first initialization. The function returns zero.
4506  * The function does not guarantee completion of MBX_RESTART mailbox
4507  * command before the return of this function.
4508  **/
4509 static int
4510 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4511 {
4512         MAILBOX_t *mb;
4513         struct lpfc_sli *psli;
4514         volatile uint32_t word0;
4515         void __iomem *to_slim;
4516         uint32_t hba_aer_enabled;
4517
4518         spin_lock_irq(&phba->hbalock);
4519
4520         /* Take PCIe device Advanced Error Reporting (AER) state */
4521         hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4522
4523         psli = &phba->sli;
4524
4525         /* Restart HBA */
4526         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4527                         "0337 Restart HBA Data: x%x x%x\n",
4528                         (phba->pport) ? phba->pport->port_state : 0,
4529                         psli->sli_flag);
4530
4531         word0 = 0;
4532         mb = (MAILBOX_t *) &word0;
4533         mb->mbxCommand = MBX_RESTART;
4534         mb->mbxHc = 1;
4535
4536         lpfc_reset_barrier(phba);
4537
4538         to_slim = phba->MBslimaddr;
4539         writel(*(uint32_t *) mb, to_slim);
4540         readl(to_slim); /* flush */
4541
4542         /* Only skip post after fc_ffinit is completed */
4543         if (phba->pport && phba->pport->port_state)
4544                 word0 = 1;      /* This is really setting up word1 */
4545         else
4546                 word0 = 0;      /* This is really setting up word1 */
4547         to_slim = phba->MBslimaddr + sizeof (uint32_t);
4548         writel(*(uint32_t *) mb, to_slim);
4549         readl(to_slim); /* flush */
4550
4551         lpfc_sli_brdreset(phba);
4552         if (phba->pport)
4553                 phba->pport->stopped = 0;
4554         phba->link_state = LPFC_INIT_START;
4555         phba->hba_flag = 0;
4556         spin_unlock_irq(&phba->hbalock);
4557
4558         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4559         psli->stats_start = get_seconds();
4560
4561         /* Give the INITFF and Post time to settle. */
4562         mdelay(100);
4563
4564         /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4565         if (hba_aer_enabled)
4566                 pci_disable_pcie_error_reporting(phba->pcidev);
4567
4568         lpfc_hba_down_post(phba);
4569
4570         return 0;
4571 }
4572
4573 /**
4574  * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4575  * @phba: Pointer to HBA context object.
4576  *
4577  * This function is called in the SLI initialization code path to restart
4578  * a SLI4 HBA. The caller is not required to hold any lock.
4579  * At the end of the function, it calls lpfc_hba_down_post function to
4580  * free any pending commands.
4581  **/
4582 static int
4583 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4584 {
4585         struct lpfc_sli *psli = &phba->sli;
4586         uint32_t hba_aer_enabled;
4587         int rc;
4588
4589         /* Restart HBA */
4590         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4591                         "0296 Restart HBA Data: x%x x%x\n",
4592                         phba->pport->port_state, psli->sli_flag);
4593
4594         /* Take PCIe device Advanced Error Reporting (AER) state */
4595         hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4596
4597         rc = lpfc_sli4_brdreset(phba);
4598
4599         spin_lock_irq(&phba->hbalock);
4600         phba->pport->stopped = 0;
4601         phba->link_state = LPFC_INIT_START;
4602         phba->hba_flag = 0;
4603         spin_unlock_irq(&phba->hbalock);
4604
4605         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4606         psli->stats_start = get_seconds();
4607
4608         /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4609         if (hba_aer_enabled)
4610                 pci_disable_pcie_error_reporting(phba->pcidev);
4611
4612         lpfc_hba_down_post(phba);
4613         lpfc_sli4_queue_destroy(phba);
4614
4615         return rc;
4616 }
4617
4618 /**
4619  * lpfc_sli_brdrestart - Wrapper func for restarting hba
4620  * @phba: Pointer to HBA context object.
4621  *
4622  * This routine wraps the actual SLI3 or SLI4 hba restart routine, invoked
4623  * through the API jump table function pointer in the lpfc_hba struct.
4624  **/
4625 int
4626 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4627 {
4628         return phba->lpfc_sli_brdrestart(phba);
4629 }
4630
4631 /**
4632  * lpfc_sli_chipset_init - Wait for the HBA to complete its restart
4633  * @phba: Pointer to HBA context object.
4634  *
4635  * This function is called after an HBA restart to wait for successful
4636  * restart of the HBA. Successful restart is indicated by the HS_FFRDY and
4637  * HS_MBRDY bits. If the HBA is still not ready after 150 polling iterations,
4638  * the function restarts the HBA once more. The function returns zero if the
4639  * HBA successfully restarted, else it returns a negative error code.
4640  **/
4641 int
4642 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4643 {
4644         uint32_t status, i = 0;
4645
4646         /* Read the HBA Host Status Register */
4647         if (lpfc_readl(phba->HSregaddr, &status))
4648                 return -EIO;
4649
4650         /* Check status register to see what current state is */
4651         i = 0;
4652         while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4653
4654                 /* Check every 10ms for 10 retries, then every 100ms for 90
4655                  * retries, then every 1 sec for 50 retries, for a total of
4656                  * ~60 seconds, before resetting the board again and checking
4657                  * every 1 sec for another 50 retries. The up-to-60-second
4658                  * wait for board ready is required for Falcon FIPS
4659                  * zeroization to complete; any board reset in between would
4660                  * restart zeroization and further delay board readiness.
4661                  */
4662                 if (i++ >= 200) {
4663                         /* Adapter failed to init, timeout, status reg
4664                            <status> */
4665                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4666                                         "0436 Adapter failed to init, "
4667                                         "timeout, status reg x%x, "
4668                                         "FW Data: A8 x%x AC x%x\n", status,
4669                                         readl(phba->MBslimaddr + 0xa8),
4670                                         readl(phba->MBslimaddr + 0xac));
4671                         phba->link_state = LPFC_HBA_ERROR;
4672                         return -ETIMEDOUT;
4673                 }
4674
4675                 /* Check to see if any errors occurred during init */
4676                 if (status & HS_FFERM) {
4677                         /* ERROR: During chipset initialization */
4678                         /* Adapter failed to init, chipset, status reg
4679                            <status> */
4680                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4681                                         "0437 Adapter failed to init, "
4682                                         "chipset, status reg x%x, "
4683                                         "FW Data: A8 x%x AC x%x\n", status,
4684                                         readl(phba->MBslimaddr + 0xa8),
4685                                         readl(phba->MBslimaddr + 0xac));
4686                         phba->link_state = LPFC_HBA_ERROR;
4687                         return -EIO;
4688                 }
4689
4690                 if (i <= 10)
4691                         msleep(10);
4692                 else if (i <= 100)
4693                         msleep(100);
4694                 else
4695                         msleep(1000);
4696
4697                 if (i == 150) {
4698                         /* Do post */
4699                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4700                         lpfc_sli_brdrestart(phba);
4701                 }
4702                 /* Read the HBA Host Status Register */
4703                 if (lpfc_readl(phba->HSregaddr, &status))
4704                         return -EIO;
4705         }
4706
4707         /* Check to see if any errors occurred during init */
4708         if (status & HS_FFERM) {
4709                 /* ERROR: During chipset initialization */
4710                 /* Adapter failed to init, chipset, status reg <status> */
4711                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4712                                 "0438 Adapter failed to init, chipset, "
4713                                 "status reg x%x, "
4714                                 "FW Data: A8 x%x AC x%x\n", status,
4715                                 readl(phba->MBslimaddr + 0xa8),
4716                                 readl(phba->MBslimaddr + 0xac));
4717                 phba->link_state = LPFC_HBA_ERROR;
4718                 return -EIO;
4719         }
4720
4721         /* Clear all interrupt enable conditions */
4722         writel(0, phba->HCregaddr);
4723         readl(phba->HCregaddr); /* flush */
4724
4725         /* setup host attn register */
4726         writel(0xffffffff, phba->HAregaddr);
4727         readl(phba->HAregaddr); /* flush */
4728         return 0;
4729 }
4730
4731 /**
4732  * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4733  *
4734  * This function calculates and returns the number of HBQs required to be
4735  * configured.
4736  **/
4737 int
4738 lpfc_sli_hbq_count(void)
4739 {
4740         return ARRAY_SIZE(lpfc_hbq_defs);
4741 }
4742
4743 /**
4744  * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4745  *
4746  * This function adds the number of hbq entries in every HBQ to get
4747  * the total number of hbq entries required for the HBA and returns
4748  * the total count.
4749  **/
4750 static int
4751 lpfc_sli_hbq_entry_count(void)
4752 {
4753         int  hbq_count = lpfc_sli_hbq_count();
4754         int  count = 0;
4755         int  i;
4756
4757         for (i = 0; i < hbq_count; ++i)
4758                 count += lpfc_hbq_defs[i]->entry_count;
4759         return count;
4760 }
4761
4762 /**
4763  * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4764  *
4765  * This function calculates amount of memory required for all hbq entries
4766  * to be configured and returns the total memory required.
4767  **/
4768 int
4769 lpfc_sli_hbq_size(void)
4770 {
4771         return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4772 }
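
/*
 * Editor's note: worked example of the sizing arithmetic above.  If
 * lpfc_hbq_defs[] described two HBQs of 256 and 16 entries (figures
 * chosen for illustration, not taken from the driver's table), then
 *
 *   lpfc_sli_hbq_entry_count() = 256 + 16 = 272
 *   lpfc_sli_hbq_size()        = 272 * sizeof(struct lpfc_hbq_entry)
 *
 * i.e. the memory reserved for HBQ entries is simply the summed entry
 * counts times the per-entry size.
 */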
4773
4774 /**
4775  * lpfc_sli_hbq_setup - configure and initialize HBQs
4776  * @phba: Pointer to HBA context object.
4777  *
4778  * This function is called during the SLI initialization to configure
4779  * all the HBQs and post buffers to the HBQ. The caller is not
4780  * required to hold any locks. This function will return zero if successful
4781  * else it will return negative error code.
4782  **/
4783 static int
4784 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4785 {
4786         int  hbq_count = lpfc_sli_hbq_count();
4787         LPFC_MBOXQ_t *pmb;
4788         MAILBOX_t *pmbox;
4789         uint32_t hbqno;
4790         uint32_t hbq_entry_index;
4791
4792         /* Get a Mailbox buffer to setup mailbox
4793          * commands for HBA initialization
4794          */
4795         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4796
4797         if (!pmb)
4798                 return -ENOMEM;
4799
4800         pmbox = &pmb->u.mb;
4801
4802         /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4803         phba->link_state = LPFC_INIT_MBX_CMDS;
4804         phba->hbq_in_use = 1;
4805
4806         hbq_entry_index = 0;
4807         for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4808                 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4809                 phba->hbqs[hbqno].hbqPutIdx      = 0;
4810                 phba->hbqs[hbqno].local_hbqGetIdx   = 0;
4811                 phba->hbqs[hbqno].entry_count =
4812                         lpfc_hbq_defs[hbqno]->entry_count;
4813                 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4814                         hbq_entry_index, pmb);
4815                 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4816
4817                 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4818                         /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4819                            mbxStatus <status>, ring <num> */
4820
4821                         lpfc_printf_log(phba, KERN_ERR,
4822                                         LOG_SLI | LOG_VPORT,
4823                                         "1805 Adapter failed to init. "
4824                                         "Data: x%x x%x x%x\n",
4825                                         pmbox->mbxCommand,
4826                                         pmbox->mbxStatus, hbqno);
4827
4828                         phba->link_state = LPFC_HBA_ERROR;
4829                         mempool_free(pmb, phba->mbox_mem_pool);
4830                         return -ENXIO;
4831                 }
4832         }
4833         phba->hbq_count = hbq_count;
4834
4835         mempool_free(pmb, phba->mbox_mem_pool);
4836
4837         /* Initially populate or replenish the HBQs */
4838         for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4839                 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4840         return 0;
4841 }
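
/*
 * Editor's note: sketch (not driver code) of how the setup loop above
 * carves one contiguous block of HBQ entries into per-HBQ slices --
 * HBQ n begins at the sum of the entry counts of HBQs 0..n-1, which
 * is exactly what hbq_entry_index accumulates.
 */
static inline uint32_t lpfc_hbq_first_index_sketch(uint32_t hbqno)
{
	uint32_t idx = 0, i;

	for (i = 0; i < hbqno; i++)
		idx += lpfc_hbq_defs[i]->entry_count;
	return idx;
}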
4842
4843 /**
4844  * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4845  * @phba: Pointer to HBA context object.
4846  *
4847  * This function is called during SLI4 initialization to configure the ELS
4848  * receive buffer queue and post buffers to it. The caller is not required
4849  * to hold any locks. This function always returns zero.
4851  **/
4852 static int
4853 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4854 {
4855         phba->hbq_in_use = 1;
4856         phba->hbqs[LPFC_ELS_HBQ].entry_count =
4857                 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
4858         phba->hbq_count = 1;
4859         lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
4860         /* Initially populate or replenish the HBQs */
4861         return 0;
4862 }
4863
4864 /**
4865  * lpfc_sli_config_port - Issue config port mailbox command
4866  * @phba: Pointer to HBA context object.
4867  * @sli_mode: sli mode - 2/3
4868  *
4869  * This function is called by the sli initialization code path
4870  * to issue config_port mailbox command. This function restarts the
4871  * HBA firmware and issues a config_port mailbox command to configure
4872  * the SLI interface in the sli mode specified by the sli_mode
4873  * parameter. The caller is not required to hold any locks.
4874  * The function returns 0 if successful, else returns negative error
4875  * code.
4876  **/
4877 int
4878 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4879 {
4880         LPFC_MBOXQ_t *pmb;
4881         uint32_t resetcount = 0, rc = 0, done = 0;
4882
4883         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4884         if (!pmb) {
4885                 phba->link_state = LPFC_HBA_ERROR;
4886                 return -ENOMEM;
4887         }
4888
4889         phba->sli_rev = sli_mode;
4890         while (resetcount < 2 && !done) {
4891                 spin_lock_irq(&phba->hbalock);
4892                 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4893                 spin_unlock_irq(&phba->hbalock);
4894                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4895                 lpfc_sli_brdrestart(phba);
4896                 rc = lpfc_sli_chipset_init(phba);
4897                 if (rc)
4898                         break;
4899
4900                 spin_lock_irq(&phba->hbalock);
4901                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4902                 spin_unlock_irq(&phba->hbalock);
4903                 resetcount++;
4904
4905                 /* Call pre CONFIG_PORT mailbox command initialization.  A
4906                  * value of 0 means the call was successful.  Any nonzero
4907                  * value is a failure, but if ERESTART is returned,
4908                  * the driver may reset the HBA and try again.
4909                  */
4910                 rc = lpfc_config_port_prep(phba);
4911                 if (rc == -ERESTART) {
4912                         phba->link_state = LPFC_LINK_UNKNOWN;
4913                         continue;
4914                 } else if (rc)
4915                         break;
4916
4917                 phba->link_state = LPFC_INIT_MBX_CMDS;
4918                 lpfc_config_port(phba, pmb);
4919                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4920                 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4921                                         LPFC_SLI3_HBQ_ENABLED |
4922                                         LPFC_SLI3_CRP_ENABLED |
4923                                         LPFC_SLI3_BG_ENABLED |
4924                                         LPFC_SLI3_DSS_ENABLED);
4925                 if (rc != MBX_SUCCESS) {
4926                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4927                                 "0442 Adapter failed to init, mbxCmd x%x "
4928                                 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
4929                                 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
4930                         spin_lock_irq(&phba->hbalock);
4931                         phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
4932                         spin_unlock_irq(&phba->hbalock);
4933                         rc = -ENXIO;
4934                 } else {
4935                         /* Allow asynchronous mailbox command to go through */
4936                         spin_lock_irq(&phba->hbalock);
4937                         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4938                         spin_unlock_irq(&phba->hbalock);
4939                         done = 1;
4940
4941                         if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
4942                             (pmb->u.mb.un.varCfgPort.gasabt == 0))
4943                                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4944                                         "3110 Port did not grant ASABT\n");
4945                 }
4946         }
4947         if (!done) {
4948                 rc = -EINVAL;
4949                 goto do_prep_failed;
4950         }
4951         if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
4952                 if (!pmb->u.mb.un.varCfgPort.cMA) {
4953                         rc = -ENXIO;
4954                         goto do_prep_failed;
4955                 }
4956                 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
4957                         phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
4958                         phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
4959                         phba->max_vports = (phba->max_vpi > phba->max_vports) ?
4960                                 phba->max_vpi : phba->max_vports;
4961
4962                 } else
4963                         phba->max_vpi = 0;
4964                 phba->fips_level = 0;
4965                 phba->fips_spec_rev = 0;
4966                 if (pmb->u.mb.un.varCfgPort.gdss) {
4967                         phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
4968                         phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
4969                         phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
4970                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4971                                         "2850 Security Crypto Active. FIPS x%d "
4972                                         "(Spec Rev: x%d)",
4973                                         phba->fips_level, phba->fips_spec_rev);
4974                 }
4975                 if (pmb->u.mb.un.varCfgPort.sec_err) {
4976                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4977                                         "2856 Config Port Security Crypto "
4978                                         "Error: x%x ",
4979                                         pmb->u.mb.un.varCfgPort.sec_err);
4980                 }
4981                 if (pmb->u.mb.un.varCfgPort.gerbm)
4982                         phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
4983                 if (pmb->u.mb.un.varCfgPort.gcrp)
4984                         phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
4985
4986                 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
4987                 phba->port_gp = phba->mbox->us.s3_pgp.port;
4988
4989                 if (phba->cfg_enable_bg) {
4990                         if (pmb->u.mb.un.varCfgPort.gbg)
4991                                 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
4992                         else
4993                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4994                                                 "0443 Adapter did not grant "
4995                                                 "BlockGuard\n");
4996                 }
4997         } else {
4998                 phba->hbq_get = NULL;
4999                 phba->port_gp = phba->mbox->us.s2.port;
5000                 phba->max_vpi = 0;
5001         }
5002 do_prep_failed:
5003         mempool_free(pmb, phba->mbox_mem_pool);
5004         return rc;
5005 }
5006
5007
5008 /**
5009  * lpfc_sli_hba_setup - SLI initialization function
5010  * @phba: Pointer to HBA context object.
5011  *
5012  * This function is the main SLI initialization function. This function
5013  * is called by the HBA initialization code, HBA reset code and HBA
5014  * error attention handler code. Caller is not required to hold any
5015  * locks. This function issues config_port mailbox command to configure
5016  * the SLI, setup iocb rings and HBQ rings. In the end the function
5017  * calls the config_port_post function to issue init_link mailbox
5018  * command and to start the discovery. The function will return zero
5019  * if successful, else it will return negative error code.
5020  **/
5021 int
5022 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5023 {
5024         uint32_t rc;
5025         int  mode = 3, i;
5026         int longs;
5027
5028         switch (phba->cfg_sli_mode) {
5029         case 2:
5030                 if (phba->cfg_enable_npiv) {
5031                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5032                                 "1824 NPIV enabled: Override sli_mode "
5033                                 "parameter (%d) to auto (0).\n",
5034                                 phba->cfg_sli_mode);
5035                         break;
5036                 }
5037                 mode = 2;
5038                 break;
5039         case 0:
5040         case 3:
5041                 break;
5042         default:
5043                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5044                                 "1819 Unrecognized sli_mode parameter: %d.\n",
5045                                 phba->cfg_sli_mode);
5046
5047                 break;
5048         }
5049         phba->fcp_embed_io = 0; /* SLI4 FC support only */
5050
5051         rc = lpfc_sli_config_port(phba, mode);
5052
5053         if (rc && phba->cfg_sli_mode == 3)
5054                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5055                                 "1820 Unable to select SLI-3.  "
5056                                 "Not supported by adapter.\n");
5057         if (rc && mode != 2)
5058                 rc = lpfc_sli_config_port(phba, 2);
5059         else if (rc && mode == 2)
5060                 rc = lpfc_sli_config_port(phba, 3);
5061         if (rc)
5062                 goto lpfc_sli_hba_setup_error;
5063
5064         /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5065         if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5066                 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5067                 if (!rc) {
5068                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5069                                         "2709 This device supports "
5070                                         "Advanced Error Reporting (AER)\n");
5071                         spin_lock_irq(&phba->hbalock);
5072                         phba->hba_flag |= HBA_AER_ENABLED;
5073                         spin_unlock_irq(&phba->hbalock);
5074                 } else {
5075                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5076                                         "2708 This device does not support "
5077                                         "Advanced Error Reporting (AER): %d\n",
5078                                         rc);
5079                         phba->cfg_aer_support = 0;
5080                 }
5081         }
5082
5083         if (phba->sli_rev == 3) {
5084                 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5085                 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5086         } else {
5087                 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5088                 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5089                 phba->sli3_options = 0;
5090         }
5091
5092         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5093                         "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5094                         phba->sli_rev, phba->max_vpi);
5095         rc = lpfc_sli_ring_map(phba);
5096
5097         if (rc)
5098                 goto lpfc_sli_hba_setup_error;
5099
5100         /* Initialize VPIs. */
5101         if (phba->sli_rev == LPFC_SLI_REV3) {
5102                 /*
5103                  * The VPI bitmask and physical ID array are allocated
5104                  * and initialized once only - at driver load.  A port
5105                  * reset doesn't need to reinitialize this memory.
5106                  */
5107                 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5108                         longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5109                         phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
5110                                                   GFP_KERNEL);
5111                         if (!phba->vpi_bmask) {
5112                                 rc = -ENOMEM;
5113                                 goto lpfc_sli_hba_setup_error;
5114                         }
5115
5116                         phba->vpi_ids = kzalloc(
5117                                         (phba->max_vpi+1) * sizeof(uint16_t),
5118                                         GFP_KERNEL);
5119                         if (!phba->vpi_ids) {
5120                                 kfree(phba->vpi_bmask);
5121                                 rc = -ENOMEM;
5122                                 goto lpfc_sli_hba_setup_error;
5123                         }
5124                         for (i = 0; i < phba->max_vpi; i++)
5125                                 phba->vpi_ids[i] = i;
5126                 }
5127         }
5128
5129         /* Init HBQs */
5130         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5131                 rc = lpfc_sli_hbq_setup(phba);
5132                 if (rc)
5133                         goto lpfc_sli_hba_setup_error;
5134         }
5135         spin_lock_irq(&phba->hbalock);
5136         phba->sli.sli_flag |= LPFC_PROCESS_LA;
5137         spin_unlock_irq(&phba->hbalock);
5138
5139         rc = lpfc_config_port_post(phba);
5140         if (rc)
5141                 goto lpfc_sli_hba_setup_error;
5142
5143         return rc;
5144
5145 lpfc_sli_hba_setup_error:
5146         phba->link_state = LPFC_HBA_ERROR;
5147         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5148                         "0445 Firmware initialization failed\n");
5149         return rc;
5150 }
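
/*
 * Editor's note on the VPI bitmask sizing above: it is the usual
 * bits-to-longs round-up.  A bitmap covering VPIs 0..max_vpi needs
 * max_vpi + 1 bits, and
 *
 *   longs = (max_vpi + BITS_PER_LONG) / BITS_PER_LONG
 *         == DIV_ROUND_UP(max_vpi + 1, BITS_PER_LONG)
 *
 * A behavior-equivalent sketch with the generic helpers (illustrative,
 * not the driver's code):
 */
static inline unsigned long *lpfc_vpi_bmask_alloc_sketch(uint16_t max_vpi)
{
	return kcalloc(BITS_TO_LONGS(max_vpi + 1), sizeof(unsigned long),
		       GFP_KERNEL);
}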
5151
5152 /**
5153  * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5154  * @phba: Pointer to HBA context object.
5155  *
5156  * This function issues a dump mailbox command to read config region
5157  * 23, parses the records in the region, and populates the driver
5158  * data structures.
5159  **/
5160 static int
5161 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5162 {
5163         LPFC_MBOXQ_t *mboxq;
5164         struct lpfc_dmabuf *mp;
5165         struct lpfc_mqe *mqe;
5166         uint32_t data_length;
5167         int rc;
5168
5169         /* Program the default value of vlan_id and fc_map */
5170         phba->valid_vlan = 0;
5171         phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5172         phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5173         phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5174
5175         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5176         if (!mboxq)
5177                 return -ENOMEM;
5178
5179         mqe = &mboxq->u.mqe;
5180         if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5181                 rc = -ENOMEM;
5182                 goto out_free_mboxq;
5183         }
5184
5185         mp = (struct lpfc_dmabuf *) mboxq->context1;
5186         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5187
5188         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5189                         "(%d):2571 Mailbox cmd x%x Status x%x "
5190                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5191                         "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5192                         "CQ: x%x x%x x%x x%x\n",
5193                         mboxq->vport ? mboxq->vport->vpi : 0,
5194                         bf_get(lpfc_mqe_command, mqe),
5195                         bf_get(lpfc_mqe_status, mqe),
5196                         mqe->un.mb_words[0], mqe->un.mb_words[1],
5197                         mqe->un.mb_words[2], mqe->un.mb_words[3],
5198                         mqe->un.mb_words[4], mqe->un.mb_words[5],
5199                         mqe->un.mb_words[6], mqe->un.mb_words[7],
5200                         mqe->un.mb_words[8], mqe->un.mb_words[9],
5201                         mqe->un.mb_words[10], mqe->un.mb_words[11],
5202                         mqe->un.mb_words[12], mqe->un.mb_words[13],
5203                         mqe->un.mb_words[14], mqe->un.mb_words[15],
5204                         mqe->un.mb_words[16], mqe->un.mb_words[50],
5205                         mboxq->mcqe.word0,
5206                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
5207                         mboxq->mcqe.trailer);
5208
5209         if (rc) {
5210                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5211                 kfree(mp);
5212                 rc = -EIO;
5213                 goto out_free_mboxq;
5214         }
5215         data_length = mqe->un.mb_words[5];
5216         if (data_length > DMP_RGN23_SIZE) {
5217                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5218                 kfree(mp);
5219                 rc = -EIO;
5220                 goto out_free_mboxq;
5221         }
5222
5223         lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5224         lpfc_mbuf_free(phba, mp->virt, mp->phys);
5225         kfree(mp);
5226         rc = 0;
5227
5228 out_free_mboxq:
5229         mempool_free(mboxq, phba->mbox_mem_pool);
5230         return rc;
5231 }
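/*
 * Editorial note: the LPFC_FCOE_FCF_MAP0/1/2 defaults programmed above
 * are the conventional FCoE FC-MAP bytes (0x0E, 0xFC, 0x00); records
 * parsed from config region 23 by lpfc_parse_fcoe_conf() may override
 * them.
 */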
5232
5233 /**
5234  * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5235  * @phba: pointer to lpfc hba data structure.
5236  * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5237  * @vpd: pointer to the memory to hold resulting port vpd data.
5238  * @vpd_size: On input, the number of bytes allocated to @vpd.
5239  *            On output, the number of data bytes in @vpd.
5240  *
5241  * This routine executes a READ_REV SLI4 mailbox command.  In
5242  * addition, this routine gets the port vpd data.
5243  *
5244  * Return codes
5245  *      0 - successful
5246  *      -ENOMEM - could not allocate memory.
5247  **/
5248 static int
5249 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5250                     uint8_t *vpd, uint32_t *vpd_size)
5251 {
5252         int rc = 0;
5253         uint32_t dma_size;
5254         struct lpfc_dmabuf *dmabuf;
5255         struct lpfc_mqe *mqe;
5256
5257         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5258         if (!dmabuf)
5259                 return -ENOMEM;
5260
5261         /*
5262          * Get a DMA buffer for the vpd data resulting from the READ_REV
5263          * mailbox command.
5264          */
5265         dma_size = *vpd_size;
5266         dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
5267                                            &dmabuf->phys, GFP_KERNEL);
5268         if (!dmabuf->virt) {
5269                 kfree(dmabuf);
5270                 return -ENOMEM;
5271         }
5272
5273         /*
5274          * The SLI4 implementation of READ_REV conflicts at word1,
5275          * bits 31:16 and SLI4 adds vpd functionality not present
5276          * in SLI3.  This code corrects the conflicts.
5277          */
5278         lpfc_read_rev(phba, mboxq);
5279         mqe = &mboxq->u.mqe;
5280         mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5281         mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5282         mqe->un.read_rev.word1 &= 0x0000FFFF;
5283         bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5284         bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5285
5286         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5287         if (rc) {
5288                 dma_free_coherent(&phba->pcidev->dev, dma_size,
5289                                   dmabuf->virt, dmabuf->phys);
5290                 kfree(dmabuf);
5291                 return -EIO;
5292         }
5293
5294         /*
5295          * The available vpd length cannot be bigger than the
5296          * DMA buffer passed to the port.  Catch the less than
5297          * case and update the caller's size.
5298          */
5299         if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5300                 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5301
5302         memcpy(vpd, dmabuf->virt, *vpd_size);
5303
5304         dma_free_coherent(&phba->pcidev->dev, dma_size,
5305                           dmabuf->virt, dmabuf->phys);
5306         kfree(dmabuf);
5307         return 0;
5308 }
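/*
 * Illustrative sketch (editorial, not driver code): READ_REV hands the
 * port the vpd buffer's 64-bit DMA address as two 32-bit mailbox words.
 * Assuming putPaddrHigh()/putPaddrLow() carry their conventional
 * meaning, the split above is equivalent to:
 *
 *	uint32_t high = (uint32_t)(dmabuf->phys >> 32);
 *	uint32_t low  = (uint32_t)(dmabuf->phys & 0xFFFFFFFF);
 */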
5309
5310 /**
5311  * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5312  * @phba: pointer to lpfc hba data structure.
5313  *
5314  * This routine retrieves the SLI4 device physical port name that this
5315  * PCI function is attached to.
5316  *
5317  * Return codes
5318  *      0 - successful
5319  *      otherwise - failed to retrieve physical port name
5320  **/
5321 static int
5322 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5323 {
5324         LPFC_MBOXQ_t *mboxq;
5325         struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5326         struct lpfc_controller_attribute *cntl_attr;
5327         struct lpfc_mbx_get_port_name *get_port_name;
5328         void *virtaddr = NULL;
5329         uint32_t alloclen, reqlen;
5330         uint32_t shdr_status, shdr_add_status;
5331         union lpfc_sli4_cfg_shdr *shdr;
5332         char cport_name = 0;
5333         int rc;
5334
5335         /* We assume nothing at this point */
5336         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5337         phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5338
5339         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5340         if (!mboxq)
5341                 return -ENOMEM;
5342         /* obtain link type and link number via READ_CONFIG */
5343         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5344         lpfc_sli4_read_config(phba);
5345         if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5346                 goto retrieve_ppname;
5347
5348         /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5349         reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5350         alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5351                         LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5352                         LPFC_SLI4_MBX_NEMBED);
5353         if (alloclen < reqlen) {
5354                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5355                                 "3084 Allocated DMA memory size (%d) is "
5356                                 "less than the requested DMA memory size "
5357                                 "(%d)\n", alloclen, reqlen);
5358                 rc = -ENOMEM;
5359                 goto out_free_mboxq;
5360         }
5361         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5362         virtaddr = mboxq->sge_array->addr[0];
5363         mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5364         shdr = &mbx_cntl_attr->cfg_shdr;
5365         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5366         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5367         if (shdr_status || shdr_add_status || rc) {
5368                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5369                                 "3085 Mailbox x%x (x%x/x%x) failed, "
5370                                 "rc:x%x, status:x%x, add_status:x%x\n",
5371                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5372                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5373                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5374                                 rc, shdr_status, shdr_add_status);
5375                 rc = -ENXIO;
5376                 goto out_free_mboxq;
5377         }
5378         cntl_attr = &mbx_cntl_attr->cntl_attr;
5379         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5380         phba->sli4_hba.lnk_info.lnk_tp =
5381                 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5382         phba->sli4_hba.lnk_info.lnk_no =
5383                 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5384         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5385                         "3086 lnk_type:%d, lnk_numb:%d\n",
5386                         phba->sli4_hba.lnk_info.lnk_tp,
5387                         phba->sli4_hba.lnk_info.lnk_no);
5388
5389 retrieve_ppname:
5390         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5391                 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5392                 sizeof(struct lpfc_mbx_get_port_name) -
5393                 sizeof(struct lpfc_sli4_cfg_mhdr),
5394                 LPFC_SLI4_MBX_EMBED);
5395         get_port_name = &mboxq->u.mqe.un.get_port_name;
5396         shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5397         bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5398         bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5399                 phba->sli4_hba.lnk_info.lnk_tp);
5400         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5401         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5402         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5403         if (shdr_status || shdr_add_status || rc) {
5404                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5405                                 "3087 Mailbox x%x (x%x/x%x) failed: "
5406                                 "rc:x%x, status:x%x, add_status:x%x\n",
5407                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5408                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5409                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5410                                 rc, shdr_status, shdr_add_status);
5411                 rc = -ENXIO;
5412                 goto out_free_mboxq;
5413         }
5414         switch (phba->sli4_hba.lnk_info.lnk_no) {
5415         case LPFC_LINK_NUMBER_0:
5416                 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5417                                 &get_port_name->u.response);
5418                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5419                 break;
5420         case LPFC_LINK_NUMBER_1:
5421                 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5422                                 &get_port_name->u.response);
5423                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5424                 break;
5425         case LPFC_LINK_NUMBER_2:
5426                 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5427                                 &get_port_name->u.response);
5428                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5429                 break;
5430         case LPFC_LINK_NUMBER_3:
5431                 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5432                                 &get_port_name->u.response);
5433                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5434                 break;
5435         default:
5436                 break;
5437         }
5438
5439         if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5440                 phba->Port[0] = cport_name;
5441                 phba->Port[1] = '\0';
5442                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5443                                 "3091 SLI get port name: %s\n", phba->Port);
5444         }
5445
5446 out_free_mboxq:
5447         if (rc != MBX_TIMEOUT) {
5448                 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5449                         lpfc_sli4_mbox_cmd_free(phba, mboxq);
5450                 else
5451                         mempool_free(mboxq, phba->mbox_mem_pool);
5452         }
5453         return rc;
5454 }
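/*
 * Illustrative sketch (editorial, not driver code): the bf_set()/
 * bf_get() accessors used throughout operate on bitfields described by
 * _SHIFT/_MASK pairs.  Conceptually, for one 32-bit word:
 *
 *	word = (word & ~(MASK << SHIFT)) | ((val & MASK) << SHIFT);
 *	val  = (word >> SHIFT) & MASK;
 */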
5455
5456 /**
5457  * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5458  * @phba: pointer to lpfc hba data structure.
5459  *
5460  * This routine is called to explicitly arm the SLI4 device's completion and
5461  * event queues.
5462  **/
5463 static void
5464 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5465 {
5466         int qidx;
5467         struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
5468
5469         sli4_hba->sli4_cq_release(sli4_hba->mbx_cq, LPFC_QUEUE_REARM);
5470         sli4_hba->sli4_cq_release(sli4_hba->els_cq, LPFC_QUEUE_REARM);
5471         if (sli4_hba->nvmels_cq)
5472                 sli4_hba->sli4_cq_release(sli4_hba->nvmels_cq,
5473                                                 LPFC_QUEUE_REARM);
5474
5475         if (sli4_hba->fcp_cq)
5476                 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
5477                         sli4_hba->sli4_cq_release(sli4_hba->fcp_cq[qidx],
5478                                                 LPFC_QUEUE_REARM);
5479
5480         if (sli4_hba->nvme_cq)
5481                 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
5482                         sli4_hba->sli4_cq_release(sli4_hba->nvme_cq[qidx],
5483                                                 LPFC_QUEUE_REARM);
5484
5485         if (phba->cfg_fof)
5486                 sli4_hba->sli4_cq_release(sli4_hba->oas_cq, LPFC_QUEUE_REARM);
5487
5488         if (sli4_hba->hba_eq)
5489                 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
5490                         sli4_hba->sli4_eq_release(sli4_hba->hba_eq[qidx],
5491                                                         LPFC_QUEUE_REARM);
5492
5493         if (phba->nvmet_support) {
5494                 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5495                         sli4_hba->sli4_cq_release(
5496                                 sli4_hba->nvmet_cqset[qidx],
5497                                 LPFC_QUEUE_REARM);
5498                 }
5499         }
5500
5501         if (phba->cfg_fof)
5502                 sli4_hba->sli4_eq_release(sli4_hba->fof_eq, LPFC_QUEUE_REARM);
5503 }
5504
5505 /**
5506  * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5507  * @phba: Pointer to HBA context object.
5508  * @type: The resource extent type.
5509  * @extnt_count: buffer to hold port available extent count.
5510  * @extnt_size: buffer to hold element count per extent.
5511  *
5512  * This function calls the port and retrieves the number of available
5513  * extents and their size for a particular extent type.
5514  *
5515  * Returns: 0 if successful.  Nonzero otherwise.
5516  **/
5517 int
5518 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5519                                uint16_t *extnt_count, uint16_t *extnt_size)
5520 {
5521         int rc = 0;
5522         uint32_t length;
5523         uint32_t mbox_tmo;
5524         struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5525         LPFC_MBOXQ_t *mbox;
5526
5527         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5528         if (!mbox)
5529                 return -ENOMEM;
5530
5531         /* Find out how many extents are available for this resource type */
5532         length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5533                   sizeof(struct lpfc_sli4_cfg_mhdr));
5534         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5535                          LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5536                          length, LPFC_SLI4_MBX_EMBED);
5537
5538         /* Send an extents count of 0 - the GET doesn't use it. */
5539         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5540                                         LPFC_SLI4_MBX_EMBED);
5541         if (unlikely(rc)) {
5542                 rc = -EIO;
5543                 goto err_exit;
5544         }
5545
5546         if (!phba->sli4_hba.intr_enable)
5547                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5548         else {
5549                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5550                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5551         }
5552         if (unlikely(rc)) {
5553                 rc = -EIO;
5554                 goto err_exit;
5555         }
5556
5557         rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5558         if (bf_get(lpfc_mbox_hdr_status,
5559                    &rsrc_info->header.cfg_shdr.response)) {
5560                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5561                                 "2930 Failed to get resource extents "
5562                                 "Status 0x%x Add'l Status 0x%x\n",
5563                                 bf_get(lpfc_mbox_hdr_status,
5564                                        &rsrc_info->header.cfg_shdr.response),
5565                                 bf_get(lpfc_mbox_hdr_add_status,
5566                                        &rsrc_info->header.cfg_shdr.response));
5567                 rc = -EIO;
5568                 goto err_exit;
5569         }
5570
5571         *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5572                               &rsrc_info->u.rsp);
5573         *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5574                              &rsrc_info->u.rsp);
5575
5576         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5577                         "3162 Retrieved extents type-%d from port: count:%d, "
5578                         "size:%d\n", type, *extnt_count, *extnt_size);
5579
5580 err_exit:
5581         mempool_free(mbox, phba->mbox_mem_pool);
5582         return rc;
5583 }
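/*
 * Illustrative usage sketch (editorial, not driver code): a caller
 * sizing a provisioning request for XRI extents might do:
 *
 *	uint16_t cnt, size;
 *	uint32_t total_ids;
 *
 *	if (!lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &cnt, &size))
 *		total_ids = cnt * size;
 */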
5584
5585 /**
5586  * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5587  * @phba: Pointer to HBA context object.
5588  * @type: The extent type to check.
5589  *
5590  * This function reads the current available extents from the port and checks
5591  * if the extent count or extent size has changed since the last access.
5592  * Callers use this routine post port reset to determine whether there is
5593  * an extent reprovisioning requirement.
5594  *
5595  * Returns:
5596  *   -EIO: the mailbox command failed.
5597  *   1: Extent count or size has changed.
5598  *   0: No changes.
5599  **/
5600 static int
5601 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5602 {
5603         uint16_t curr_ext_cnt, rsrc_ext_cnt;
5604         uint16_t size_diff, rsrc_ext_size;
5605         int rc = 0;
5606         struct lpfc_rsrc_blks *rsrc_entry;
5607         struct list_head *rsrc_blk_list = NULL;
5608
5609         size_diff = 0;
5610         curr_ext_cnt = 0;
5611         rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5612                                             &rsrc_ext_cnt,
5613                                             &rsrc_ext_size);
5614         if (unlikely(rc))
5615                 return -EIO;
5616
5617         switch (type) {
5618         case LPFC_RSC_TYPE_FCOE_RPI:
5619                 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5620                 break;
5621         case LPFC_RSC_TYPE_FCOE_VPI:
5622                 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5623                 break;
5624         case LPFC_RSC_TYPE_FCOE_XRI:
5625                 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5626                 break;
5627         case LPFC_RSC_TYPE_FCOE_VFI:
5628                 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5629                 break;
5630         default:
5631                 break;
5632         }
5633
5634         list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5635                 curr_ext_cnt++;
5636                 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5637                         size_diff++;
5638         }
5639
5640         if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5641                 rc = 1;
5642
5643         return rc;
5644 }
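/*
 * Illustrative usage sketch (editorial, not driver code;
 * reprovision_extents() is a hypothetical helper): a post-reset caller
 * acts on the tri-state return:
 *
 *	rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI);
 *	if (rc < 0)
 *		return rc;
 *	if (rc == 1)
 *		reprovision_extents(phba);
 */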
5645
5646 /**
5647  * lpfc_sli4_cfg_post_extnts - Post an extent allocation request to the port
5648  * @phba: Pointer to HBA context object.
5649  * @extnt_cnt: number of available extents.
5650  * @type: the extent type (rpi, xri, vfi, vpi).
5651  * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5652  * @mbox: pointer to the caller's allocated mailbox structure.
5653  *
5654  * This function executes the extents allocation request.  It also
5655  * takes care of the amount of memory needed to allocate or get the
5656  * allocated extents. It is the caller's responsibility to evaluate
5657  * the response.
5658  *
5659  * Returns:
5660  *   -ENOMEM or -EIO: the error value describes the condition found.
5661  *   0: if successful
5662  **/
5663 static int
5664 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5665                           uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5666 {
5667         int rc = 0;
5668         uint32_t req_len;
5669         uint32_t emb_len;
5670         uint32_t alloc_len, mbox_tmo;
5671
5672         /* Calculate the total requested length of the dma memory */
5673         req_len = extnt_cnt * sizeof(uint16_t);
5674
5675         /*
5676          * Calculate the size of an embedded mailbox.  The uint32_t
5677          * accounts for the extents-specific word.
5678          */
5679         emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5680                 sizeof(uint32_t);
5681
5682         /*
5683          * Presume the allocation and response will fit into an embedded
5684          * mailbox.  If not true, reconfigure to a non-embedded mailbox.
5685          */
5686         *emb = LPFC_SLI4_MBX_EMBED;
5687         if (req_len > emb_len) {
5688                 req_len = extnt_cnt * sizeof(uint16_t) +
5689                         sizeof(union lpfc_sli4_cfg_shdr) +
5690                         sizeof(uint32_t);
5691                 *emb = LPFC_SLI4_MBX_NEMBED;
5692         }
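        /*
         * Worked example (editorial note; byte counts are hypothetical):
         * if sizeof(MAILBOX_t) were 256 and sizeof(struct mbox_header)
         * were 20, then emb_len = 256 - 20 - 4 = 232 bytes, enough for
         * 116 uint16_t ids.  Any larger request is rebuilt above as a
         * non-embedded mailbox with its own cfg_shdr and extents word.
         */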
5693
5694         alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5695                                      LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5696                                      req_len, *emb);
5697         if (alloc_len < req_len) {
5698                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5699                         "2982 Allocated DMA memory size (x%x) is "
5700                         "less than the requested DMA memory "
5701                         "size (x%x)\n", alloc_len, req_len);
5702                 return -ENOMEM;
5703         }
5704         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5705         if (unlikely(rc))
5706                 return -EIO;
5707
5708         if (!phba->sli4_hba.intr_enable)
5709                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5710         else {
5711                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5712                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5713         }
5714
5715         if (unlikely(rc))
5716                 rc = -EIO;
5717         return rc;
5718 }
5719
5720 /**
5721  * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5722  * @phba: Pointer to HBA context object.
5723  * @type:  The resource extent type to allocate.
5724  *
5725  * This function allocates the number of elements for the specified
5726  * resource type.
5727  **/
5728 static int
5729 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5730 {
5731         bool emb = false;
5732         uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5733         uint16_t rsrc_id, rsrc_start, j, k;
5734         uint16_t *ids;
5735         int i, rc;
5736         unsigned long longs;
5737         unsigned long *bmask;
5738         struct lpfc_rsrc_blks *rsrc_blks;
5739         LPFC_MBOXQ_t *mbox;
5740         uint32_t length;
5741         struct lpfc_id_range *id_array = NULL;
5742         void *virtaddr = NULL;
5743         struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5744         struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5745         struct list_head *ext_blk_list;
5746
5747         rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5748                                             &rsrc_cnt,
5749                                             &rsrc_size);
5750         if (unlikely(rc))
5751                 return -EIO;
5752
5753         if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5754                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5755                         "3009 No available Resource Extents "
5756                         "for resource type 0x%x: Count: 0x%x, "
5757                         "Size 0x%x\n", type, rsrc_cnt,
5758                         rsrc_size);
5759                 return -ENOMEM;
5760         }
5761
5762         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5763                         "2903 Post resource extents type-0x%x: "
5764                         "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5765
5766         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5767         if (!mbox)
5768                 return -ENOMEM;
5769
5770         rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5771         if (unlikely(rc)) {
5772                 rc = -EIO;
5773                 goto err_exit;
5774         }
5775
5776         /*
5777          * Figure out where the response is located.  Then get local pointers
5778          * to the response data.  The port does not guarantee to respond to
5779          * all extent count requests, so update the local variable with the
5780          * allocated count from the port.
5781          */
5782         if (emb == LPFC_SLI4_MBX_EMBED) {
5783                 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5784                 id_array = &rsrc_ext->u.rsp.id[0];
5785                 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5786         } else {
5787                 virtaddr = mbox->sge_array->addr[0];
5788                 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5789                 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5790                 id_array = &n_rsrc->id;
5791         }
5792
5793         longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5794         rsrc_id_cnt = rsrc_cnt * rsrc_size;
5795
5796         /*
5797          * Based on the resource size and count, correct the base and max
5798          * resource values.
5799          */
5800         length = sizeof(struct lpfc_rsrc_blks);
5801         switch (type) {
5802         case LPFC_RSC_TYPE_FCOE_RPI:
5803                 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5804                                                    sizeof(unsigned long),
5805                                                    GFP_KERNEL);
5806                 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5807                         rc = -ENOMEM;
5808                         goto err_exit;
5809                 }
5810                 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5811                                                  sizeof(uint16_t),
5812                                                  GFP_KERNEL);
5813                 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5814                         kfree(phba->sli4_hba.rpi_bmask);
5815                         rc = -ENOMEM;
5816                         goto err_exit;
5817                 }
5818
5819                 /*
5820                  * The next_rpi was initialized with the maximum available
5821                  * count but the port may allocate a smaller number.  Catch
5822                  * that case and update the next_rpi.
5823                  */
5824                 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5825
5826                 /* Initialize local ptrs for common extent processing later. */
5827                 bmask = phba->sli4_hba.rpi_bmask;
5828                 ids = phba->sli4_hba.rpi_ids;
5829                 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5830                 break;
5831         case LPFC_RSC_TYPE_FCOE_VPI:
5832                 phba->vpi_bmask = kzalloc(longs *
5833                                           sizeof(unsigned long),
5834                                           GFP_KERNEL);
5835                 if (unlikely(!phba->vpi_bmask)) {
5836                         rc = -ENOMEM;
5837                         goto err_exit;
5838                 }
5839                 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5840                                          sizeof(uint16_t),
5841                                          GFP_KERNEL);
5842                 if (unlikely(!phba->vpi_ids)) {
5843                         kfree(phba->vpi_bmask);
5844                         rc = -ENOMEM;
5845                         goto err_exit;
5846                 }
5847
5848                 /* Initialize local ptrs for common extent processing later. */
5849                 bmask = phba->vpi_bmask;
5850                 ids = phba->vpi_ids;
5851                 ext_blk_list = &phba->lpfc_vpi_blk_list;
5852                 break;
5853         case LPFC_RSC_TYPE_FCOE_XRI:
5854                 phba->sli4_hba.xri_bmask = kzalloc(longs *
5855                                                    sizeof(unsigned long),
5856                                                    GFP_KERNEL);
5857                 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5858                         rc = -ENOMEM;
5859                         goto err_exit;
5860                 }
5861                 phba->sli4_hba.max_cfg_param.xri_used = 0;
5862                 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5863                                                  sizeof(uint16_t),
5864                                                  GFP_KERNEL);
5865                 if (unlikely(!phba->sli4_hba.xri_ids)) {
5866                         kfree(phba->sli4_hba.xri_bmask);
5867                         rc = -ENOMEM;
5868                         goto err_exit;
5869                 }
5870
5871                 /* Initialize local ptrs for common extent processing later. */
5872                 bmask = phba->sli4_hba.xri_bmask;
5873                 ids = phba->sli4_hba.xri_ids;
5874                 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5875                 break;
5876         case LPFC_RSC_TYPE_FCOE_VFI:
5877                 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5878                                                    sizeof(unsigned long),
5879                                                    GFP_KERNEL);
5880                 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5881                         rc = -ENOMEM;
5882                         goto err_exit;
5883                 }
5884                 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5885                                                  sizeof(uint16_t),
5886                                                  GFP_KERNEL);
5887                 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5888                         kfree(phba->sli4_hba.vfi_bmask);
5889                         rc = -ENOMEM;
5890                         goto err_exit;
5891                 }
5892
5893                 /* Initialize local ptrs for common extent processing later. */
5894                 bmask = phba->sli4_hba.vfi_bmask;
5895                 ids = phba->sli4_hba.vfi_ids;
5896                 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5897                 break;
5898         default:
5899                 rc = -EIO;      /* Unsupported Opcode.  Fail call. */
5900                 id_array = NULL;
5901                 bmask = NULL;
5902                 ids = NULL;
5903                 ext_blk_list = NULL;
5904                 goto err_exit;
5905         }
5906
5907         /*
5908          * Complete initializing the extent configuration with the
5909          * allocated ids assigned to this function.  The bitmask serves
5910          * as an index into the array and manages the available ids.  The
5911          * array just stores the ids communicated to the port via the wqes.
5912          */
5913         for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5914                 if ((i % 2) == 0)
5915                         rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5916                                          &id_array[k]);
5917                 else
5918                         rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5919                                          &id_array[k]);
5920
5921                 rsrc_blks = kzalloc(length, GFP_KERNEL);
5922                 if (unlikely(!rsrc_blks)) {
5923                         rc = -ENOMEM;
5924                         kfree(bmask);
5925                         kfree(ids);
5926                         goto err_exit;
5927                 }
5928                 rsrc_blks->rsrc_start = rsrc_id;
5929                 rsrc_blks->rsrc_size = rsrc_size;
5930                 list_add_tail(&rsrc_blks->list, ext_blk_list);
5931                 rsrc_start = rsrc_id;
5932                 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
5933                         phba->sli4_hba.scsi_xri_start = rsrc_start +
5934                                 lpfc_sli4_get_iocb_cnt(phba);
5935                         phba->sli4_hba.nvme_xri_start =
5936                                 phba->sli4_hba.scsi_xri_start +
5937                                 phba->sli4_hba.scsi_xri_max;
5938                 }
5939
5940                 while (rsrc_id < (rsrc_start + rsrc_size)) {
5941                         ids[j] = rsrc_id;
5942                         rsrc_id++;
5943                         j++;
5944                 }
5945                 /* Entire word processed.  Get next word. */
5946                 if ((i % 2) == 1)
5947                         k++;
5948         }
5949  err_exit:
5950         lpfc_sli4_mbox_cmd_free(phba, mbox);
5951         return rc;
5952 }
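/*
 * Illustrative sketch (editorial, not driver code; assumes word4_0
 * occupies the low half of each response word, and word[k] stands for
 * the k'th id_array word): the allocation loop above unpacks two
 * 16-bit starting ids per word, which is why it alternates
 * word4_0/word4_1 and advances k every two extents:
 *
 *	rsrc_id = (i & 1) ? (word[k] >> 16) & 0xFFFF
 *			  : word[k] & 0xFFFF;
 */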
5953
5954
5955
5956 /**
5957  * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
5958  * @phba: Pointer to HBA context object.
5959  * @type: the extent's type.
5960  *
5961  * This function deallocates all extents of a particular resource type.
5962  * SLI4 does not allow for deallocating a particular extent range.  It
5963  * is the caller's responsibility to release all kernel memory resources.
5964  **/
5965 static int
5966 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5967 {
5968         int rc;
5969         uint32_t length, mbox_tmo = 0;
5970         LPFC_MBOXQ_t *mbox;
5971         struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5972         struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5973
5974         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5975         if (!mbox)
5976                 return -ENOMEM;
5977
5978         /*
5979          * This function sends an embedded mailbox because it only sends
5980          * the resource type.  All extents of this type are released by the
5981          * port.
5982          */
5983         length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5984                   sizeof(struct lpfc_sli4_cfg_mhdr));
5985         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5986                          LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5987                          length, LPFC_SLI4_MBX_EMBED);
5988
5989         /* Send an extents count of 0 - the dealloc doesn't use it. */
5990         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5991                                         LPFC_SLI4_MBX_EMBED);
5992         if (unlikely(rc)) {
5993                 rc = -EIO;
5994                 goto out_free_mbox;
5995         }
5996         if (!phba->sli4_hba.intr_enable)
5997                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5998         else {
5999                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6000                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6001         }
6002         if (unlikely(rc)) {
6003                 rc = -EIO;
6004                 goto out_free_mbox;
6005         }
6006
6007         dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6008         if (bf_get(lpfc_mbox_hdr_status,
6009                    &dealloc_rsrc->header.cfg_shdr.response)) {
6010                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6011                                 "2919 Failed to release resource extents "
6012                                 "for type %d - Status 0x%x Add'l Status 0x%x. "
6013                                 "Resource memory not released.\n",
6014                                 type,
6015                                 bf_get(lpfc_mbox_hdr_status,
6016                                     &dealloc_rsrc->header.cfg_shdr.response),
6017                                 bf_get(lpfc_mbox_hdr_add_status,
6018                                     &dealloc_rsrc->header.cfg_shdr.response));
6019                 rc = -EIO;
6020                 goto out_free_mbox;
6021         }
6022
6023         /* Release kernel memory resources for the specific type. */
6024         switch (type) {
6025         case LPFC_RSC_TYPE_FCOE_VPI:
6026                 kfree(phba->vpi_bmask);
6027                 kfree(phba->vpi_ids);
6028                 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6029                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6030                                     &phba->lpfc_vpi_blk_list, list) {
6031                         list_del_init(&rsrc_blk->list);
6032                         kfree(rsrc_blk);
6033                 }
6034                 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6035                 break;
6036         case LPFC_RSC_TYPE_FCOE_XRI:
6037                 kfree(phba->sli4_hba.xri_bmask);
6038                 kfree(phba->sli4_hba.xri_ids);
6039                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6040                                     &phba->sli4_hba.lpfc_xri_blk_list, list) {
6041                         list_del_init(&rsrc_blk->list);
6042                         kfree(rsrc_blk);
6043                 }
6044                 break;
6045         case LPFC_RSC_TYPE_FCOE_VFI:
6046                 kfree(phba->sli4_hba.vfi_bmask);
6047                 kfree(phba->sli4_hba.vfi_ids);
6048                 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6049                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6050                                     &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6051                         list_del_init(&rsrc_blk->list);
6052                         kfree(rsrc_blk);
6053                 }
6054                 break;
6055         case LPFC_RSC_TYPE_FCOE_RPI:
6056                 /* RPI bitmask and physical id array are cleaned up earlier. */
6057                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6058                                     &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6059                         list_del_init(&rsrc_blk->list);
6060                         kfree(rsrc_blk);
6061                 }
6062                 break;
6063         default:
6064                 break;
6065         }
6066
6067         bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6068
6069  out_free_mbox:
6070         mempool_free(mbox, phba->mbox_mem_pool);
6071         return rc;
6072 }
6073
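/**
 * lpfc_set_features - Populate a SET_FEATURES mailbox command
 * @phba: Pointer to HBA context object.
 * @mbox: Pointer to the caller's allocated mailbox structure.
 * @feature: feature to enable (LPFC_SET_UE_RECOVERY or LPFC_SET_MDS_DIAGS).
 *
 * This routine only formats the embedded mailbox command; issuing it and
 * checking its completion status are left to the caller.
 **/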
6074 static void
6075 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6076                   uint32_t feature)
6077 {
6078         uint32_t len;
6079
6080         len = sizeof(struct lpfc_mbx_set_feature) -
6081                 sizeof(struct lpfc_sli4_cfg_mhdr);
6082         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6083                          LPFC_MBOX_OPCODE_SET_FEATURES, len,
6084                          LPFC_SLI4_MBX_EMBED);
6085
6086         switch (feature) {
6087         case LPFC_SET_UE_RECOVERY:
6088                 bf_set(lpfc_mbx_set_feature_UER,
6089                        &mbox->u.mqe.un.set_feature, 1);
6090                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6091                 mbox->u.mqe.un.set_feature.param_len = 8;
6092                 break;
6093         case LPFC_SET_MDS_DIAGS:
6094                 bf_set(lpfc_mbx_set_feature_mds,
6095                        &mbox->u.mqe.un.set_feature, 1);
6096                 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6097                        &mbox->u.mqe.un.set_feature, 1);
6098                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6099                 mbox->u.mqe.un.set_feature.param_len = 8;
6100                 break;
6101         }
6102
6103         return;
6104 }
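/*
 * Illustrative usage sketch (editorial, not driver code): a caller
 * would allocate, format, and then issue the mailbox, e.g.:
 *
 *	int rc;
 *
 *	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (mbox) {
 *		lpfc_set_features(phba, mbox, LPFC_SET_MDS_DIAGS);
 *		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *	}
 */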
6105
6106 /**
6107  * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
6108  * @phba: Pointer to HBA context object.
6109  *
6110  * This function allocates all SLI4 resource identifiers.
6111  **/
6112 int
6113 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6114 {
6115         int i, rc, error = 0;
6116         uint16_t count, base;
6117         unsigned long longs;
6118
6119         if (!phba->sli4_hba.rpi_hdrs_in_use)
6120                 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6121         if (phba->sli4_hba.extents_in_use) {
6122                 /*
6123                  * The port supports resource extents. The XRI, VPI, VFI, RPI
6124                  * resource extent count must be read and allocated before
6125                  * provisioning the resource id arrays.
6126                  */
6127                 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6128                     LPFC_IDX_RSRC_RDY) {
6129                         /*
6130                          * Extent-based resources are set - the driver could
6131                          * be in a port reset. Figure out if any corrective
6132                          * actions need to be taken.
6133                          */
6134                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6135                                                  LPFC_RSC_TYPE_FCOE_VFI);
6136                         if (rc != 0)
6137                                 error++;
6138                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6139                                                  LPFC_RSC_TYPE_FCOE_VPI);
6140                         if (rc != 0)
6141                                 error++;
6142                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6143                                                  LPFC_RSC_TYPE_FCOE_XRI);
6144                         if (rc != 0)
6145                                 error++;
6146                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6147                                                  LPFC_RSC_TYPE_FCOE_RPI);
6148                         if (rc != 0)
6149                                 error++;
6150
6151                         /*
6152                          * It's possible that the number of resources
6153                          * provided to this port instance changed between
6154                          * resets.  Detect this condition and reallocate
6155                          * resources.  Otherwise, there is no action.
6156                          */
6157                         if (error) {
6158                                 lpfc_printf_log(phba, KERN_INFO,
6159                                                 LOG_MBOX | LOG_INIT,
6160                                                 "2931 Detected extent resource "
6161                                                 "change.  Reallocating all "
6162                                                 "extents.\n");
6163                                 rc = lpfc_sli4_dealloc_extent(phba,
6164                                                  LPFC_RSC_TYPE_FCOE_VFI);
6165                                 rc = lpfc_sli4_dealloc_extent(phba,
6166                                                  LPFC_RSC_TYPE_FCOE_VPI);
6167                                 rc = lpfc_sli4_dealloc_extent(phba,
6168                                                  LPFC_RSC_TYPE_FCOE_XRI);
6169                                 rc = lpfc_sli4_dealloc_extent(phba,
6170                                                  LPFC_RSC_TYPE_FCOE_RPI);
6171                         } else
6172                                 return 0;
6173                 }
6174
6175                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6176                 if (unlikely(rc))
6177                         goto err_exit;
6178
6179                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6180                 if (unlikely(rc))
6181                         goto err_exit;
6182
6183                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6184                 if (unlikely(rc))
6185                         goto err_exit;
6186
6187                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6188                 if (unlikely(rc))
6189                         goto err_exit;
6190                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6191                        LPFC_IDX_RSRC_RDY);
6192                 return rc;
6193         } else {
6194                 /*
6195                  * The port does not support resource extents.  The XRI, VPI,
6196                  * VFI, RPI resource ids were determined from READ_CONFIG.
6197                  * Just allocate the bitmasks and provision the resource id
6198                  * arrays.  If a port reset is active, the resources don't
6199                  * need any action - just exit.
6200                  */
6201                 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6202                     LPFC_IDX_RSRC_RDY) {
6203                         lpfc_sli4_dealloc_resource_identifiers(phba);
6204                         lpfc_sli4_remove_rpis(phba);
6205                 }
6206                 /* RPIs. */
6207                 count = phba->sli4_hba.max_cfg_param.max_rpi;
6208                 if (count <= 0) {
6209                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6210                                         "3279 Invalid provisioning of "
6211                                         "rpi:%d\n", count);
6212                         rc = -EINVAL;
6213                         goto err_exit;
6214                 }
6215                 base = phba->sli4_hba.max_cfg_param.rpi_base;
6216                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6217                 phba->sli4_hba.rpi_bmask = kzalloc(longs *
6218                                                    sizeof(unsigned long),
6219                                                    GFP_KERNEL);
6220                 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6221                         rc = -ENOMEM;
6222                         goto err_exit;
6223                 }
6224                 phba->sli4_hba.rpi_ids = kzalloc(count *
6225                                                  sizeof(uint16_t),
6226                                                  GFP_KERNEL);
6227                 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6228                         rc = -ENOMEM;
6229                         goto free_rpi_bmask;
6230                 }
6231
6232                 for (i = 0; i < count; i++)
6233                         phba->sli4_hba.rpi_ids[i] = base + i;
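                /*
                 * Worked example (editorial note; counts are
                 * hypothetical): with max_rpi = 64 and 64-bit longs,
                 * longs = (64 + 63) / 64 = 1, so one unsigned long
                 * tracks all 64 rpi slots while rpi_ids[] maps slot i
                 * to physical id rpi_base + i.
                 */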
6234
6235                 /* VPIs. */
6236                 count = phba->sli4_hba.max_cfg_param.max_vpi;
6237                 if (count <= 0) {
6238                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6239                                         "3280 Invalid provisioning of "
6240                                         "vpi:%d\n", count);
6241                         rc = -EINVAL;
6242                         goto free_rpi_ids;
6243                 }
6244                 base = phba->sli4_hba.max_cfg_param.vpi_base;
6245                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6246                 phba->vpi_bmask = kzalloc(longs *
6247                                           sizeof(unsigned long),
6248                                           GFP_KERNEL);
6249                 if (unlikely(!phba->vpi_bmask)) {
6250                         rc = -ENOMEM;
6251                         goto free_rpi_ids;
6252                 }
6253                 phba->vpi_ids = kzalloc(count *
6254                                         sizeof(uint16_t),
6255                                         GFP_KERNEL);
6256                 if (unlikely(!phba->vpi_ids)) {
6257                         rc = -ENOMEM;
6258                         goto free_vpi_bmask;
6259                 }
6260
6261                 for (i = 0; i < count; i++)
6262                         phba->vpi_ids[i] = base + i;
6263
6264                 /* XRIs. */
6265                 count = phba->sli4_hba.max_cfg_param.max_xri;
6266                 if (count <= 0) {
6267                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6268                                         "3281 Invalid provisioning of "
6269                                         "xri:%d\n", count);
6270                         rc = -EINVAL;
6271                         goto free_vpi_ids;
6272                 }
6273                 base = phba->sli4_hba.max_cfg_param.xri_base;
6274                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6275                 phba->sli4_hba.xri_bmask = kzalloc(longs *
6276                                                    sizeof(unsigned long),
6277                                                    GFP_KERNEL);
6278                 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6279                         rc = -ENOMEM;
6280                         goto free_vpi_ids;
6281                 }
6282                 phba->sli4_hba.max_cfg_param.xri_used = 0;
6283                 phba->sli4_hba.xri_ids = kzalloc(count *
6284                                                  sizeof(uint16_t),
6285                                                  GFP_KERNEL);
6286                 if (unlikely(!phba->sli4_hba.xri_ids)) {
6287                         rc = -ENOMEM;
6288                         goto free_xri_bmask;
6289                 }
6290
6291                 for (i = 0; i < count; i++)
6292                         phba->sli4_hba.xri_ids[i] = base + i;
6293
6294                 /* VFIs. */
6295                 count = phba->sli4_hba.max_cfg_param.max_vfi;
6296                 if (count <= 0) {
6297                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6298                                         "3282 Invalid provisioning of "
6299                                         "vfi:%d\n", count);
6300                         rc = -EINVAL;
6301                         goto free_xri_ids;
6302                 }
6303                 base = phba->sli4_hba.max_cfg_param.vfi_base;
6304                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6305                 phba->sli4_hba.vfi_bmask = kzalloc(longs *
6306                                                    sizeof(unsigned long),
6307                                                    GFP_KERNEL);
6308                 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6309                         rc = -ENOMEM;
6310                         goto free_xri_ids;
6311                 }
6312                 phba->sli4_hba.vfi_ids = kzalloc(count *
6313                                                  sizeof(uint16_t),
6314                                                  GFP_KERNEL);
6315                 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6316                         rc = -ENOMEM;
6317                         goto free_vfi_bmask;
6318                 }
6319
6320                 for (i = 0; i < count; i++)
6321                         phba->sli4_hba.vfi_ids[i] = base + i;
6322
6323                 /*
6324                  * Mark all resources ready.  An HBA reset doesn't need
6325                  * to reset the initialization.
6326                  */
6327                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6328                        LPFC_IDX_RSRC_RDY);
6329                 return 0;
6330         }
6331
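/*
 * Editorial note: the unwind labels below free everything allocated
 * before the failure point, in reverse order of allocation, and NULL
 * each pointer so a later teardown cannot double-free it.
 */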
6332  free_vfi_bmask:
6333         kfree(phba->sli4_hba.vfi_bmask);
6334         phba->sli4_hba.vfi_bmask = NULL;
6335  free_xri_ids:
6336         kfree(phba->sli4_hba.xri_ids);
6337         phba->sli4_hba.xri_ids = NULL;
6338  free_xri_bmask:
6339         kfree(phba->sli4_hba.xri_bmask);
6340         phba->sli4_hba.xri_bmask = NULL;
6341  free_vpi_ids:
6342         kfree(phba->vpi_ids);
6343         phba->vpi_ids = NULL;
6344  free_vpi_bmask:
6345         kfree(phba->vpi_bmask);
6346         phba->vpi_bmask = NULL;
6347  free_rpi_ids:
6348         kfree(phba->sli4_hba.rpi_ids);
6349         phba->sli4_hba.rpi_ids = NULL;
6350  free_rpi_bmask:
6351         kfree(phba->sli4_hba.rpi_bmask);
6352         phba->sli4_hba.rpi_bmask = NULL;
6353  err_exit:
6354         return rc;
6355 }
6356
6357 /**
6358  * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
6359  * @phba: Pointer to HBA context object.
6360  *
6361  * This function releases all resource identifiers allocated for this
6362  * port, including any resource extents when extents are in use.
6363  **/
6364 int
6365 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6366 {
6367         if (phba->sli4_hba.extents_in_use) {
6368                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6369                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6370                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6371                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6372         } else {
6373                 kfree(phba->vpi_bmask);
6374                 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6375                 kfree(phba->vpi_ids);
6376                 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6377                 kfree(phba->sli4_hba.xri_bmask);
6378                 kfree(phba->sli4_hba.xri_ids);
6379                 kfree(phba->sli4_hba.vfi_bmask);
6380                 kfree(phba->sli4_hba.vfi_ids);
6381                 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6382                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6383         }
6384
6385         return 0;
6386 }
6387
6388 /**
6389  * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6390  * @phba: Pointer to HBA context object.
6391  * @type: The resource extent type.
6392  * @extnt_cnt: buffer to hold port extent count response.
6393  * @extnt_size: buffer to hold port extent size response.
6394  *
6395  * This function calls the port to read the host allocated extents
6396  * for a particular type.
6397  **/
int
lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_cnt, uint16_t *extnt_size)
{
	bool emb;
	int rc = 0;
	uint16_t curr_blks = 0;
	uint32_t req_len, emb_len;
	uint32_t alloc_len, mbox_tmo;
	struct list_head *blk_list_head;
	struct lpfc_rsrc_blks *rsrc_blk;
	LPFC_MBOXQ_t *mbox;
	void *virtaddr = NULL;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
	union  lpfc_sli4_cfg_shdr *shdr;

	switch (type) {
	case LPFC_RSC_TYPE_FCOE_VPI:
		blk_list_head = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_RPI:
		blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	default:
		return -EIO;
	}

	/* Count the number of extents currently allocated for this type. */
	list_for_each_entry(rsrc_blk, blk_list_head, list) {
		if (curr_blks == 0) {
			/*
			 * The GET_ALLOCATED mailbox does not return the size,
			 * just the count.  The size should be just the size
			 * stored in the current allocated block and all sizes
			 * for an extent type are the same so set the return
			 * value now.
			 */
			*extnt_size = rsrc_blk->rsrc_size;
		}
		curr_blks++;
	}

	/*
	 * Calculate the size of an embedded mailbox.  The uint32_t
	 * accounts for extents-specific word.
	 */
	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
		sizeof(uint32_t);

	/*
	 * Presume the allocation and response will fit into an embedded
	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
	 */
	emb = LPFC_SLI4_MBX_EMBED;
	req_len = emb_len;
	if (req_len > emb_len) {
		req_len = curr_blks * sizeof(uint16_t) +
			sizeof(union lpfc_sli4_cfg_shdr) +
			sizeof(uint32_t);
		emb = LPFC_SLI4_MBX_NEMBED;
	}
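
	/*
	 * As written, req_len equals emb_len when compared, so the
	 * non-embedded branch above is never taken and the GET_ALLOCATED
	 * request is always issued as an embedded mailbox command.
	 */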

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	memset(mbox, 0, sizeof(LPFC_MBOXQ_t));

	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
				     LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
				     req_len, emb);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2983 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		rc = -ENOMEM;
		goto err_exit;
	}
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}

	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	/*
	 * Figure out where the response is located.  Then get local pointers
	 * to the response data.  The port does not guarantee to respond to
	 * all extent count requests, so update the local variable with the
	 * allocated count from the port.
	 */
	if (emb == LPFC_SLI4_MBX_EMBED) {
		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
		shdr = &rsrc_ext->header.cfg_shdr;
		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
	} else {
		virtaddr = mbox->sge_array->addr[0];
		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
		shdr = &n_rsrc->cfg_shdr;
		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
	}

	if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
			"2984 Failed to read allocated resources "
			"for type %d - Status 0x%x Add'l Status 0x%x.\n",
			type,
			bf_get(lpfc_mbox_hdr_status, &shdr->response),
			bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
		rc = -EIO;
		goto err_exit;
	}
 err_exit:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return rc;
}
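
/*
 * Illustrative usage (hypothetical caller): query the XRI extents the
 * port has already allocated to the host, e.g. for debugfs reporting:
 *
 *	uint16_t cnt, size;
 *
 *	if (!lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &cnt, &size))
 *		pr_info("xri extents: %u blocks of %u\n", cnt, size);
 */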

/**
 * lpfc_sli4_repost_sgl_list - Repost the buffer sgl pages as a block
 * @phba: pointer to lpfc hba data structure.
 * @sgl_list: linked list of sgl buffers to post
 * @cnt: number of linked list buffers
 *
 * This routine walks the list of buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. It attempts to construct blocks
 * of buffer sgls which contain contiguous xris and uses the non-embedded
 * SGL block post mailbox commands to post them to the port. For a single
 * buffer sgl with a non-contiguous xri, if any, it shall use the embedded
 * SGL post mailbox command for posting.
 *
 * Returns: the number of XRIs actually posted on success, -EIO on failure.
 **/
static int
lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
			  struct list_head *sgl_list, int cnt)
{
	struct lpfc_sglq *sglq_entry = NULL;
	struct lpfc_sglq *sglq_entry_next = NULL;
	struct lpfc_sglq *sglq_entry_first = NULL;
	int status, total_cnt;
	int post_cnt = 0, num_posted = 0, block_cnt = 0;
	int last_xritag = NO_XRI;
	LIST_HEAD(prep_sgl_list);
	LIST_HEAD(blck_sgl_list);
	LIST_HEAD(allc_sgl_list);
	LIST_HEAD(post_sgl_list);
	LIST_HEAD(free_sgl_list);

	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(sgl_list, &allc_sgl_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	total_cnt = cnt;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &allc_sgl_list, list) {
		list_del_init(&sglq_entry->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (sglq_entry->sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_sgl_list, &blck_sgl_list);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posted++;

		/* keep track of last sgl's xritag */
		last_xritag = sglq_entry->sli4_xritag;

		/* end of repost sgl list condition for buffers */
		if (num_posted == total_cnt) {
			if (post_cnt == 0) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				status = lpfc_sli4_post_sgl(phba,
						sglq_entry->phys, 0,
						sglq_entry->sli4_xritag);
				if (!status) {
					/* successful, put sgl to posted list */
					list_add_tail(&sglq_entry->list,
						      &post_sgl_list);
				} else {
					/* Failure, put sgl to free list */
					lpfc_printf_log(phba, KERN_WARNING,
						LOG_SLI,
						"3159 Failed to post "
						"sgl, xritag:x%x\n",
						sglq_entry->sli4_xritag);
					list_add_tail(&sglq_entry->list,
						      &free_sgl_list);
					total_cnt--;
				}
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post the buffer list sgls as a block */
		status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
						 post_cnt);

		if (!status) {
			/* success, put sgl list to posted sgl list */
			list_splice_init(&blck_sgl_list, &post_sgl_list);
		} else {
			/* Failure, put sgl list to free sgl list */
			sglq_entry_first = list_first_entry(&blck_sgl_list,
							    struct lpfc_sglq,
							    list);
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3160 Failed to post sgl-list, "
					"xritag:x%x-x%x\n",
					sglq_entry_first->sli4_xritag,
					(sglq_entry_first->sli4_xritag +
					 post_cnt - 1));
			list_splice_init(&blck_sgl_list, &free_sgl_list);
			total_cnt -= post_cnt;
		}

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset sgl post count for next round of posting */
		post_cnt = 0;
	}

	/* free the sgls failed to post */
	lpfc_free_sgl_list(phba, &free_sgl_list);

	/* push sgls posted to the available list */
	if (!list_empty(&post_sgl_list)) {
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&post_sgl_list, sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3161 Failure to post sgl to port.\n");
		return -EIO;
	}

	/* return the number of XRIs actually posted */
	return total_cnt;
}
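
/*
 * Illustrative usage (mirroring the ELS sgl repost in lpfc_sli4_hba_setup
 * below): the caller records the returned count of XRIs actually posted:
 *
 *	rc = lpfc_sli4_repost_sgl_list(phba,
 *				       &phba->sli4_hba.lpfc_els_sgl_list,
 *				       phba->sli4_hba.els_xri_cnt);
 *	if (rc < 0)
 *		return rc;
 *	phba->sli4_hba.els_xri_cnt = rc;
 */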

void
lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	uint32_t len;

	len = sizeof(struct lpfc_mbx_set_host_data) -
		sizeof(struct lpfc_sli4_cfg_mhdr);
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
			 LPFC_SLI4_MBX_EMBED);

	mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
	mbox->u.mqe.un.set_host_data.param_len =
					LPFC_HOST_OS_DRIVER_VERSION_SIZE;
	snprintf(mbox->u.mqe.un.set_host_data.data,
		 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
		 "Linux %s v"LPFC_DRIVER_VERSION,
		 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
}
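
/*
 * The mailbox built above carries a host data string of the form
 * "Linux FC v<LPFC_DRIVER_VERSION>" (or "Linux FCoE v..." in FCoE mode),
 * identifying the host OS and driver version to the firmware.
 */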

int
lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		    struct lpfc_queue *drq, int count, int idx)
{
	int rc, i;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct lpfc_rqb *rqbp;
	unsigned long flags;
	struct rqb_dmabuf *rqb_buffer;
	LIST_HEAD(rqb_buf_list);

	spin_lock_irqsave(&phba->hbalock, flags);
	rqbp = hrq->rqbp;
	for (i = 0; i < count; i++) {
		/* IF RQ is already full, don't bother */
		if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
			break;
		rqb_buffer = rqbp->rqb_alloc_buffer(phba);
		if (!rqb_buffer)
			break;
		rqb_buffer->hrq = hrq;
		rqb_buffer->drq = drq;
		rqb_buffer->idx = idx;
		list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
	}
	while (!list_empty(&rqb_buf_list)) {
		list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
				 hbuf.list);

		hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
		hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
		drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
		drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
		rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
		if (rc < 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6421 Cannot post to HRQ %d: %x %x %x "
					"DRQ %x %x\n",
					hrq->queue_id,
					hrq->host_index,
					hrq->hba_index,
					hrq->entry_count,
					drq->host_index,
					drq->hba_index);
			rqbp->rqb_free_buffer(phba, rqb_buffer);
		} else {
			list_add_tail(&rqb_buffer->hbuf.list,
				      &rqbp->rqb_buffer_list);
			rqbp->buffer_count++;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return 1;
}
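
/*
 * Illustrative usage (mirroring the NVMET MRQ setup in lpfc_sli4_hba_setup
 * below): seed a header/data receive queue pair with its default number
 * of buffers:
 *
 *	lpfc_post_rq_buffer(phba, phba->sli4_hba.nvmet_mrq_hdr[i],
 *			    phba->sli4_hba.nvmet_mrq_data[i],
 *			    LPFC_NVMET_RQE_DEF_COUNT, i);
 */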

/**
 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
 * @phba: Pointer to HBA context object.
 *
 * This function is the main SLI4 device initialization PCI function. This
 * function is called by the HBA initialization code, HBA reset code and
 * HBA error attention handler code. Caller is not required to hold any
 * locks.
 **/
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
	int rc, i, cnt;
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mqe *mqe;
	uint8_t *vpd;
	uint32_t vpd_size;
	uint32_t ftr_rsp = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_dmabuf *mp;
	struct lpfc_rqb *rqbp;

	/* Perform a PCI function reset to start from a clean state */
	rc = lpfc_pci_function_reset(phba);
	if (unlikely(rc))
		return -ENODEV;

	/* Check the HBA Host Status Register for readiness */
	rc = lpfc_sli4_post_status_check(phba);
	if (unlikely(rc))
		return -ENODEV;
	else {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
	}

	/*
	 * Allocate a single mailbox container for initializing the
	 * port.
	 */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Issue READ_REV to collect vpd and FW information. */
	vpd_size = SLI4_PAGE_SIZE;
	vpd = kzalloc(vpd_size, GFP_KERNEL);
	if (!vpd) {
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
	if (unlikely(rc)) {
		kfree(vpd);
		goto out_free_mbox;
	}

	mqe = &mboxq->u.mqe;
	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
		phba->hba_flag |= HBA_FCOE_MODE;
		phba->fcp_embed_io = 0;	/* SLI4 FC support only */
	} else {
		phba->hba_flag &= ~HBA_FCOE_MODE;
	}

	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
		LPFC_DCBX_CEE_MODE)
		phba->hba_flag |= HBA_FIP_SUPPORT;
	else
		phba->hba_flag &= ~HBA_FIP_SUPPORT;

	phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0376 READ_REV Error. SLI Level %d "
			"FCoE enabled %d\n",
			phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
		rc = -EIO;
		kfree(vpd);
		goto out_free_mbox;
	}

	/*
	 * Continue initialization with default values even if the driver
	 * failed to read the FCoE param config regions; only read the
	 * parameters if the board is FCoE.
	 */
	if (phba->hba_flag & HBA_FCOE_MODE &&
	    lpfc_sli4_read_fcoe_params(phba))
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
			"2570 Failed to read FCoE parameters\n");

	/*
	 * Retrieve the sli4 device physical port name; failure to do so
	 * is considered non-fatal.
	 */
	rc = lpfc_sli4_retrieve_pport_name(phba);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"3080 Successful retrieving SLI4 device "
				"physical port name: %s.\n", phba->Port);

	/*
	 * Evaluate the read rev and vpd data. Populate the driver
	 * state with the results. If this routine fails, the failure
	 * is not fatal as the driver will use generic values.
	 */
	rc = lpfc_parse_vpd(phba, vpd, vpd_size);
	if (unlikely(!rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0377 Error %d parsing vpd. "
				"Using defaults.\n", rc);
		rc = 0;
	}
	kfree(vpd);

	/* Save information as VPD data */
	phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
	phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
	phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
	phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
					 &mqe->un.read_rev);
	phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
				       &mqe->un.read_rev);
	phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
					    &mqe->un.read_rev);
	phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
					   &mqe->un.read_rev);
	phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
	phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
	memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
	phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0380 READ_REV Status x%x "
			"fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_status, mqe),
			phba->vpd.rev.opFwName,
			phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
			phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);

	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3)  */
	rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
	if (phba->pport->cfg_lun_queue_depth > rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3362 LUN queue depth changed from %d to %d\n",
				phba->pport->cfg_lun_queue_depth, rc);
		phba->pport->cfg_lun_queue_depth = rc;
	}
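
	/*
	 * For example, a port reporting max_xri = 1024 clamps
	 * cfg_lun_queue_depth to at most 1024 >> 3 = 128.
	 */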

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_0) {
		lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc == MBX_SUCCESS) {
			phba->hba_flag |= HBA_RECOVERABLE_UE;
			/* Set 1Sec interval to detect UE */
			phba->eratt_poll_interval = 1;
			phba->sli4_hba.ue_to_sr = bf_get(
					lpfc_mbx_set_feature_UESR,
					&mboxq->u.mqe.un.set_feature);
			phba->sli4_hba.ue_to_rp = bf_get(
					lpfc_mbx_set_feature_UERP,
					&mboxq->u.mqe.un.set_feature);
		}
	}

	if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
		/* Enable MDS Diagnostics only if the SLI Port supports it */
		lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS)
			phba->mds_diags_support = 0;
	}

	/*
	 * Discover the port's supported feature set and match it against the
	 * host's requests.
	 */
	lpfc_request_features(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	/*
	 * The port must support FCP initiator mode as this is the
	 * only mode running in the host.
	 */
	if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0378 No support for fcpi mode.\n");
		ftr_rsp++;
	}

	/* Performance Hints are ONLY for FCoE */
	if (phba->hba_flag & HBA_FCOE_MODE) {
		if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
			phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
		else
			phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
	}

	/*
	 * If the port cannot support the host's requested features
	 * then turn off the global config parameters to disable the
	 * feature in the driver.  This is not a fatal error.
	 */
	phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
	if (phba->cfg_enable_bg) {
		if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
			phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
		else
			ftr_rsp++;
	}

	if (phba->max_vpi && phba->cfg_enable_npiv &&
	    !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
		ftr_rsp++;

	if (ftr_rsp) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0379 Feature Mismatch Data: x%08x %08x "
				"x%x x%x x%x\n", mqe->un.req_ftrs.word2,
				mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
				phba->cfg_enable_npiv, phba->max_vpi);
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
			phba->cfg_enable_bg = 0;
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
			phba->cfg_enable_npiv = 0;
	}

	/* These SLI3 features are assumed in SLI4 */
	spin_lock_irq(&phba->hbalock);
	phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Allocate all resources (xri, rpi, vpi, vfi) now.  Subsequent
	 * calls depend on these resources to complete port setup.
	 */
	rc = lpfc_sli4_alloc_resource_identifiers(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2920 Failed to alloc Resource IDs "
				"rc = x%x\n", rc);
		goto out_free_mbox;
	}

	lpfc_set_host_data(phba, mboxq);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"2134 Failed to set host os driver version %x",
				rc);
	}

	/* Read the port's service parameters. */
	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
	if (rc) {
		phba->link_state = LPFC_HBA_ERROR;
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	mboxq->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	mp = (struct lpfc_dmabuf *) mboxq->context1;
	if (rc == MBX_SUCCESS) {
		memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
		rc = 0;
	}

	/*
	 * This memory was allocated by the lpfc_read_sparam routine. Release
	 * it to the mbuf pool.
	 */
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mboxq->context1 = NULL;
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0382 READ_SPARAM command failed "
				"status %d, mbxStatus x%x\n",
				rc, bf_get(lpfc_mqe_status, mqe));
		phba->link_state = LPFC_HBA_ERROR;
		rc = -EIO;
		goto out_free_mbox;
	}

	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);

	/* Create all the SLI4 queues */
	rc = lpfc_sli4_queue_create(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3089 Failed to allocate queues\n");
		rc = -ENODEV;
		goto out_free_mbox;
	}
	/* Set up all the queues to the device */
	rc = lpfc_sli4_queue_setup(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0381 Error %d during queue setup.\n ", rc);
		goto out_stop_timers;
	}
	/* Initialize the driver internal SLI layer lists. */
	lpfc_sli4_setup(phba);
	lpfc_sli4_queue_init(phba);

	/* update host els xri-sgl sizes and mappings */
	rc = lpfc_sli4_els_sgl_update(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"1400 Failed to update xri-sgl size and "
				"mapping: %d\n", rc);
		goto out_destroy_queue;
	}

	/* register the els sgl pool to the port */
	rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
				       phba->sli4_hba.els_xri_cnt);
	if (unlikely(rc < 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0582 Error %d during els sgl post "
				"operation\n", rc);
		rc = -ENODEV;
		goto out_destroy_queue;
	}
	phba->sli4_hba.els_xri_cnt = rc;

	if (phba->nvmet_support) {
		/* update host nvmet xri-sgl sizes and mappings */
		rc = lpfc_sli4_nvmet_sgl_update(phba);
		if (unlikely(rc)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"6308 Failed to update nvmet-sgl size "
					"and mapping: %d\n", rc);
			goto out_destroy_queue;
		}

		/* register the nvmet sgl pool to the port */
		rc = lpfc_sli4_repost_sgl_list(
			phba,
			&phba->sli4_hba.lpfc_nvmet_sgl_list,
			phba->sli4_hba.nvmet_xri_cnt);
		if (unlikely(rc < 0)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"3117 Error %d during nvmet "
					"sgl post\n", rc);
			rc = -ENODEV;
			goto out_destroy_queue;
		}
		phba->sli4_hba.nvmet_xri_cnt = rc;

		cnt = phba->cfg_iocb_cnt * 1024;
		/* We need 1 iocbq for every SGL, for IO processing */
		cnt += phba->sli4_hba.nvmet_xri_cnt;
	} else {
		/* update host scsi xri-sgl sizes and mappings */
		rc = lpfc_sli4_scsi_sgl_update(phba);
		if (unlikely(rc)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"6309 Failed to update scsi-sgl size "
					"and mapping: %d\n", rc);
			goto out_destroy_queue;
		}

		/* update host nvme xri-sgl sizes and mappings */
		rc = lpfc_sli4_nvme_sgl_update(phba);
		if (unlikely(rc)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"6082 Failed to update nvme-sgl size "
					"and mapping: %d\n", rc);
			goto out_destroy_queue;
		}

		cnt = phba->cfg_iocb_cnt * 1024;
	}

	if (!phba->sli.iocbq_lookup) {
		/* Initialize and populate the iocb list per host */
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2821 initialize iocb list %d total %d\n",
				phba->cfg_iocb_cnt, cnt);
		rc = lpfc_init_iocb_list(phba, cnt);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1413 Failed to init iocb list.\n");
			goto out_destroy_queue;
		}
	}

	if (phba->nvmet_support)
		lpfc_nvmet_create_targetport(phba);

	if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
		/* Post initial buffers to all RQs created */
		for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
			rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
			INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
			rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
			rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
			rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
			rqbp->buffer_count = 0;

			lpfc_post_rq_buffer(
				phba, phba->sli4_hba.nvmet_mrq_hdr[i],
				phba->sli4_hba.nvmet_mrq_data[i],
				LPFC_NVMET_RQE_DEF_COUNT, i);
		}
	}

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
		/* register the allocated scsi sgl pool to the port */
		rc = lpfc_sli4_repost_scsi_sgl_list(phba);
		if (unlikely(rc)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"0383 Error %d during scsi sgl post "
					"operation\n", rc);
			/* Some Scsi buffers were moved to abort scsi list */
			/* A pci function reset will repost them */
			rc = -ENODEV;
			goto out_destroy_queue;
		}
	}

	if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
	    (phba->nvmet_support == 0)) {

		/* register the allocated nvme sgl pool to the port */
		rc = lpfc_repost_nvme_sgl_list(phba);
		if (unlikely(rc)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"6116 Error %d during nvme sgl post "
					"operation\n", rc);
			/* Some NVME buffers were moved to abort nvme list */
			/* A pci function reset will repost them */
			rc = -ENODEV;
			goto out_destroy_queue;
		}
	}

	/* Post the rpi header region to the device. */
	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0393 Error %d during rpi post operation\n",
				rc);
		rc = -ENODEV;
		goto out_destroy_queue;
	}
	lpfc_sli4_node_prep(phba);

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
			/*
			 * The FC Port needs to register FCFI (index 0)
			 */
			lpfc_reg_fcfi(phba, mboxq);
			mboxq->vport = phba->pport;
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
			if (rc != MBX_SUCCESS)
				goto out_unset_queue;
			rc = 0;
			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
						&mboxq->u.mqe.un.reg_fcfi);
		} else {
			/* We are a NVME Target mode with MRQ > 1 */

			/* First register the FCFI */
			lpfc_reg_fcfi_mrq(phba, mboxq, 0);
			mboxq->vport = phba->pport;
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
			if (rc != MBX_SUCCESS)
				goto out_unset_queue;
			rc = 0;
			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
						&mboxq->u.mqe.un.reg_fcfi_mrq);

			/* Next register the MRQs */
			lpfc_reg_fcfi_mrq(phba, mboxq, 1);
			mboxq->vport = phba->pport;
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
			if (rc != MBX_SUCCESS)
				goto out_unset_queue;
			rc = 0;
		}
		/* Check if the port is configured to be disabled */
		lpfc_sli_read_link_ste(phba);
	}

	/* Arm the CQs and then EQs on device */
	lpfc_sli4_arm_cqeq_intr(phba);

	/* Indicate device interrupt mode */
	phba->sli4_hba.intr_enable = 1;

	/* Allow asynchronous mailbox command to go through */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* Post receive buffers to the device */
	lpfc_sli4_rb_setup(phba);

	/* Reset HBA FCF states after HBA reset */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/* Start the ELS watchdog timer */
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));

	/* Start heart beat timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;

	/* Start error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
		rc = pci_enable_pcie_error_reporting(phba->pcidev);
		if (!rc) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2829 This device supports "
					"Advanced Error Reporting (AER)\n");
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag |= HBA_AER_ENABLED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2830 This device does not support "
					"Advanced Error Reporting (AER)\n");
			phba->cfg_aer_support = 0;
		}
		rc = 0;
	}

	/*
	 * The port is ready, set the host's link state to LINK_DOWN
	 * in preparation for link interrupts.
	 */
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_DOWN;
	spin_unlock_irq(&phba->hbalock);
	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
	    (phba->hba_flag & LINK_DISABLED)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
				"3103 Adapter Link is disabled.\n");
		lpfc_down_link(phba, mboxq);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
					"3104 Adapter failed to issue "
					"DOWN_LINK mbox cmd, rc:x%x\n", rc);
			goto out_unset_queue;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		/* don't perform init_link on SLI4 FC port loopback test */
		if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
			rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
			if (rc)
				goto out_unset_queue;
		}
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
out_unset_queue:
	/* Unset all the queues set up in this routine when erroring out */
	lpfc_sli4_queue_unset(phba);
out_destroy_queue:
	lpfc_free_iocb_list(phba);
	lpfc_sli4_queue_destroy(phba);
out_stop_timers:
	lpfc_stop_hba_timers(phba);
out_free_mbox:
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_mbox_timeout - Timeout call back function for mbox timer
 * @t: pointer to the timer_list embedded in the hba structure.
 *
 * This is the callback function for the mailbox timer. The mailbox
 * timer is armed when a new mailbox command is issued and the timer
 * is deleted when the mailbox completes. The function is called by
 * the kernel timer code when a mailbox does not complete within
 * expected time. This function wakes up the worker thread to
 * process the mailbox timeout and returns. All the processing is
 * done by the worker thread function lpfc_mbox_timeout_handler.
 **/
void
lpfc_mbox_timeout(struct timer_list *t)
{
	struct lpfc_hba  *phba = from_timer(phba, t, sli.mbox_tmo);
	unsigned long iflag;
	uint32_t tmo_posted;

	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
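
/*
 * Illustrative arming of this timer by an issue path (a sketch, not a
 * quote of the issue code; the issue paths compute the timeout per
 * command with lpfc_mbox_tmo_val()):
 *
 *	mod_timer(&phba->sli.mbox_tmo, jiffies +
 *		  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, pmbox)));
 */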

/**
 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
 *                                      are pending
 * @phba: Pointer to HBA context object.
 *
 * This function checks if any mailbox completions are present on the mailbox
 * completion queue.
 **/
static bool
lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
{
	uint32_t idx;
	struct lpfc_queue *mcq;
	struct lpfc_mcqe *mcqe;
	bool pending_completions = false;
	uint8_t qe_valid;

	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Check for completions on mailbox completion queue */

	mcq = phba->sli4_hba.mbx_cq;
	idx = mcq->hba_index;
	qe_valid = mcq->qe_valid;
	while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe) == qe_valid) {
		mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
		if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
		    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
			pending_completions = true;
			break;
		}
		idx = (idx + 1) % mcq->entry_count;
		if (mcq->hba_index == idx)
			break;

		/* if the index wrapped around, toggle the valid bit */
		if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
			qe_valid = (qe_valid) ? 0 : 1;
	}
	return pending_completions;
}
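
/*
 * Note on the scan above: on ports with completion-queue auto-valid
 * (pc_sli4_params.cqav), the producer flips the CQE valid bit on every
 * wrap of the queue, so the expected qe_valid is toggled whenever idx
 * wraps back to 0 rather than being compared against a fixed value.
 */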

/**
 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
 *                                             that were missed.
 * @phba: Pointer to HBA context object.
 *
 * For sli4, it is possible to miss an interrupt. As such, mbox completions
 * may be missed, causing erroneous mailbox timeouts to occur. This function
 * checks to see if mbox completions are on the mailbox completion queue
 * and will process all the completions associated with the eq for the
 * mailbox completion queue.
 **/
bool
lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
	uint32_t eqidx;
	struct lpfc_queue *fpeq = NULL;
	struct lpfc_eqe *eqe;
	bool mbox_pending;

	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Find the eq associated with the mcq */

	if (sli4_hba->hba_eq)
		for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++)
			if (sli4_hba->hba_eq[eqidx]->queue_id ==
			    sli4_hba->mbx_cq->assoc_qid) {
				fpeq = sli4_hba->hba_eq[eqidx];
				break;
			}
	if (!fpeq)
		return false;

	/* Turn off interrupts from this EQ */

	sli4_hba->sli4_eq_clr_intr(fpeq);

	/* Check to see if a mbox completion is pending */

	mbox_pending = lpfc_sli4_mbox_completions_pending(phba);

	/*
	 * If a mbox completion is pending, process all the events on EQ
	 * associated with the mbox completion queue (this could include
	 * mailbox commands, async events, els commands, receive queue data
	 * and fcp commands)
	 */

	if (mbox_pending)
		while ((eqe = lpfc_sli4_eq_get(fpeq))) {
			lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
			fpeq->EQ_processed++;
		}

	/* Always clear and re-arm the EQ */

	sli4_hba->sli4_eq_release(fpeq, LPFC_QUEUE_REARM);

	return mbox_pending;
}
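
/*
 * Illustrative usage (as in lpfc_mbox_timeout_handler below): call this
 * first when a mailbox timeout fires; a true return means the timeout
 * was spurious and the missed completion has now been processed.
 */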

/**
 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
 * @phba: Pointer to HBA context object.
 *
 * This function is called from worker thread when a mailbox command times out.
 * The caller is not required to hold any locks. This function will reset the
 * HBA and recover all the pending commands.
 **/
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
	MAILBOX_t *mb = NULL;
	struct lpfc_sli *psli = &phba->sli;

	/* If the mailbox completed, process the completion and return */
	if (lpfc_sli4_process_missed_mbox_completions(phba))
		return;

	if (pmbox != NULL)
		mb = &pmbox->u.mb;
	/* Check the pmbox pointer first.  There is a race condition
	 * between the mbox timeout handler getting executed in the
	 * worklist and the mailbox actually completing. When this
	 * race condition occurs, the mbox_active will be NULL.
	 */
	spin_lock_irq(&phba->hbalock);
	if (pmbox == NULL) {
		lpfc_printf_log(phba, KERN_WARNING,
				LOG_MBOX | LOG_SLI,
				"0353 Active Mailbox cleared - mailbox timeout "
				"exiting\n");
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* Mbox cmd <mbxCommand> timeout */
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
			mb->mbxCommand,
			phba->pport->port_state,
			phba->sli.sli_flag,
			phba->sli.mbox_active);
	spin_unlock_irq(&phba->hbalock);

	/* Setting state unknown so lpfc_sli_abort_iocb_ring
	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
	 * it to fail all outstanding SCSI IO.
	 */
	spin_lock_irq(&phba->pport->work_port_lock);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irq(&phba->pport->work_port_lock);
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_UNKNOWN;
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	lpfc_sli_abort_fcp_rings(phba);

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0345 Resetting board due to mailbox timeout\n");

	/* Reset the HBA device */
	lpfc_reset_hba(phba);
}
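
/*
 * Recovery summary for the handler above: clear the WORKER_MBOX_TMO
 * event, mark the link state unknown so outstanding I/O fails fast,
 * abort the FCP rings, then reset the HBA via lpfc_reset_hba().
 */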

/**
 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This function is called by discovery code and HBA management code
 * to submit a mailbox command to firmware with SLI-3 interface spec. This
 * function gets the hbalock to protect the data structures.
 * The mailbox command can be submitted in polling mode, in which case
 * this function will wait in a polling loop for the completion of the
 * mailbox.
 * If the mailbox is submitted in no_wait mode (not polling) the
 * function will submit the command and return immediately without waiting
 * for the mailbox completion. The no_wait is supported only when HBA
 * is in SLI2/SLI3 mode - interrupts are enabled.
 * The SLI interface allows only one mailbox pending at a time. If the
 * mailbox is issued in polling mode and there is already a mailbox
 * pending, then the function will return an error. If the mailbox is issued
 * in NO_WAIT mode and there is a mailbox pending already, the function
 * will return MBX_BUSY after queuing the mailbox into the mailbox queue.
 * The sli layer owns the mailbox object until the completion of the mailbox
 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
 * return codes the caller owns the mailbox command after the return of
 * the function.
 **/
static int
lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
		       uint32_t flag)
{
	MAILBOX_t *mbx;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, evtctr;
	uint32_t ha_copy, hc_copy;
	int i;
	unsigned long timeout;
	unsigned long drvr_flag = 0;
	uint32_t word0, ldata;
	void __iomem *to_slim;
	int processing_queue = 0;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	if (!pmbox) {
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		/* processing mbox queue from intr_handler */
		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
		processing_queue = 1;
		pmbox = lpfc_mbox_get(phba);
		if (!pmbox) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
	}

	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
		pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
		if (!pmbox->vport) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_VPORT,
					"1806 Mbox x%x failed. No vport\n",
					pmbox->u.mb.mbxCommand);
			dump_stack();
			goto out_not_finished;
		}
	}

	/* If the PCI channel is in offline state, do not post mbox. */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	psli = &phba->sli;

	mbx = &pmbox->u.mb;
	status = MBX_SUCCESS;

	if (phba->link_state == LPFC_HBA_ERROR) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		/* Mbox command <mbxCommand> cannot issue */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):0311 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
		goto out_not_finished;
	}

	if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
		if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
			!(hc_copy & HC_MBINT_ENA)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2528 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
			goto out_not_finished;
		}
	}

	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/* Polling for a mbox command when another one is already active
		 * is not allowed in SLI. Also, the driver must have established
		 * SLI2 mode to queue and process multiple mbox commands.
		 */

		if (flag & MBX_POLL) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2529 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2530 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
7702                                         pmbox->u.mb.mbxCommand,
7703                                         psli->sli_flag, flag);
7704                         goto out_not_finished;
7705                 }
7706
7707                 /* Another mailbox command is still being processed, queue this
7708                  * command to be processed later.
7709                  */
7710                 lpfc_mbox_put(phba, pmbox);
7711
7712                 /* Mbox cmd issue - BUSY */
7713                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7714                                 "(%d):0308 Mbox cmd issue - BUSY Data: "
7715                                 "x%x x%x x%x x%x\n",
7716                                 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
7717                                 mbx->mbxCommand,
7718                                 phba->pport ? phba->pport->port_state : 0xff,
7719                                 psli->sli_flag, flag);
7720
7721                 psli->slistat.mbox_busy++;
7722                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7723
7724                 if (pmbox->vport) {
7725                         lpfc_debugfs_disc_trc(pmbox->vport,
7726                                 LPFC_DISC_TRC_MBOX_VPORT,
7727                                 "MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
7728                                 (uint32_t)mbx->mbxCommand,
7729                                 mbx->un.varWords[0], mbx->un.varWords[1]);
7730                 } else {
7732                         lpfc_debugfs_disc_trc(phba->pport,
7733                                 LPFC_DISC_TRC_MBOX,
7734                                 "MBOX Bsy:        cmd:x%x mb:x%x x%x",
7735                                 (uint32_t)mbx->mbxCommand,
7736                                 mbx->un.varWords[0], mbx->un.varWords[1]);
7737                 }
7738
7739                 return MBX_BUSY;
7740         }
7741
7742         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7743
7744         /* If we are not polling, we MUST be in SLI2 mode */
7745         if (flag != MBX_POLL) {
7746                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
7747                     (mbx->mbxCommand != MBX_KILL_BOARD)) {
7748                         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7749                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7750                         /* Mbox command <mbxCommand> cannot issue */
7751                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7752                                         "(%d):2531 Mailbox command x%x "
7753                                         "cannot issue Data: x%x x%x\n",
7754                                         pmbox->vport ? pmbox->vport->vpi : 0,
7755                                         pmbox->u.mb.mbxCommand,
7756                                         psli->sli_flag, flag);
7757                         goto out_not_finished;
7758                 }
7759                 /* timeout active mbox command */
7760                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7761                                            1000);
7762                 mod_timer(&psli->mbox_tmo, jiffies + timeout);
7763         }
7764
7765         /* Mailbox cmd <cmd> issue */
7766         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7767                         "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
7768                         "x%x\n",
7769                         pmbox->vport ? pmbox->vport->vpi : 0,
7770                         mbx->mbxCommand,
7771                         phba->pport ? phba->pport->port_state : 0xff,
7772                         psli->sli_flag, flag);
7773
7774         if (mbx->mbxCommand != MBX_HEARTBEAT) {
7775                 if (pmbox->vport) {
7776                         lpfc_debugfs_disc_trc(pmbox->vport,
7777                                 LPFC_DISC_TRC_MBOX_VPORT,
7778                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
7779                                 (uint32_t)mbx->mbxCommand,
7780                                 mbx->un.varWords[0], mbx->un.varWords[1]);
7781                 } else {
7783                         lpfc_debugfs_disc_trc(phba->pport,
7784                                 LPFC_DISC_TRC_MBOX,
7785                                 "MBOX Send:       cmd:x%x mb:x%x x%x",
7786                                 (uint32_t)mbx->mbxCommand,
7787                                 mbx->un.varWords[0], mbx->un.varWords[1]);
7788                 }
7789         }
7790
7791         psli->slistat.mbox_cmd++;
7792         evtctr = psli->slistat.mbox_event;
7793
7794         /* next set own bit for the adapter and copy over command word */
7795         mbx->mbxOwner = OWN_CHIP;
7796
7797         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7798                 /* Populate mbox extension offset word. */
7799                 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
7800                         *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7801                                 = (uint8_t *)phba->mbox_ext
7802                                   - (uint8_t *)phba->mbox;
7803                 }
7804
7805                 /* Copy the mailbox extension data */
7806                 if (pmbox->in_ext_byte_len && pmbox->context2) {
7807                         lpfc_sli_pcimem_bcopy(pmbox->context2,
7808                                 (uint8_t *)phba->mbox_ext,
7809                                 pmbox->in_ext_byte_len);
7810                 }
7811                 /* Copy command data to host SLIM area */
7812                 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
7813         } else {
7814                 /* Populate mbox extension offset word. */
7815                 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
7816                         *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7817                                 = MAILBOX_HBA_EXT_OFFSET;
7818
7819                 /* Copy the mailbox extension data */
7820                 if (pmbox->in_ext_byte_len && pmbox->context2)
7821                         lpfc_memcpy_to_slim(phba->MBslimaddr +
7822                                 MAILBOX_HBA_EXT_OFFSET,
7823                                 pmbox->context2, pmbox->in_ext_byte_len);
7824
7825                 if (mbx->mbxCommand == MBX_CONFIG_PORT)
7826                         /* copy command data into host mbox for cmpl */
7827                         lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
7828                                               MAILBOX_CMD_SIZE);
7829
7830                 /* First copy mbox command data to HBA SLIM, skip past first
7831                    word */
7832                 to_slim = phba->MBslimaddr + sizeof (uint32_t);
7833                 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
7834                             MAILBOX_CMD_SIZE - sizeof (uint32_t));
7835
7836                 /* Next copy over first word, with mbxOwner set */
7837                 ldata = *((uint32_t *)mbx);
7838                 to_slim = phba->MBslimaddr;
7839                 writel(ldata, to_slim);
7840                 readl(to_slim); /* flush */
7841
7842                 if (mbx->mbxCommand == MBX_CONFIG_PORT)
7843                         /* switch over to host mailbox */
7844                         psli->sli_flag |= LPFC_SLI_ACTIVE;
7845         }
7846
7847         wmb();
7848
7849         switch (flag) {
7850         case MBX_NOWAIT:
7851                 /* Set up reference to mailbox command */
7852                 psli->mbox_active = pmbox;
7853                 /* Interrupt board to do it */
7854                 writel(CA_MBATT, phba->CAregaddr);
7855                 readl(phba->CAregaddr); /* flush */
7856                 /* Don't wait for it to finish, just return */
7857                 break;
7858
7859         case MBX_POLL:
7860                 /* Set up null reference to mailbox command */
7861                 psli->mbox_active = NULL;
7862                 /* Interrupt board to do it */
7863                 writel(CA_MBATT, phba->CAregaddr);
7864                 readl(phba->CAregaddr); /* flush */
7865
7866                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7867                         /* First read mbox status word */
7868                         word0 = *((uint32_t *)phba->mbox);
7869                         word0 = le32_to_cpu(word0);
7870                 } else {
7871                         /* First read mbox status word */
7872                         if (lpfc_readl(phba->MBslimaddr, &word0)) {
7873                                 spin_unlock_irqrestore(&phba->hbalock,
7874                                                        drvr_flag);
7875                                 goto out_not_finished;
7876                         }
7877                 }
7878
7879                 /* Read the HBA Host Attention Register */
7880                 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7881                         spin_unlock_irqrestore(&phba->hbalock,
7882                                                        drvr_flag);
7883                         goto out_not_finished;
7884                 }
7885                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7886                                                         1000) + jiffies;
7887                 i = 0;
7888                 /* Wait for command to complete */
7889                 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
7890                        (!(ha_copy & HA_MBATT) &&
7891                         (phba->link_state > LPFC_WARM_START))) {
7892                         if (time_after(jiffies, timeout)) {
7893                                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7894                                 spin_unlock_irqrestore(&phba->hbalock,
7895                                                        drvr_flag);
7896                                 goto out_not_finished;
7897                         }
7898
7899                         /* Check if we took an mbox interrupt while we were
7900                            polling */
7901                         if (((word0 & OWN_CHIP) != OWN_CHIP)
7902                             && (evtctr != psli->slistat.mbox_event))
7903                                 break;
7904
7905                         if (i++ > 10) {
7906                                 spin_unlock_irqrestore(&phba->hbalock,
7907                                                        drvr_flag);
7908                                 msleep(1);
7909                                 spin_lock_irqsave(&phba->hbalock, drvr_flag);
7910                         }
7911
7912                         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7913                                 /* First copy command data */
7914                                 word0 = *((uint32_t *)phba->mbox);
7915                                 word0 = le32_to_cpu(word0);
7916                                 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
7917                                         MAILBOX_t *slimmb;
7918                                         uint32_t slimword0;
7919                                         /* Check real SLIM for any errors */
7920                                         slimword0 = readl(phba->MBslimaddr);
7921                                         slimmb = (MAILBOX_t *)&slimword0;
7922                                         if (((slimword0 & OWN_CHIP) != OWN_CHIP)
7923                                             && slimmb->mbxStatus) {
7924                                                 psli->sli_flag &=
7925                                                     ~LPFC_SLI_ACTIVE;
7926                                                 word0 = slimword0;
7927                                         }
7928                                 }
7929                         } else {
7930                                 /* First copy command data */
7931                                 word0 = readl(phba->MBslimaddr);
7932                         }
7933                         /* Read the HBA Host Attention Register */
7934                         if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7935                                 spin_unlock_irqrestore(&phba->hbalock,
7936                                                        drvr_flag);
7937                                 goto out_not_finished;
7938                         }
7939                 }
7940
7941                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7942                         /* copy results back to user */
7943                         lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
7944                                                 MAILBOX_CMD_SIZE);
7945                         /* Copy the mailbox extension data */
7946                         if (pmbox->out_ext_byte_len && pmbox->context2) {
7947                                 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
7948                                                       pmbox->context2,
7949                                                       pmbox->out_ext_byte_len);
7950                         }
7951                 } else {
7952                         /* First copy command data */
7953                         lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
7954                                                 MAILBOX_CMD_SIZE);
7955                         /* Copy the mailbox extension data */
7956                         if (pmbox->out_ext_byte_len && pmbox->context2) {
7957                                 lpfc_memcpy_from_slim(pmbox->context2,
7958                                         phba->MBslimaddr +
7959                                         MAILBOX_HBA_EXT_OFFSET,
7960                                         pmbox->out_ext_byte_len);
7961                         }
7962                 }
7963
7964                 writel(HA_MBATT, phba->HAregaddr);
7965                 readl(phba->HAregaddr); /* flush */
7966
7967                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7968                 status = mbx->mbxStatus;
7969         }
7970
7971         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7972         return status;
7973
7974 out_not_finished:
7975         if (processing_queue) {
7976                 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
7977                 lpfc_mbox_cmpl_put(phba, pmbox);
7978         }
7979         return MBX_NOT_FINISHED;
7980 }
7981
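/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * honoring the ownership rules documented above. lpfc_read_rev() and the
 * phba->mbox_mem_pool mempool exist elsewhere in the driver; their use here
 * is an assumption about a typical call site, with error handling trimmed.
 *
 *	LPFC_MBOXQ_t *mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	int rc;
 *
 *	if (!mb)
 *		return -ENOMEM;
 *	lpfc_read_rev(phba, mb);
 *	rc = lpfc_sli_issue_mbox(phba, mb, MBX_POLL);
 *	if (rc != MBX_BUSY && rc != MBX_SUCCESS)
 *		mempool_free(mb, phba->mbox_mem_pool);
 *
 * On MBX_SUCCESS in polling mode the command has already completed, so the
 * caller may read the results from mb->u.mb and then free the mailbox the
 * same way.
 */
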
7982 /**
7983  * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
7984  * @phba: Pointer to HBA context object.
7985  *
7986  * The function blocks the posting of SLI4 asynchronous mailbox commands from
7987  * the driver internal pending mailbox queue. It will then try to wait out any
7988  * outstanding mailbox command before returning.
7989  *
7990  * Returns:
7991  *      0 - the outstanding mailbox command completed.
7992  *      1 - the wait for the outstanding mailbox command timed out.
7993  **/
7994 static int
7995 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
7996 {
7997         struct lpfc_sli *psli = &phba->sli;
7998         int rc = 0;
7999         unsigned long timeout = 0;
8000
8001         /* Mark the asynchronous mailbox command posting as blocked */
8002         spin_lock_irq(&phba->hbalock);
8003         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8004         /* Determine how long we might wait for the active mailbox
8005          * command to be gracefully completed by firmware.
8006          */
8007         if (phba->sli.mbox_active)
8008                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8009                                                 phba->sli.mbox_active) *
8010                                                 1000) + jiffies;
8011         spin_unlock_irq(&phba->hbalock);
8012
8013         /* Make sure the mailbox is really active */
8014         if (timeout)
8015                 lpfc_sli4_process_missed_mbox_completions(phba);
8016
8017         /* Wait for the outstanding mailbox command to complete */
8018         while (phba->sli.mbox_active) {
8019                 /* Check active mailbox complete status every 2ms */
8020                 msleep(2);
8021                 if (time_after(jiffies, timeout)) {
8022                         /* Timed out; mark the outstanding cmd as not complete */
8023                         rc = 1;
8024                         break;
8025                 }
8026         }
8027
8028         /* Could not cleanly block async mailbox command posting; fail it */
8029         if (rc) {
8030                 spin_lock_irq(&phba->hbalock);
8031                 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8032                 spin_unlock_irq(&phba->hbalock);
8033         }
8034         return rc;
8035 }
8036
8037 /**
8038  * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
8039  * @phba: Pointer to HBA context object.
8040  *
8041  * The function unblocks and resumes posting of SLI4 asynchronous mailbox
8042  * commands from the driver internal pending mailbox queue. It makes sure
8043  * that there is no outstanding mailbox command before resuming posting
8044  * asynchronous mailbox commands. If, for any reason, there is an
8045  * outstanding mailbox command, it will try to wait it out before resuming
8046  * asynchronous mailbox command posting.
8047  **/
8048 static void
8049 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8050 {
8051         struct lpfc_sli *psli = &phba->sli;
8052
8053         spin_lock_irq(&phba->hbalock);
8054         if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8055                 /* Asynchronous mailbox posting is not blocked, do nothing */
8056                 spin_unlock_irq(&phba->hbalock);
8057                 return;
8058         }
8059
8060         /* The outstanding synchronous mailbox command is guaranteed to be
8061          * done, whether it succeeded or timed out; a timed-out command is
8062          * always removed. So just unblock posting of async mailbox commands
8063          * and resume.
8064          */
8065         psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8066         spin_unlock_irq(&phba->hbalock);
8067
8068         /* wake up worker thread to post asynchronous mailbox command */
8069         lpfc_worker_wake_up(phba);
8070 }
8071
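/*
 * Pairing sketch: these two helpers bracket a synchronous bootstrap-mailbox
 * operation, exactly as lpfc_sli_issue_mbox_s4() does further below when a
 * polled command must cut ahead of the async mailbox queue:
 *
 *	rc = lpfc_sli4_async_mbox_block(phba);
 *	if (!rc) {
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *		lpfc_sli4_async_mbox_unblock(phba);
 *	}
 */
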
8072 /**
8073  * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8074  * @phba: Pointer to HBA context object.
8075  * @mboxq: Pointer to mailbox object.
8076  *
8077  * The function waits for the bootstrap mailbox register ready bit from
8078  * the port for up to the regular mailbox command timeout value.
8079  * Returns:
8080  *      0 - no timeout on waiting for bootstrap mailbox register ready.
8081  *      MBXERR_ERROR - wait for bootstrap mailbox register timed out.
8082  **/
8083 static int
8084 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8085 {
8086         uint32_t db_ready;
8087         unsigned long timeout;
8088         struct lpfc_register bmbx_reg;
8089
8090         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8091                                    * 1000) + jiffies;
8092
8093         do {
8094                 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8095                 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8096                 if (!db_ready)
8097                         msleep(2);
8098
8099                 if (time_after(jiffies, timeout))
8100                         return MBXERR_ERROR;
8101         } while (!db_ready);
8102
8103         return 0;
8104 }
8105
8106 /**
8107  * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
8108  * @phba: Pointer to HBA context object.
8109  * @mboxq: Pointer to mailbox object.
8110  *
8111  * The function posts a mailbox to the port.  The mailbox is expected
8112  * to be completely filled in and ready for the port to operate on it.
8113  * This routine executes a synchronous completion operation on the
8114  * mailbox by polling for its completion.
8115  *
8116  * The caller must not be holding any locks when calling this routine.
8117  *
8118  * Returns:
8119  *      MBX_SUCCESS - mailbox posted successfully
8120  *      Any of the MBX error values.
8121  **/
8122 static int
8123 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8124 {
8125         int rc = MBX_SUCCESS;
8126         unsigned long iflag;
8127         uint32_t mcqe_status;
8128         uint32_t mbx_cmnd;
8129         struct lpfc_sli *psli = &phba->sli;
8130         struct lpfc_mqe *mb = &mboxq->u.mqe;
8131         struct lpfc_bmbx_create *mbox_rgn;
8132         struct dma_address *dma_address;
8133
8134         /*
8135          * Only one mailbox can be active to the bootstrap mailbox region
8136          * at a time and there is no queueing provided.
8137          */
8138         spin_lock_irqsave(&phba->hbalock, iflag);
8139         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8140                 spin_unlock_irqrestore(&phba->hbalock, iflag);
8141                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8142                                 "(%d):2532 Mailbox command x%x (x%x/x%x) "
8143                                 "cannot issue Data: x%x x%x\n",
8144                                 mboxq->vport ? mboxq->vport->vpi : 0,
8145                                 mboxq->u.mb.mbxCommand,
8146                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8147                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8148                                 psli->sli_flag, MBX_POLL);
8149                 return MBXERR_ERROR;
8150         }
8151         /* The driver grabs the token and owns it until release */
8152         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8153         phba->sli.mbox_active = mboxq;
8154         spin_unlock_irqrestore(&phba->hbalock, iflag);
8155
8156         /* Wait for the bootstrap mbox register to become ready */
8157         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8158         if (rc)
8159                 goto exit;
8160
8161         /*
8162          * Initialize the bootstrap memory region to avoid stale data areas
8163          * in the mailbox post.  Then copy the caller's mailbox contents to
8164          * the bmbx mailbox region.
8165          */
8166         mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8167         memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
8168         lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8169                               sizeof(struct lpfc_mqe));
8170
8171         /* Post the high mailbox dma address to the port and wait for ready. */
8172         dma_address = &phba->sli4_hba.bmbx.dma_address;
8173         writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8174
8175         /* Wait for the bootstrap mbox register to ack the hi-address write */
8176         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8177         if (rc)
8178                 goto exit;
8179
8180         /* Post the low mailbox dma address to the port. */
8181         writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
8182
8183         /* Wait for the bootstrap mbox register to ack the low-address write */
8184         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8185         if (rc)
8186                 goto exit;
8187
8188         /*
8189          * Read the CQ to ensure the mailbox has completed.
8190          * If so, update the mailbox status so that the upper layers
8191          * can complete the request normally.
8192          */
8193         lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8194                               sizeof(struct lpfc_mqe));
8195         mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8196         lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8197                               sizeof(struct lpfc_mcqe));
8198         mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8199         /*
8200          * When the CQE status indicates a failure and the mailbox status
8201          * indicates success then copy the CQE status into the mailbox status
8202          * (and prefix it with x4000).
8203          */
8204         if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
8205                 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
8206                         bf_set(lpfc_mqe_status, mb,
8207                                (LPFC_MBX_ERROR_RANGE | mcqe_status));
8208                 rc = MBXERR_ERROR;
8209         } else
8210                 lpfc_sli4_swap_str(phba, mboxq);
8211
8212         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8213                         "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
8214                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
8215                         " x%x x%x CQ: x%x x%x x%x x%x\n",
8216                         mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8217                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8218                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8219                         bf_get(lpfc_mqe_status, mb),
8220                         mb->un.mb_words[0], mb->un.mb_words[1],
8221                         mb->un.mb_words[2], mb->un.mb_words[3],
8222                         mb->un.mb_words[4], mb->un.mb_words[5],
8223                         mb->un.mb_words[6], mb->un.mb_words[7],
8224                         mb->un.mb_words[8], mb->un.mb_words[9],
8225                         mb->un.mb_words[10], mb->un.mb_words[11],
8226                         mb->un.mb_words[12], mboxq->mcqe.word0,
8227                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
8228                         mboxq->mcqe.trailer);
8229 exit:
8230         /* We are holding the token, so no lock is needed when releasing */
8231         spin_lock_irqsave(&phba->hbalock, iflag);
8232         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8233         phba->sli.mbox_active = NULL;
8234         spin_unlock_irqrestore(&phba->hbalock, iflag);
8235         return rc;
8236 }
8237
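/*
 * Worked example of the status merge above: with LPFC_MBX_ERROR_RANGE equal
 * to 0x4000 (the "x4000" prefix the comment refers to), an MCQE status of
 * 0x3 reported against an otherwise successful mailbox is stored as 0x4003,
 * letting upper layers tell CQE-derived failures apart from plain mailbox
 * status codes.
 */
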
8238 /**
8239  * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
8240  * @phba: Pointer to HBA context object.
8241  * @mboxq: Pointer to mailbox object.
8242  * @flag: Flag indicating how the mailbox needs to be processed.
8243  *
8244  * This function is called by discovery code and HBA management code to submit
8245  * a mailbox command to firmware with SLI-4 interface spec.
8246  *
8247  * For all return codes, the caller owns the mailbox command after the
8248  * function returns.
8249  **/
8250 static int
8251 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
8252                        uint32_t flag)
8253 {
8254         struct lpfc_sli *psli = &phba->sli;
8255         unsigned long iflags;
8256         int rc;
8257
8258         /* Dump the mailbox command at issue time if idiag capture is set up */
8259         lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
8260
8261         rc = lpfc_mbox_dev_check(phba);
8262         if (unlikely(rc)) {
8263                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8264                                 "(%d):2544 Mailbox command x%x (x%x/x%x) "
8265                                 "cannot issue Data: x%x x%x\n",
8266                                 mboxq->vport ? mboxq->vport->vpi : 0,
8267                                 mboxq->u.mb.mbxCommand,
8268                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8269                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8270                                 psli->sli_flag, flag);
8271                 goto out_not_finished;
8272         }
8273
8274         /* Detect polling mode and jump to a handler */
8275         if (!phba->sli4_hba.intr_enable) {
8276                 if (flag == MBX_POLL)
8277                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8278                 else
8279                         rc = -EIO;
8280                 if (rc != MBX_SUCCESS)
8281                         lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8282                                         "(%d):2541 Mailbox command x%x "
8283                                         "(x%x/x%x) failure: "
8284                                         "mqe_sta: x%x mcqe_sta: x%x/x%x "
8285                                         "Data: x%x x%x\n",
8286                                         mboxq->vport ? mboxq->vport->vpi : 0,
8287                                         mboxq->u.mb.mbxCommand,
8288                                         lpfc_sli_config_mbox_subsys_get(phba,
8289                                                                         mboxq),
8290                                         lpfc_sli_config_mbox_opcode_get(phba,
8291                                                                         mboxq),
8292                                         bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8293                                         bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8294                                         bf_get(lpfc_mcqe_ext_status,
8295                                                &mboxq->mcqe),
8296                                         psli->sli_flag, flag);
8297                 return rc;
8298         } else if (flag == MBX_POLL) {
8299                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8300                                 "(%d):2542 Try to issue mailbox command "
8301                                 "x%x (x%x/x%x) synchronously ahead of async "
8302                                 "mailbox command queue: x%x x%x\n",
8303                                 mboxq->vport ? mboxq->vport->vpi : 0,
8304                                 mboxq->u.mb.mbxCommand,
8305                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8306                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8307                                 psli->sli_flag, flag);
8308                 /* Try to block the asynchronous mailbox posting */
8309                 rc = lpfc_sli4_async_mbox_block(phba);
8310                 if (!rc) {
8311                         /* Successfully blocked, now issue sync mbox cmd */
8312                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8313                         if (rc != MBX_SUCCESS)
8314                                 lpfc_printf_log(phba, KERN_WARNING,
8315                                         LOG_MBOX | LOG_SLI,
8316                                         "(%d):2597 Sync Mailbox command "
8317                                         "x%x (x%x/x%x) failure: "
8318                                         "mqe_sta: x%x mcqe_sta: x%x/x%x "
8319                                         "Data: x%x x%x\n",
8320                                         mboxq->vport ? mboxq->vport->vpi : 0,
8321                                         mboxq->u.mb.mbxCommand,
8322                                         lpfc_sli_config_mbox_subsys_get(phba,
8323                                                                         mboxq),
8324                                         lpfc_sli_config_mbox_opcode_get(phba,
8325                                                                         mboxq),
8326                                         bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8327                                         bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8328                                         bf_get(lpfc_mcqe_ext_status,
8329                                                &mboxq->mcqe),
8330                                         psli->sli_flag, flag);
8331                         /* Unblock the async mailbox posting afterward */
8332                         lpfc_sli4_async_mbox_unblock(phba);
8333                 }
8334                 return rc;
8335         }
8336
8337         /* Now, interrupt mode asynchronous mailbox command */
8338         rc = lpfc_mbox_cmd_check(phba, mboxq);
8339         if (rc) {
8340                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8341                                 "(%d):2543 Mailbox command x%x (x%x/x%x) "
8342                                 "cannot issue Data: x%x x%x\n",
8343                                 mboxq->vport ? mboxq->vport->vpi : 0,
8344                                 mboxq->u.mb.mbxCommand,
8345                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8346                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8347                                 psli->sli_flag, flag);
8348                 goto out_not_finished;
8349         }
8350
8351         /* Put the mailbox command to the driver internal FIFO */
8352         psli->slistat.mbox_busy++;
8353         spin_lock_irqsave(&phba->hbalock, iflags);
8354         lpfc_mbox_put(phba, mboxq);
8355         spin_unlock_irqrestore(&phba->hbalock, iflags);
8356         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8357                         "(%d):0354 Mbox cmd issue - Enqueue Data: "
8358                         "x%x (x%x/x%x) x%x x%x x%x\n",
8359                         mboxq->vport ? mboxq->vport->vpi : 0xffffff,
8360                         bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8361                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8362                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8363                         phba->pport->port_state,
8364                         psli->sli_flag, MBX_NOWAIT);
8365         /* Wake up worker thread to post the mailbox command at the queue head */
8366         lpfc_worker_wake_up(phba);
8367
8368         return MBX_BUSY;
8369
8370 out_not_finished:
8371         return MBX_NOT_FINISHED;
8372 }
8373
8374 /**
8375  * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
8376  * @phba: Pointer to HBA context object.
8377  *
8378  * This function is called by worker thread to send a mailbox command to
8379  * SLI4 HBA firmware.
8380  *
8381  **/
8382 int
8383 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
8384 {
8385         struct lpfc_sli *psli = &phba->sli;
8386         LPFC_MBOXQ_t *mboxq;
8387         int rc = MBX_SUCCESS;
8388         unsigned long iflags;
8389         struct lpfc_mqe *mqe;
8390         uint32_t mbx_cmnd;
8391
8392         /* Check interrupt mode before posting an async mailbox command */
8393         if (unlikely(!phba->sli4_hba.intr_enable))
8394                 return MBX_NOT_FINISHED;
8395
8396         /* Check for mailbox command service token */
8397         spin_lock_irqsave(&phba->hbalock, iflags);
8398         if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8399                 spin_unlock_irqrestore(&phba->hbalock, iflags);
8400                 return MBX_NOT_FINISHED;
8401         }
8402         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8403                 spin_unlock_irqrestore(&phba->hbalock, iflags);
8404                 return MBX_NOT_FINISHED;
8405         }
8406         if (unlikely(phba->sli.mbox_active)) {
8407                 spin_unlock_irqrestore(&phba->hbalock, iflags);
8408                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8409                                 "0384 There is pending active mailbox cmd\n");
8410                 return MBX_NOT_FINISHED;
8411         }
8412         /* Take the mailbox command service token */
8413         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8414
8415         /* Get the next mailbox command from head of queue */
8416         mboxq = lpfc_mbox_get(phba);
8417
8418         /* If no mailbox command is waiting to be posted, we're done */
8419         if (!mboxq) {
8420                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8421                 spin_unlock_irqrestore(&phba->hbalock, iflags);
8422                 return MBX_SUCCESS;
8423         }
8424         phba->sli.mbox_active = mboxq;
8425         spin_unlock_irqrestore(&phba->hbalock, iflags);
8426
8427         /* Check device readiness for posting mailbox command */
8428         rc = lpfc_mbox_dev_check(phba);
8429         if (unlikely(rc))
8430                 /* Driver clean routine will clean up pending mailbox */
8431                 goto out_not_finished;
8432
8433         /* Prepare the mbox command to be posted */
8434         mqe = &mboxq->u.mqe;
8435         mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
8436
8437         /* Start timer for the mbox_tmo and log some mailbox post messages */
8438         mod_timer(&psli->mbox_tmo, (jiffies +
8439                   msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
8440
8441         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8442                         "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
8443                         "x%x x%x\n",
8444                         mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8445                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8446                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8447                         phba->pport->port_state, psli->sli_flag);
8448
8449         if (mbx_cmnd != MBX_HEARTBEAT) {
8450                 if (mboxq->vport) {
8451                         lpfc_debugfs_disc_trc(mboxq->vport,
8452                                 LPFC_DISC_TRC_MBOX_VPORT,
8453                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8454                                 mbx_cmnd, mqe->un.mb_words[0],
8455                                 mqe->un.mb_words[1]);
8456                 } else {
8457                         lpfc_debugfs_disc_trc(phba->pport,
8458                                 LPFC_DISC_TRC_MBOX,
8459                                 "MBOX Send: cmd:x%x mb:x%x x%x",
8460                                 mbx_cmnd, mqe->un.mb_words[0],
8461                                 mqe->un.mb_words[1]);
8462                 }
8463         }
8464         psli->slistat.mbox_cmd++;
8465
8466         /* Post the mailbox command to the port */
8467         rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
8468         if (rc != MBX_SUCCESS) {
8469                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8470                                 "(%d):2533 Mailbox command x%x (x%x/x%x) "
8471                                 "cannot issue Data: x%x x%x\n",
8472                                 mboxq->vport ? mboxq->vport->vpi : 0,
8473                                 mboxq->u.mb.mbxCommand,
8474                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8475                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8476                                 psli->sli_flag, MBX_NOWAIT);
8477                 goto out_not_finished;
8478         }
8479
8480         return rc;
8481
8482 out_not_finished:
8483         spin_lock_irqsave(&phba->hbalock, iflags);
8484         if (phba->sli.mbox_active) {
8485                 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8486                 __lpfc_mbox_cmpl_put(phba, mboxq);
8487                 /* Release the token */
8488                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8489                 phba->sli.mbox_active = NULL;
8490         }
8491         spin_unlock_irqrestore(&phba->hbalock, iflags);
8492
8493         return MBX_NOT_FINISHED;
8494 }
8495
8496 /**
8497  * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
8498  * @phba: Pointer to HBA context object.
8499  * @pmbox: Pointer to mailbox object.
8500  * @flag: Flag indicating how the mailbox needs to be processed.
8501  *
8502  * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine,
8503  * selected via the API jump table function pointer in the lpfc_hba struct.
8504  *
8505  * For all return codes, the caller owns the mailbox command after the
8506  * function returns.
8507  **/
8508 int
8509 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
8510 {
8511         return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
8512 }
8513
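/*
 * Dispatch sketch: once lpfc_mbox_api_table_setup() below has populated the
 * jump table for the detected device group, every caller goes through this
 * wrapper and lands in the matching routine:
 *
 *	lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC);
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 *
 * which resolves to lpfc_sli_issue_mbox_s4() for the LPFC_PCI_DEV_OC group.
 */
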
8514 /**
8515  * lpfc_mbox_api_table_setup - Set up mbox api function jump table
8516  * @phba: The hba struct for which this call is being executed.
8517  * @dev_grp: The HBA PCI-Device group number.
8518  *
8519  * This routine sets up the mbox interface API function jump table in @phba
8520  * struct.
8521  * Returns: 0 - success, -ENODEV - failure.
8522  **/
8523 int
8524 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8525 {
8526
8527         switch (dev_grp) {
8528         case LPFC_PCI_DEV_LP:
8529                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8530                 phba->lpfc_sli_handle_slow_ring_event =
8531                                 lpfc_sli_handle_slow_ring_event_s3;
8532                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
8533                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
8534                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
8535                 break;
8536         case LPFC_PCI_DEV_OC:
8537                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
8538                 phba->lpfc_sli_handle_slow_ring_event =
8539                                 lpfc_sli_handle_slow_ring_event_s4;
8540                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
8541                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
8542                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
8543                 break;
8544         default:
8545                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8546                                 "1420 Invalid HBA PCI-device group: 0x%x\n",
8547                                 dev_grp);
8548                 return -ENODEV;
8550         }
8551         return 0;
8552 }
8553
8554 /**
8555  * __lpfc_sli_ringtx_put - Add an iocb to the txq
8556  * @phba: Pointer to HBA context object.
8557  * @pring: Pointer to driver SLI ring object.
8558  * @piocb: Pointer to the newly added command iocb.
8559  *
8560  * This function is called with hbalock held to add a command
8561  * iocb to the txq when the SLI layer cannot submit the command iocb
8562  * to the ring.
8563  **/
8564 void
8565 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8566                     struct lpfc_iocbq *piocb)
8567 {
8568         lockdep_assert_held(&phba->hbalock);
8569         /* Insert the caller's iocb in the txq tail for later processing. */
8570         list_add_tail(&piocb->list, &pring->txq);
8571 }
8572
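/*
 * Companion sketch: the txq is a plain list-based FIFO. This put pairs with
 * lpfc_sli_ringtx_get(), which lpfc_sli_next_iocb() below uses to dequeue.
 * Both ends must run under hbalock, per the lockdep assertion:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	__lpfc_sli_ringtx_put(phba, pring, piocb);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 */
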
8573 /**
8574  * lpfc_sli_next_iocb - Get the next iocb in the txq
8575  * @phba: Pointer to HBA context object.
8576  * @pring: Pointer to driver SLI ring object.
8577  * @piocb: Pointer to address of newly added command iocb.
8578  *
8579  * This function is called with hbalock held before a new
8580  * iocb is submitted to the firmware. It checks the txq so that
8581  * any iocbs queued there are flushed to the firmware before new
8582  * iocbs are submitted.
8583  * If there are iocbs in the txq which need to be submitted
8584  * to firmware, lpfc_sli_next_iocb returns the first element
8585  * of the txq after dequeuing it from the txq.
8586  * If there is no iocb in the txq then the function will return
8587  * *piocb and set *piocb to NULL. The caller needs to check
8588  * *piocb to find out if there are more commands in the txq.
8589  **/
8590 static struct lpfc_iocbq *
8591 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8592                    struct lpfc_iocbq **piocb)
8593 {
8594         struct lpfc_iocbq * nextiocb;
8595
8596         lockdep_assert_held(&phba->hbalock);
8597
8598         nextiocb = lpfc_sli_ringtx_get(phba, pring);
8599         if (!nextiocb) {
8600                 nextiocb = *piocb;
8601                 *piocb = NULL;
8602         }
8603
8604         return nextiocb;
8605 }
8606
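/*
 * Usage sketch: __lpfc_sli_issue_iocb_s3() below drains the txq ahead of
 * the caller's iocb and then checks whether that iocb was consumed too:
 *
 *	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
 *	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
 *		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
 *	if (!piocb)
 *		return IOCB_SUCCESS;
 */
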
8607 /**
8608  * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
8609  * @phba: Pointer to HBA context object.
8610  * @ring_number: SLI ring number to issue iocb on.
8611  * @piocb: Pointer to command iocb.
8612  * @flag: Flag indicating if this command can be put into txq.
8613  *
8614  * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
8615  * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
8616  * recovering from error state, if the HBA is resetting or if LPFC_STOP_IOCB_EVENT
8617  * flag is turned on, the function returns IOCB_ERROR. When the link is down,
8618  * this function allows only iocbs for posting buffers. This function finds
8619  * next available slot in the command ring and posts the command to the
8620  * available slot and writes the port attention register to request HBA start
8621  * processing new iocb. If there is no slot available in the ring and
8622  * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
8623  * the function returns IOCB_BUSY.
8624  *
8625  * This function is called with hbalock held. The function will return success
8626  * after it successfully submits the iocb to firmware or after adding it to
8627  * the txq.
8628  **/
8629 static int
8630 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
8631                     struct lpfc_iocbq *piocb, uint32_t flag)
8632 {
8633         struct lpfc_iocbq *nextiocb;
8634         IOCB_t *iocb;
8635         struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
8636
8637         lockdep_assert_held(&phba->hbalock);
8638
8639         if (piocb->iocb_cmpl && (!piocb->vport) &&
8640            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
8641            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
8642                 lpfc_printf_log(phba, KERN_ERR,
8643                                 LOG_SLI | LOG_VPORT,
8644                                 "1807 IOCB x%x failed. No vport\n",
8645                                 piocb->iocb.ulpCommand);
8646                 dump_stack();
8647                 return IOCB_ERROR;
8648         }
8649
8651         /* If the PCI channel is in offline state, do not post iocbs. */
8652         if (unlikely(pci_channel_offline(phba->pcidev)))
8653                 return IOCB_ERROR;
8654
8655         /* If HBA has a deferred error attention, fail the iocb. */
8656         if (unlikely(phba->hba_flag & DEFER_ERATT))
8657                 return IOCB_ERROR;
8658
8659         /*
8660          * We should never get an IOCB if we are in a < LINK_DOWN state
8661          */
8662         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
8663                 return IOCB_ERROR;
8664
8665         /*
8666          * Check to see if we are blocking IOCB processing because of an
8667          * outstanding event.
8668          */
8669         if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
8670                 goto iocb_busy;
8671
8672         if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
8673                 /*
8674                  * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
8675                  * can be issued if the link is not up.
8676                  */
8677                 switch (piocb->iocb.ulpCommand) {
8678                 case CMD_GEN_REQUEST64_CR:
8679                 case CMD_GEN_REQUEST64_CX:
8680                         if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
8681                                 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
8682                                         FC_RCTL_DD_UNSOL_CMD) ||
8683                                 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
8684                                         MENLO_TRANSPORT_TYPE))
8685
8686                                 goto iocb_busy;
8687                         break;
8688                 case CMD_QUE_RING_BUF_CN:
8689                 case CMD_QUE_RING_BUF64_CN:
8690                         /*
8691                          * For IOCBs, like QUE_RING_BUF, that have no rsp ring
8692                          * completion, iocb_cmpl MUST be 0.
8693                          */
8694                         if (piocb->iocb_cmpl)
8695                                 piocb->iocb_cmpl = NULL;
8696                         /*FALLTHROUGH*/
8697                 case CMD_CREATE_XRI_CR:
8698                 case CMD_CLOSE_XRI_CN:
8699                 case CMD_CLOSE_XRI_CX:
8700                         break;
8701                 default:
8702                         goto iocb_busy;
8703                 }
8704
8705         /*
8706          * For FCP commands, we must be in a state where we can process link
8707          * attention events.
8708          */
8709         } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
8710                             !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
8711                 goto iocb_busy;
8712         }
8713
8714         while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
8715                (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
8716                 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
8717
8718         if (iocb)
8719                 lpfc_sli_update_ring(phba, pring);
8720         else
8721                 lpfc_sli_update_full_ring(phba, pring);
8722
8723         if (!piocb)
8724                 return IOCB_SUCCESS;
8725
8726         goto out_busy;
8727
8728  iocb_busy:
8729         pring->stats.iocb_cmd_delay++;
8730
8731  out_busy:
8732
8733         if (!(flag & SLI_IOCB_RET_IOCB)) {
8734                 __lpfc_sli_ringtx_put(phba, pring, piocb);
8735                 return IOCB_SUCCESS;
8736         }
8737
8738         return IOCB_BUSY;
8739 }
8740
8741 /**
8742  * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
8743  * @phba: Pointer to HBA context object.
8744  * @piocbq: Pointer to command iocb.
8745  * @sglq: Pointer to the scatter gather queue object.
8746  *
8747  * This routine converts the bpl or bde that is in the IOCB
8748  * to a sgl list for the sli4 hardware. The physical address
8749  * of the bpl/bde is converted back to a virtual address.
8750  * If the IOCB contains a BPL then the list of BDEs is
8751  * converted to sli4_sge's. If the IOCB contains a single
8752  * BDE then it is converted to a single sli4_sge.
8753  * The IOCB is still in CPU endianness, so the contents of
8754  * the bpl can be used without byte swapping.
8755  *
8756  * Returns valid XRI = Success, NO_XRI = Failure.
8757  **/
8758 static uint16_t
8759 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
8760                 struct lpfc_sglq *sglq)
8761 {
8762         uint16_t xritag = NO_XRI;
8763         struct ulp_bde64 *bpl = NULL;
8764         struct ulp_bde64 bde;
8765         struct sli4_sge *sgl  = NULL;
8766         struct lpfc_dmabuf *dmabuf;
8767         IOCB_t *icmd;
8768         int numBdes = 0;
8769         int i = 0;
8770         uint32_t offset = 0; /* accumulated offset in the sg request list */
8771         int inbound = 0; /* number of sg reply entries inbound from firmware */
8772
8773         if (!piocbq || !sglq)
8774                 return xritag;
8775
8776         sgl  = (struct sli4_sge *)sglq->sgl;
8777         icmd = &piocbq->iocb;
8778         if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
8779                 return sglq->sli4_xritag;
8780         if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
8781                 numBdes = icmd->un.genreq64.bdl.bdeSize /
8782                                 sizeof(struct ulp_bde64);
8783                 /* The addrHigh and addrLow fields within the IOCB
8784                  * have not been byteswapped yet so there is no
8785                  * need to swap them back.
8786                  */
8787                 if (piocbq->context3)
8788                         dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
8789                 else
8790                         return xritag;
8791
8792                 bpl  = (struct ulp_bde64 *)dmabuf->virt;
8793                 if (!bpl)
8794                         return xritag;
8795
8796                 for (i = 0; i < numBdes; i++) {
8797                         /* Should already be byte swapped. */
8798                         sgl->addr_hi = bpl->addrHigh;
8799                         sgl->addr_lo = bpl->addrLow;
8800
8801                         sgl->word2 = le32_to_cpu(sgl->word2);
8802                         if ((i+1) == numBdes)
8803                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
8804                         else
8805                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
8806                         /* swap the size field back to the cpu so we
8807                          * can assign it to the sgl.
8808                          */
8809                         bde.tus.w = le32_to_cpu(bpl->tus.w);
8810                         sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
8811                         /* The offsets in the sgl need to be accumulated
8812                          * separately for the request and reply lists.
8813                          * The request is always first, the reply follows.
8814                          */
8815                         if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
8816                                 /* add up the reply sg entries */
8817                                 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
8818                                         inbound++;
8819                                 /* first inbound? reset the offset */
8820                                 if (inbound == 1)
8821                                         offset = 0;
8822                                 bf_set(lpfc_sli4_sge_offset, sgl, offset);
8823                                 bf_set(lpfc_sli4_sge_type, sgl,
8824                                         LPFC_SGE_TYPE_DATA);
8825                                 offset += bde.tus.f.bdeSize;
8826                         }
8827                         sgl->word2 = cpu_to_le32(sgl->word2);
8828                         bpl++;
8829                         sgl++;
8830                 }
8831         } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
8832                 /* The addrHigh and addrLow fields of the BDE have not
8833                  * been byteswapped yet so they need to be swapped
8834                  * before putting them in the sgl.
8835                  */
8836                 sgl->addr_hi =
8837                         cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
8838                 sgl->addr_lo =
8839                         cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
8840                 sgl->word2 = le32_to_cpu(sgl->word2);
8841                 bf_set(lpfc_sli4_sge_last, sgl, 1);
8842                 sgl->word2 = cpu_to_le32(sgl->word2);
8843                 sgl->sge_len =
8844                         cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
8845         }
8846         return sglq->sli4_xritag;
8847 }
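
/*
 * A simplified sketch of the per-entry BPL-to-SGE conversion performed
 * by the loop above (GEN_REQUEST64 offset accounting and error paths
 * omitted; only word2 and the length need byte swapping, since the BPL
 * addresses are already little endian):
 *
 *	sgl->addr_hi = bpl->addrHigh;
 *	sgl->addr_lo = bpl->addrLow;
 *	sgl->word2 = le32_to_cpu(sgl->word2);
 *	bf_set(lpfc_sli4_sge_last, sgl, (i + 1) == numBdes);
 *	bde.tus.w = le32_to_cpu(bpl->tus.w);
 *	sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
 *	sgl->word2 = cpu_to_le32(sgl->word2);
 */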
8848
8849 /**
8850  * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
8851  * @phba: Pointer to HBA context object.
8852  * @iocbq: Pointer to command iocb.
8853  * @wqe: Pointer to the work queue entry.
8854  *
8855  * This routine converts the iocb command to its Work Queue Entry
8856  * equivalent. The wqe pointer should not have any fields set when
8857  * this routine is called because it will memcpy over them.
8858  * This routine does not set the CQ_ID or the WQEC bits in the
8859  * wqe.
8860  *
8861  * Returns: 0 = Success, IOCB_ERROR = Failure.
8862  **/
8863 static int
8864 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8865                 union lpfc_wqe *wqe)
8866 {
8867         uint32_t xmit_len = 0, total_len = 0;
8868         uint8_t ct = 0;
8869         uint32_t fip;
8870         uint32_t abort_tag;
8871         uint8_t command_type = ELS_COMMAND_NON_FIP;
8872         uint8_t cmnd;
8873         uint16_t xritag;
8874         uint16_t abrt_iotag;
8875         struct lpfc_iocbq *abrtiocbq;
8876         struct ulp_bde64 *bpl = NULL;
8877         uint32_t els_id = LPFC_ELS_ID_DEFAULT;
8878         int numBdes, i;
8879         struct ulp_bde64 bde;
8880         struct lpfc_nodelist *ndlp;
8881         uint32_t *pcmd;
8882         uint32_t if_type;
8883
8884         fip = phba->hba_flag & HBA_FIP_SUPPORT;
8885         /* The fcp commands will set command type */
8886         if (iocbq->iocb_flag &  LPFC_IO_FCP)
8887                 command_type = FCP_COMMAND;
8888         else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
8889                 command_type = ELS_COMMAND_FIP;
8890         else
8891                 command_type = ELS_COMMAND_NON_FIP;
8892
8893         if (phba->fcp_embed_io)
8894                 memset(wqe, 0, sizeof(union lpfc_wqe128));
8895         /* Some of the fields are in the right position already */
8896         memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
8897         if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
8898                 /* The ct field has moved so reset */
8899                 wqe->generic.wqe_com.word7 = 0;
8900                 wqe->generic.wqe_com.word10 = 0;
8901         }
8902
8903         abort_tag = (uint32_t) iocbq->iotag;
8904         xritag = iocbq->sli4_xritag;
8905         /* words0-2 bpl convert bde */
8906         if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
8907                 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8908                                 sizeof(struct ulp_bde64);
8909                 bpl  = (struct ulp_bde64 *)
8910                         ((struct lpfc_dmabuf *)iocbq->context3)->virt;
8911                 if (!bpl)
8912                         return IOCB_ERROR;
8913
8914                 /* Should already be byte swapped. */
8915                 wqe->generic.bde.addrHigh =  le32_to_cpu(bpl->addrHigh);
8916                 wqe->generic.bde.addrLow =  le32_to_cpu(bpl->addrLow);
8917                 /* swap the size field back to the cpu so we
8918                  * can assign it to the sgl.
8919                  */
8920                 wqe->generic.bde.tus.w  = le32_to_cpu(bpl->tus.w);
8921                 xmit_len = wqe->generic.bde.tus.f.bdeSize;
8922                 total_len = 0;
8923                 for (i = 0; i < numBdes; i++) {
8924                         bde.tus.w  = le32_to_cpu(bpl[i].tus.w);
8925                         total_len += bde.tus.f.bdeSize;
8926                 }
8927         } else
8928                 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
8929
8930         iocbq->iocb.ulpIoTag = iocbq->iotag;
8931         cmnd = iocbq->iocb.ulpCommand;
8932
8933         switch (iocbq->iocb.ulpCommand) {
8934         case CMD_ELS_REQUEST64_CR:
8935                 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
8936                         ndlp = iocbq->context_un.ndlp;
8937                 else
8938                         ndlp = (struct lpfc_nodelist *)iocbq->context1;
8939                 if (!iocbq->iocb.ulpLe) {
8940                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8941                                 "2007 Only Limited Edition cmd Format"
8942                                 " supported 0x%x\n",
8943                                 iocbq->iocb.ulpCommand);
8944                         return IOCB_ERROR;
8945                 }
8946
8947                 wqe->els_req.payload_len = xmit_len;
8948                 /* Els_request64 has a TMO */
8949                 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
8950                         iocbq->iocb.ulpTimeout);
8951                 /* Need a VF for word 4; set the vf bit */
8952                 bf_set(els_req64_vf, &wqe->els_req, 0);
8953                 /* And a VFID for word 12 */
8954                 bf_set(els_req64_vfid, &wqe->els_req, 0);
8955                 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8956                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8957                        iocbq->iocb.ulpContext);
8958                 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
8959                 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
8960                 /* CCP CCPE PV PRI in word10 were set in the memcpy */
8961                 if (command_type == ELS_COMMAND_FIP)
8962                         els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
8963                                         >> LPFC_FIP_ELS_ID_SHIFT);
8964                 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8965                                         iocbq->context2)->virt);
8966                 if_type = bf_get(lpfc_sli_intf_if_type,
8967                                         &phba->sli4_hba.sli_intf);
8968                 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
8969                         if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
8970                                 *pcmd == ELS_CMD_SCR ||
8971                                 *pcmd == ELS_CMD_FDISC ||
8972                                 *pcmd == ELS_CMD_LOGO ||
8973                                 *pcmd == ELS_CMD_PLOGI)) {
8974                                 bf_set(els_req64_sp, &wqe->els_req, 1);
8975                                 bf_set(els_req64_sid, &wqe->els_req,
8976                                         iocbq->vport->fc_myDID);
8977                                 if ((*pcmd == ELS_CMD_FLOGI) &&
8978                                         !(phba->fc_topology ==
8979                                                 LPFC_TOPOLOGY_LOOP))
8980                                         bf_set(els_req64_sid, &wqe->els_req, 0);
8981                                 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
8982                                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8983                                         phba->vpi_ids[iocbq->vport->vpi]);
8984                         } else if (pcmd && iocbq->context1) {
8985                                 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
8986                                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8987                                         phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8988                         }
8989                 }
8990                 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
8991                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8992                 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
8993                 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
8994                 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
8995                 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
8996                 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8997                 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
8998                 wqe->els_req.max_response_payload_len = total_len - xmit_len;
8999                 break;
9000         case CMD_XMIT_SEQUENCE64_CX:
9001                 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9002                        iocbq->iocb.un.ulpWord[3]);
9003                 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
9004                        iocbq->iocb.unsli3.rcvsli3.ox_id);
9005                 /* The entire sequence is transmitted for this IOCB */
9006                 xmit_len = total_len;
9007                 cmnd = CMD_XMIT_SEQUENCE64_CR;
9008                 if (phba->link_flag & LS_LOOPBACK_MODE)
9009                         bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
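                        /* fall through */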
9010         case CMD_XMIT_SEQUENCE64_CR:
9011                 /* word3 iocb=io_tag32 wqe=reserved */
9012                 wqe->xmit_sequence.rsvd3 = 0;
9013                 /* word4 relative_offset memcpy */
9014                 /* word5 r_ctl/df_ctl memcpy */
9015                 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9016                 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9017                 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9018                        LPFC_WQE_IOD_WRITE);
9019                 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9020                        LPFC_WQE_LENLOC_WORD12);
9021                 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
9022                 wqe->xmit_sequence.xmit_len = xmit_len;
9023                 command_type = OTHER_COMMAND;
9024                 break;
9025         case CMD_XMIT_BCAST64_CN:
9026                 /* word3 iocb=iotag32 wqe=seq_payload_len */
9027                 wqe->xmit_bcast64.seq_payload_len = xmit_len;
9028                 /* word4 iocb=rsvd wqe=rsvd */
9029                 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
9030                 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
9031                 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
9032                         ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9033                 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9034                 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9035                 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9036                        LPFC_WQE_LENLOC_WORD3);
9037                 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
9038                 break;
9039         case CMD_FCP_IWRITE64_CR:
9040                 command_type = FCP_COMMAND_DATA_OUT;
9041                 /* word3 iocb=iotag wqe=payload_offset_len */
9042                 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9043                 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9044                        xmit_len + sizeof(struct fcp_rsp));
9045                 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9046                        0);
9047                 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9048                 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9049                 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9050                        iocbq->iocb.ulpFCP2Rcvy);
9051                 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9052                 /* Always open the exchange */
9053                 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9054                 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9055                        LPFC_WQE_LENLOC_WORD4);
9056                 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
9057                 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
9058                 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9059                         bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
9060                         bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9061                         if (iocbq->priority) {
9062                                 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9063                                        (iocbq->priority << 1));
9064                         } else {
9065                                 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9066                                        (phba->cfg_XLanePriority << 1));
9067                         }
9068                 }
9069                 /* Note, word 10 is already initialized to 0 */
9070
9071                 /* Don't set PBDE for Perf hints, just fcp_embed_pbde */
9072                 if (phba->fcp_embed_pbde)
9073                         bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9074                 else
9075                         bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9076
9077                 if (phba->fcp_embed_io) {
9078                         struct lpfc_scsi_buf *lpfc_cmd;
9079                         struct sli4_sge *sgl;
9080                         union lpfc_wqe128 *wqe128;
9081                         struct fcp_cmnd *fcp_cmnd;
9082                         uint32_t *ptr;
9083
9084                         /* 128 byte wqe support here */
9085                         wqe128 = (union lpfc_wqe128 *)wqe;
9086
9087                         lpfc_cmd = iocbq->context1;
9088                         sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
9089                         fcp_cmnd = lpfc_cmd->fcp_cmnd;
9090
9091                         /* Word 0-2 - FCP_CMND */
9092                         wqe128->generic.bde.tus.f.bdeFlags =
9093                                 BUFF_TYPE_BDE_IMMED;
9094                         wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
9095                         wqe128->generic.bde.addrHigh = 0;
9096                         wqe128->generic.bde.addrLow =  88;  /* Word 22 */
9097
9098                         bf_set(wqe_wqes, &wqe128->fcp_iwrite.wqe_com, 1);
9099
9100                         /* Word 22-29  FCP CMND Payload */
9101                         ptr = &wqe128->words[22];
9102                         memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9103                 }
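                /*
                 * With fcp_embed_io, the BDE built above no longer points at
                 * DMA memory: it becomes a BUFF_TYPE_BDE_IMMED whose addrLow
                 * of 88 is the byte offset of word 22 within this 128-byte
                 * WQE (22 words * 4 bytes), and whose length is the FCP_CMND
                 * size taken from the first SGE. Together with the wqe_wqes
                 * bit, this lets the adapter read the FCP_CMND payload
                 * directly from words 22-29 instead of issuing a separate
                 * DMA read for the command.
                 */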
9104                 break;
9105         case CMD_FCP_IREAD64_CR:
9106                 /* word3 iocb=iotag wqe=payload_offset_len */
9107                 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9108                 bf_set(payload_offset_len, &wqe->fcp_iread,
9109                        xmit_len + sizeof(struct fcp_rsp));
9110                 bf_set(cmd_buff_len, &wqe->fcp_iread,
9111                        0);
9112                 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9113                 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9114                 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9115                        iocbq->iocb.ulpFCP2Rcvy);
9116                 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
9117                 /* Always open the exchange */
9118                 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9119                 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9120                        LPFC_WQE_LENLOC_WORD4);
9121                 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
9122                 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
9123                 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9124                         bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
9125                         bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9126                         if (iocbq->priority) {
9127                                 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9128                                        (iocbq->priority << 1));
9129                         } else {
9130                                 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9131                                        (phba->cfg_XLanePriority << 1));
9132                         }
9133                 }
9134                 /* Note, word 10 is already initialized to 0 */
9135
9136                 /* Don't set PBDE for Perf hints, just fcp_embed_pbde */
9137                 if (phba->fcp_embed_pbde)
9138                         bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9139                 else
9140                         bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9141
9142                 if (phba->fcp_embed_io) {
9143                         struct lpfc_scsi_buf *lpfc_cmd;
9144                         struct sli4_sge *sgl;
9145                         union lpfc_wqe128 *wqe128;
9146                         struct fcp_cmnd *fcp_cmnd;
9147                         uint32_t *ptr;
9148
9149                         /* 128 byte wqe support here */
9150                         wqe128 = (union lpfc_wqe128 *)wqe;
9151
9152                         lpfc_cmd = iocbq->context1;
9153                         sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
9154                         fcp_cmnd = lpfc_cmd->fcp_cmnd;
9155
9156                         /* Word 0-2 - FCP_CMND */
9157                         wqe128->generic.bde.tus.f.bdeFlags =
9158                                 BUFF_TYPE_BDE_IMMED;
9159                         wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
9160                         wqe128->generic.bde.addrHigh = 0;
9161                         wqe128->generic.bde.addrLow =  88;  /* Word 22 */
9162
9163                         bf_set(wqe_wqes, &wqe128->fcp_iread.wqe_com, 1);
9164
9165                         /* Word 22-29  FCP CMND Payload */
9166                         ptr = &wqe128->words[22];
9167                         memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9168                 }
9169                 break;
9170         case CMD_FCP_ICMND64_CR:
9171                 /* word3 iocb=iotag wqe=payload_offset_len */
9172                 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9173                 bf_set(payload_offset_len, &wqe->fcp_icmd,
9174                        xmit_len + sizeof(struct fcp_rsp));
9175                 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9176                        0);
9177                 /* word3 iocb=IO_TAG wqe=reserved */
9178                 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
9179                 /* Always open the exchange */
9180                 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9181                 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9182                 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9183                 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9184                        LPFC_WQE_LENLOC_NONE);
9185                 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9186                        iocbq->iocb.ulpFCP2Rcvy);
9187                 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9188                         bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
9189                         bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9190                         if (iocbq->priority) {
9191                                 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9192                                        (iocbq->priority << 1));
9193                         } else {
9194                                 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9195                                        (phba->cfg_XLanePriority << 1));
9196                         }
9197                 }
9198                 /* Note, word 10 is already initialized to 0 */
9199
9200                 if (phba->fcp_embed_io) {
9201                         struct lpfc_scsi_buf *lpfc_cmd;
9202                         struct sli4_sge *sgl;
9203                         union lpfc_wqe128 *wqe128;
9204                         struct fcp_cmnd *fcp_cmnd;
9205                         uint32_t *ptr;
9206
9207                         /* 128 byte wqe support here */
9208                         wqe128 = (union lpfc_wqe128 *)wqe;
9209
9210                         lpfc_cmd = iocbq->context1;
9211                         sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
9212                         fcp_cmnd = lpfc_cmd->fcp_cmnd;
9213
9214                         /* Word 0-2 - FCP_CMND */
9215                         wqe128->generic.bde.tus.f.bdeFlags =
9216                                 BUFF_TYPE_BDE_IMMED;
9217                         wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
9218                         wqe128->generic.bde.addrHigh = 0;
9219                         wqe128->generic.bde.addrLow =  88;  /* Word 22 */
9220
9221                         bf_set(wqe_wqes, &wqe128->fcp_icmd.wqe_com, 1);
9222
9223                         /* Word 22-29  FCP CMND Payload */
9224                         ptr = &wqe128->words[22];
9225                         memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9226                 }
9227                 break;
9228         case CMD_GEN_REQUEST64_CR:
9229                 /* For this command calculate the xmit length of the
9230                  * request bde.
9231                  */
9232                 xmit_len = 0;
9233                 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9234                         sizeof(struct ulp_bde64);
9235                 for (i = 0; i < numBdes; i++) {
9236                         bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9237                         if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9238                                 break;
9239                         xmit_len += bde.tus.f.bdeSize;
9240                 }
9241                 /* word3 iocb=IO_TAG wqe=request_payload_len */
9242                 wqe->gen_req.request_payload_len = xmit_len;
9243                 /* word4 iocb=parameter wqe=relative_offset memcpy */
9244                 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
9245                 /* word6 context tag copied in memcpy */
9246                 if (iocbq->iocb.ulpCt_h  || iocbq->iocb.ulpCt_l) {
9247                         ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9248                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9249                                 "2015 Invalid CT %x command 0x%x\n",
9250                                 ct, iocbq->iocb.ulpCommand);
9251                         return IOCB_ERROR;
9252                 }
9253                 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9254                 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9255                 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9256                 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9257                 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9258                 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9259                 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9260                 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
9261                 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
9262                 command_type = OTHER_COMMAND;
9263                 break;
9264         case CMD_XMIT_ELS_RSP64_CX:
9265                 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9266                 /* words0-2 BDE memcpy */
9267                 /* word3 iocb=iotag32 wqe=response_payload_len */
9268                 wqe->xmit_els_rsp.response_payload_len = xmit_len;
9269                 /* word4 */
9270                 wqe->xmit_els_rsp.word4 = 0;
9271                 /* word5 iocb=rsvd wqe=did */
9272                 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
9273                          iocbq->iocb.un.xseq64.xmit_els_remoteID);
9274
9275                 if_type = bf_get(lpfc_sli_intf_if_type,
9276                                         &phba->sli4_hba.sli_intf);
9277                 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9278                         if (iocbq->vport->fc_flag & FC_PT2PT) {
9279                                 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9280                                 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9281                                         iocbq->vport->fc_myDID);
9282                                 if (iocbq->vport->fc_myDID == Fabric_DID) {
9283                                         bf_set(wqe_els_did,
9284                                                 &wqe->xmit_els_rsp.wqe_dest, 0);
9285                                 }
9286                         }
9287                 }
9288                 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9289                        ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9290                 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9291                 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
9292                        iocbq->iocb.unsli3.rcvsli3.ox_id);
9293                 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
9294                         bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9295                                phba->vpi_ids[iocbq->vport->vpi]);
9296                 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9297                 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9298                 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9299                 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9300                        LPFC_WQE_LENLOC_WORD3);
9301                 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
9302                 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9303                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9304                 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9305                                         iocbq->context2)->virt);
9306                 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
9307                 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9308                 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9309                         iocbq->vport->fc_myDID);
9310                 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9311                 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9312                         phba->vpi_ids[phba->pport->vpi]);
9313         }
9314                 command_type = OTHER_COMMAND;
9315                 break;
9316         case CMD_CLOSE_XRI_CN:
9317         case CMD_ABORT_XRI_CN:
9318         case CMD_ABORT_XRI_CX:
9319                 /* words 0-2 are reserved; the memcpy should have left them 0 */
9320                 /* port will send abts */
9321                 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9322                 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9323                         abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9324                         fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9325                 } else
9326                         fip = 0;
9327
9328                 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
9329                         /*
9330                          * The link is down, or the command was ELS_FIP
9331                          * so the fw does not need to send abts
9332                          * on the wire.
9333                          */
9334                         bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9335                 else
9336                         bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9337                 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
9338                 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9339                 wqe->abort_cmd.rsrvd5 = 0;
9340                 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
9341                         ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9342                 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
9343                 /*
9344                  * The abort handler will send us CMD_ABORT_XRI_CN or
9345                  * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
9346                  */
9347                 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9348                 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9349                 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9350                        LPFC_WQE_LENLOC_NONE);
9351                 cmnd = CMD_ABORT_XRI_CX;
9352                 command_type = OTHER_COMMAND;
9353                 xritag = 0;
9354                 break;
9355         case CMD_XMIT_BLS_RSP64_CX:
9356                 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9357                 /* As BLS ABTS RSP WQE is very different from other WQEs,
9358                  * we re-construct this WQE here based on information in
9359                  * iocbq from scratch.
9360                  */
9361                 memset(wqe, 0, sizeof(union lpfc_wqe));
9362                 /* OX_ID is invariant regardless of who sent the ABTS to the CT exchange */
9363                 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
9364                        bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
9365                 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
9366                     LPFC_ABTS_UNSOL_INT) {
9367                         /* ABTS sent by initiator to CT exchange, the
9368                          * RX_ID field will be filled with the newly
9369                          * allocated responder XRI.
9370                          */
9371                         bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9372                                iocbq->sli4_xritag);
9373                 } else {
9374                         /* ABTS sent by responder to CT exchange, the
9375                          * RX_ID field will be filled with the responder
9376                          * RX_ID from ABTS.
9377                          */
9378                         bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9379                                bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
9380                 }
9381                 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9382                 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
9383
9384                 /* Use CT=VPI */
9385                 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9386                         ndlp->nlp_DID);
9387                 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9388                         iocbq->iocb.ulpContext);
9389                 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
9390                 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
9391                         phba->vpi_ids[phba->pport->vpi]);
9392                 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9393                 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9394                        LPFC_WQE_LENLOC_NONE);
9395                 /* Overwrite the pre-set command type with OTHER_COMMAND */
9396                 command_type = OTHER_COMMAND;
9397                 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9398                         bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9399                                bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9400                         bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9401                                bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9402                         bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9403                                bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9404                 }
9405
9406                 break;
9407         case CMD_SEND_FRAME:
9408                 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9409                 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9410                 return 0;
9411         case CMD_XRI_ABORTED_CX:
9412         case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
9413         case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
9414         case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
9415         case CMD_FCP_TRSP64_CX: /* Target mode rcv */
9416         case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
9417         default:
9418                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9419                                 "2014 Invalid command 0x%x\n",
9420                                 iocbq->iocb.ulpCommand);
9421                 return IOCB_ERROR;
9423         }
9424
9425         if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9426                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9427         else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9428                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9429         else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9430                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9431         iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9432                               LPFC_IO_DIF_INSERT);
9433         bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9434         bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9435         wqe->generic.wqe_com.abort_tag = abort_tag;
9436         bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9437         bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9438         bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9439         bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
9440         return 0;
9441 }
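
/*
 * The bf_set()/bf_get() accessors used throughout the conversion above
 * follow the usual shift-and-mask pattern over 32-bit words. A minimal
 * sketch of the idea (the real macros live in lpfc_hw4.h and key off
 * the per-field name##_SHIFT, name##_MASK and name##_WORD definitions):
 *
 *	#define bf_set(name, ptr, value)				\
 *		((ptr)->name##_WORD = ((((value) & name##_MASK)		\
 *			<< name##_SHIFT) |				\
 *			((ptr)->name##_WORD &				\
 *			 ~(name##_MASK << name##_SHIFT))))
 *
 *	#define bf_get(name, ptr)					\
 *		((((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
 */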
9442
9443 /**
9444  * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
9445  * @phba: Pointer to HBA context object.
9446  * @ring_number: SLI ring number to issue iocb on.
9447  * @piocb: Pointer to command iocb.
9448  * @flag: Flag indicating if this command can be put into txq.
9449  *
9450  * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
9451  * an iocb command to an HBA with SLI-4 interface spec.
9452  *
9453  * This function is called with hbalock held. The function will return success
9454  * after it successfully submits the iocb to the firmware or after adding it to
9455  * the txq.
9456  **/
9457 static int
9458 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9459                          struct lpfc_iocbq *piocb, uint32_t flag)
9460 {
9461         struct lpfc_sglq *sglq;
9462         union lpfc_wqe *wqe;
9463         union lpfc_wqe128 wqe128;
9464         struct lpfc_queue *wq;
9465         struct lpfc_sli_ring *pring;
9466
9467         /* Get the WQ */
9468         if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9469             (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9470                 if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS)))
9471                         wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx];
9472                 else
9473                         wq = phba->sli4_hba.oas_wq;
9474         } else {
9475                 wq = phba->sli4_hba.els_wq;
9476         }
9477
9478         /* Get corresponding ring */
9479         pring = wq->pring;
9480
9481         /*
9482          * The WQE can be either 64 or 128 bytes,
9483          * so allocate space on the stack assuming the largest.
9484          */
9485         wqe = (union lpfc_wqe *)&wqe128;
9486
9487         lockdep_assert_held(&phba->hbalock);
9488
9489         if (piocb->sli4_xritag == NO_XRI) {
9490                 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
9491                     piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
9492                         sglq = NULL;
9493                 else {
9494                         if (!list_empty(&pring->txq)) {
9495                                 if (!(flag & SLI_IOCB_RET_IOCB)) {
9496                                         __lpfc_sli_ringtx_put(phba,
9497                                                 pring, piocb);
9498                                         return IOCB_SUCCESS;
9499                                 } else {
9500                                         return IOCB_BUSY;
9501                                 }
9502                         } else {
9503                                 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
9504                                 if (!sglq) {
9505                                         if (!(flag & SLI_IOCB_RET_IOCB)) {
9506                                                 __lpfc_sli_ringtx_put(phba,
9507                                                                 pring,
9508                                                                 piocb);
9509                                                 return IOCB_SUCCESS;
9510                                         } else
9511                                                 return IOCB_BUSY;
9512                                 }
9513                         }
9514                 }
9515         } else if (piocb->iocb_flag &  LPFC_IO_FCP)
9516                 /* These IOs already have an XRI and a mapped sgl. */
9517                 sglq = NULL;
9518         else {
9519                 /*
9520                  * This is a continuation of a command (CX), so this
9521                  * sglq is on the active list
9522                  */
9523                 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
9524                 if (!sglq)
9525                         return IOCB_ERROR;
9526         }
9527
9528         if (sglq) {
9529                 piocb->sli4_lxritag = sglq->sli4_lxritag;
9530                 piocb->sli4_xritag = sglq->sli4_xritag;
9531                 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
9532                         return IOCB_ERROR;
9533         }
9534
9535         if (lpfc_sli4_iocb2wqe(phba, piocb, wqe))
9536                 return IOCB_ERROR;
9537
9538         if (lpfc_sli4_wq_put(wq, wqe))
9539                 return IOCB_ERROR;
9540         lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
9541
9542         return 0;
9543 }
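
/*
 * The sglq handling above collapses to the following decision sketch
 * (sglq_unavailable is shorthand for the txq-not-empty and
 * __lpfc_sli_get_els_sglq() failure checks): an iocb that cannot get
 * an sglq is either parked on the txq for later resubmission or
 * bounced back to the caller, depending on flag:
 *
 *	if (sglq_unavailable) {
 *		if (flag & SLI_IOCB_RET_IOCB)
 *			return IOCB_BUSY;
 *		__lpfc_sli_ringtx_put(phba, pring, piocb);
 *		return IOCB_SUCCESS;
 *	}
 */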
9544
9545 /**
9546  * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
9547  *
9548  * This routine wraps the actual lockless IOCB issue function
9549  * pointer from the lpfc_hba struct.
9550  *
9551  * Return codes:
9552  * IOCB_ERROR - Error
9553  * IOCB_SUCCESS - Success
9554  * IOCB_BUSY - Busy
9555  **/
9556 int
9557 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9558                 struct lpfc_iocbq *piocb, uint32_t flag)
9559 {
9560         return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9561 }
9562
9563 /**
9564  * lpfc_sli_api_table_setup - Set up sli api function jump table
9565  * @phba: The hba struct for which this call is being executed.
9566  * @dev_grp: The HBA PCI-Device group number.
9567  *
9568  * This routine sets up the SLI interface API function jump table in @phba
9569  * struct.
9570  * Returns: 0 - success, -ENODEV - failure.
9571  **/
9572 int
9573 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9574 {
9575
9576         switch (dev_grp) {
9577         case LPFC_PCI_DEV_LP:
9578                 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
9579                 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
9580                 break;
9581         case LPFC_PCI_DEV_OC:
9582                 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
9583                 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
9584                 break;
9585         default:
9586                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9587                                 "1419 Invalid HBA PCI-device group: 0x%x\n",
9588                                 dev_grp);
9589                 return -ENODEV;
9591         }
9592         phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
9593         return 0;
9594 }
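
/*
 * With the jump table above filled in, callers never branch on the SLI
 * revision when issuing an iocb; the wrapper dispatches through the
 * function pointer at run time, e.g. (sketch):
 *
 *	rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
 *
 * which resolves to __lpfc_sli_issue_iocb_s3() on LPFC_PCI_DEV_LP
 * adapters and __lpfc_sli_issue_iocb_s4() on LPFC_PCI_DEV_OC adapters.
 */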
9595
9596 /**
9597  * lpfc_sli4_calc_ring - Calculates which ring to use
9598  * @phba: Pointer to HBA context object.
9599  * @piocb: Pointer to command iocb.
9600  *
9601  * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
9602  * hba_wqidx, thus we need to calculate the corresponding ring.
9603  * Since ABORTS must go on the same WQ as the command they are
9604  * aborting, we use the command's hba_wqidx.
9605  */
9606 struct lpfc_sli_ring *
9607 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
9608 {
9609         if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
9610                 if (!(phba->cfg_fof) ||
9611                     (!(piocb->iocb_flag & LPFC_IO_FOF))) {
9612                         if (unlikely(!phba->sli4_hba.fcp_wq))
9613                                 return NULL;
9614                         /*
9615                          * for an abort iocb, hba_wqidx should already
9616                          * be set up based on what work queue we used.
9617                          */
9618                         if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9619                                 piocb->hba_wqidx =
9620                                         lpfc_sli4_scmd_to_wqidx_distr(phba,
9621                                                               piocb->context1);
9622                                 piocb->hba_wqidx = piocb->hba_wqidx %
9623                                         phba->cfg_fcp_io_channel;
9624                         }
9625                         return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
9626                 } else {
9627                         if (unlikely(!phba->sli4_hba.oas_wq))
9628                                 return NULL;
9629                         piocb->hba_wqidx = 0;
9630                         return phba->sli4_hba.oas_wq->pring;
9631                 }
9632         } else {
9633                 if (unlikely(!phba->sli4_hba.els_wq))
9634                         return NULL;
9635                 piocb->hba_wqidx = 0;
9636                 return phba->sli4_hba.els_wq->pring;
9637         }
9638 }
9639
9640 /**
9641  * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
9642  * @phba: Pointer to HBA context object.
9643  * @ring_number: SLI ring number to issue iocb on.
9644  * @piocb: Pointer to command iocb.
9645  * @flag: Flag indicating if this command can be put into txq.
9646  *
9647  * lpfc_sli_issue_iocb is a wrapper around the __lpfc_sli_issue_iocb
9648  * function. It acquires the appropriate lock (the ring_lock on SLI4,
9649  * the hbalock on SLI3), calls __lpfc_sli_issue_iocb, and returns
9650  * whatever error __lpfc_sli_issue_iocb returns. This wrapper is used
9651  * by functions which do not already hold the relevant lock.
9652  **/
9653 int
9654 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9655                     struct lpfc_iocbq *piocb, uint32_t flag)
9656 {
9657         struct lpfc_hba_eq_hdl *hba_eq_hdl;
9658         struct lpfc_sli_ring *pring;
9659         struct lpfc_queue *fpeq;
9660         struct lpfc_eqe *eqe;
9661         unsigned long iflags;
9662         int rc, idx;
9663
9664         if (phba->sli_rev == LPFC_SLI_REV4) {
9665                 pring = lpfc_sli4_calc_ring(phba, piocb);
9666                 if (unlikely(pring == NULL))
9667                         return IOCB_ERROR;
9668
9669                 spin_lock_irqsave(&pring->ring_lock, iflags);
9670                 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9671                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
9672
9673                 if (lpfc_fcp_look_ahead && (piocb->iocb_flag &  LPFC_IO_FCP)) {
9674                         idx = piocb->hba_wqidx;
9675                         hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx];
9676
9677                         if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) {
9678
9679                                 /* Get associated EQ with this index */
9680                                 fpeq = phba->sli4_hba.hba_eq[idx];
9681
9682                                 /* Turn off interrupts from this EQ */
9683                                 phba->sli4_hba.sli4_eq_clr_intr(fpeq);
9684
9685                                 /*
9686                                  * Process all the events on FCP EQ
9687                                  */
9688                                 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
9689                                         lpfc_sli4_hba_handle_eqe(phba,
9690                                                 eqe, idx);
9691                                         fpeq->EQ_processed++;
9692                                 }
9693
9694                                 /* Always clear and re-arm the EQ */
9695                                 phba->sli4_hba.sli4_eq_release(fpeq,
9696                                         LPFC_QUEUE_REARM);
9697                         }
9698                         atomic_inc(&hba_eq_hdl->hba_eq_in_use);
9699                 }
9700         } else {
9701                 /* For now, SLI2/3 will still use hbalock */
9702                 spin_lock_irqsave(&phba->hbalock, iflags);
9703                 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9704                 spin_unlock_irqrestore(&phba->hbalock, iflags);
9705         }
9706         return rc;
9707 }
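
/*
 * The look-ahead path above treats hba_eq_in_use as a claim token:
 * atomic_dec_and_test() succeeds for exactly one context at a time,
 * which then polls the EQ with its interrupt cleared and re-arms it on
 * the way out; every context restores the token with atomic_inc().
 * In miniature (helper names illustrative):
 *
 *	if (atomic_dec_and_test(&hdl->hba_eq_in_use)) {
 *		clr_intr(eq);
 *		while ((eqe = get_eqe(eq)))
 *			handle_eqe(phba, eqe, idx);
 *		release(eq, LPFC_QUEUE_REARM);
 *	}
 *	atomic_inc(&hdl->hba_eq_in_use);
 */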
9708
9709 /**
9710  * lpfc_extra_ring_setup - Extra ring setup function
9711  * @phba: Pointer to HBA context object.
9712  *
9713  * This function is called while the driver attaches to the
9714  * HBA to set up the extra ring. The extra ring is used
9715  * only when the driver needs to support target mode or
9716  * IP over FC functionality.
9717  *
9718  * This function is called with no lock held. SLI3 only.
9719  **/
9720 static int
9721 lpfc_extra_ring_setup( struct lpfc_hba *phba)
9722 {
9723         struct lpfc_sli *psli;
9724         struct lpfc_sli_ring *pring;
9725
9726         psli = &phba->sli;
9727
9728         /* Adjust cmd/rsp ring iocb entries more evenly */
9729
9730         /* Take some away from the FCP ring */
9731         pring = &psli->sli3_ring[LPFC_FCP_RING];
9732         pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9733         pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9734         pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9735         pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9736
9737         /* and give them to the extra ring */
9738         pring = &psli->sli3_ring[LPFC_EXTRA_RING];
9739
9740         pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9741         pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9742         pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9743         pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9744
9745         /* Setup default profile for this ring */
9746         pring->iotag_max = 4096;
9747         pring->num_mask = 1;
9748         pring->prt[0].profile = 0;      /* Mask 0 */
9749         pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
9750         pring->prt[0].type = phba->cfg_multi_ring_type;
9751         pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
9752         return 0;
9753 }
9754
9755 /**
 * lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
9756  * @phba: Pointer to HBA context object.
9757  * @iocbq: Pointer to iocb object.
9758  *
9759  * The async_event handler calls this routine when it receives
9760  * an ASYNC_STATUS_CN event from the port.  The port generates
9761  * this event when an Abort Sequence request to an rport fails
9762  * twice in succession.  The abort could be originated by the
9763  * driver or by the port.  The ABTS could have been for an ELS
9764  * or FCP IO.  The port only generates this event when an ABTS
9765  * fails to complete after one retry.
9766  */
9767 static void
9768 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
9769                           struct lpfc_iocbq *iocbq)
9770 {
9771         struct lpfc_nodelist *ndlp = NULL;
9772         uint16_t rpi = 0, vpi = 0;
9773         struct lpfc_vport *vport = NULL;
9774
9775         /* The rpi in the ulpContext is vport-sensitive. */
9776         vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
9777         rpi = iocbq->iocb.ulpContext;
9778
9779         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9780                         "3092 Port generated ABTS async event "
9781                         "on vpi %d rpi %d status 0x%x\n",
9782                         vpi, rpi, iocbq->iocb.ulpStatus);
9783
9784         vport = lpfc_find_vport_by_vpid(phba, vpi);
9785         if (!vport)
9786                 goto err_exit;
9787         ndlp = lpfc_findnode_rpi(vport, rpi);
9788         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
9789                 goto err_exit;
9790
9791         if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
9792                 lpfc_sli_abts_recover_port(vport, ndlp);
9793         return;
9794
9795  err_exit:
9796         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9797                         "3095 Event Context not found, no "
9798                         "action on vpi %d rpi %d status 0x%x\n",
9799                         vpi, rpi, iocbq->iocb.ulpStatus);
9801 }
9802
9803 /**
 * lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
9804  * @phba: pointer to HBA context object.
9805  * @ndlp: nodelist pointer for the impacted rport.
9806  * @axri: pointer to the wcqe containing the failed exchange.
9807  *
9808  * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
9809  * port.  The port generates this event when an abort exchange request to an
9810  * rport fails twice in succession with no reply.  The abort could be originated
9811  * by the driver or by the port.  The ABTS could have been for an ELS or FCP IO.
9812  */
9813 void
9814 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
9815                            struct lpfc_nodelist *ndlp,
9816                            struct sli4_wcqe_xri_aborted *axri)
9817 {
9818         struct lpfc_vport *vport;
9819         uint32_t ext_status = 0;
9820
9821         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
9822                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9823                                 "3115 Node Context not found, driver "
9824                                 "ignoring abts err event\n");
9825                 return;
9826         }
9827
9828         vport = ndlp->vport;
9829         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9830                         "3116 Port generated FCP XRI ABORT event on "
9831                         "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
9832                         ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
9833                         bf_get(lpfc_wcqe_xa_xri, axri),
9834                         bf_get(lpfc_wcqe_xa_status, axri),
9835                         axri->parameter);
9836
9837         /*
9838          * Catch the ABTS protocol failure case.  Older OCe FW releases returned
9839          * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
9840          * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
9841          */
9842         ext_status = axri->parameter & IOERR_PARAM_MASK;
9843         if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
9844             ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
9845                 lpfc_sli_abts_recover_port(vport, ndlp);
9846 }
9847
9848 /**
9849  * lpfc_sli_async_event_handler - ASYNC iocb handler function
9850  * @phba: Pointer to HBA context object.
9851  * @pring: Pointer to driver SLI ring object.
9852  * @iocbq: Pointer to iocb object.
9853  *
9854  * This function is called by the slow ring event handler
9855  * function when there is an ASYNC event iocb in the ring.
9856  * This function is called with no lock held.
9857  * Currently this function handles only temperature related
9858  * ASYNC events. The function decodes the temperature sensor
9859  * event message and posts events for the management applications.
9860  **/
9861 static void
9862 lpfc_sli_async_event_handler(struct lpfc_hba * phba,
9863         struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
9864 {
9865         IOCB_t *icmd;
9866         uint16_t evt_code;
9867         struct temp_event temp_event_data;
9868         struct Scsi_Host *shost;
9869         uint32_t *iocb_w;
9870
9871         icmd = &iocbq->iocb;
9872         evt_code = icmd->un.asyncstat.evt_code;
9873
9874         switch (evt_code) {
9875         case ASYNC_TEMP_WARN:
9876         case ASYNC_TEMP_SAFE:
9877                 temp_event_data.data = (uint32_t) icmd->ulpContext;
9878                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
9879                 if (evt_code == ASYNC_TEMP_WARN) {
9880                         temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
9881                         lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
9882                                 "0347 Adapter is very hot, please take "
9883                                 "corrective action. temperature : %d Celsius\n",
9884                                 (uint32_t) icmd->ulpContext);
9885                 } else {
9886                         temp_event_data.event_code = LPFC_NORMAL_TEMP;
9887                         lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
9888                                 "0340 Adapter temperature is OK now. "
9889                                 "temperature : %d Celsius\n",
9890                                 (uint32_t) icmd->ulpContext);
9891                 }
9892
9893                 /* Send temperature change event to applications */
9894                 shost = lpfc_shost_from_vport(phba->pport);
9895                 fc_host_post_vendor_event(shost, fc_get_event_number(),
9896                         sizeof(temp_event_data), (char *) &temp_event_data,
9897                         LPFC_NL_VENDOR_ID);
9898                 break;
9899         case ASYNC_STATUS_CN:
9900                 lpfc_sli_abts_err_handler(phba, iocbq);
9901                 break;
9902         default:
9903                 iocb_w = (uint32_t *) icmd;
9904                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9905                         "0346 Ring %d handler: unexpected ASYNC_STATUS"
9906                         " evt_code 0x%x\n"
9907                         "W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
9908                         "W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
9909                         "W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
9910                         "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
9911                         pring->ringno, icmd->un.asyncstat.evt_code,
9912                         iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
9913                         iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
9914                         iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
9915                         iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
9916
9917                 break;
9918         }
9919 }
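
/*
 * Illustrative sketch (not compiled): the slow-path ring event code
 * reaches this handler through the hook installed by lpfc_sli_setup()
 * on the ELS ring, roughly as:
 *
 *      if (pring->lpfc_sli_rcv_async_status)
 *              pring->lpfc_sli_rcv_async_status(phba, pring, iocbq);
 *
 * so asynchronous status iocbs bypass the normal solicited
 * completion path.
 */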
9920
9921
/**
 * lpfc_sli4_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli4_setup sets up the receive masks of the SLI-4 ELS ring for
 * unsolicited ELS and CT events. This function is called during driver
 * attach to the HBA, before interrupts are enabled, so no locking is
 * needed.
 *
 * This function always returns 0.
 **/
9933 int
9934 lpfc_sli4_setup(struct lpfc_hba *phba)
9935 {
9936         struct lpfc_sli_ring *pring;
9937
9938         pring = phba->sli4_hba.els_wq->pring;
9939         pring->num_mask = LPFC_MAX_RING_MASK;
9940         pring->prt[0].profile = 0;      /* Mask 0 */
9941         pring->prt[0].rctl = FC_RCTL_ELS_REQ;
9942         pring->prt[0].type = FC_TYPE_ELS;
9943         pring->prt[0].lpfc_sli_rcv_unsol_event =
9944             lpfc_els_unsol_event;
9945         pring->prt[1].profile = 0;      /* Mask 1 */
9946         pring->prt[1].rctl = FC_RCTL_ELS_REP;
9947         pring->prt[1].type = FC_TYPE_ELS;
9948         pring->prt[1].lpfc_sli_rcv_unsol_event =
9949             lpfc_els_unsol_event;
9950         pring->prt[2].profile = 0;      /* Mask 2 */
9951         /* NameServer Inquiry */
9952         pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
9953         /* NameServer */
9954         pring->prt[2].type = FC_TYPE_CT;
9955         pring->prt[2].lpfc_sli_rcv_unsol_event =
9956             lpfc_ct_unsol_event;
9957         pring->prt[3].profile = 0;      /* Mask 3 */
9958         /* NameServer response */
9959         pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
9960         /* NameServer */
9961         pring->prt[3].type = FC_TYPE_CT;
9962         pring->prt[3].lpfc_sli_rcv_unsol_event =
9963             lpfc_ct_unsol_event;
9964         return 0;
9965 }
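
/*
 * Illustrative sketch (not compiled): unsolicited receive dispatch
 * matches a frame's R_CTL/TYPE against the prt[] masks set up above,
 * roughly as:
 *
 *      for (i = 0; i < pring->num_mask; i++)
 *              if (pring->prt[i].rctl == fch_r_ctl &&
 *                  pring->prt[i].type == fch_type)
 *                      pring->prt[i].lpfc_sli_rcv_unsol_event(phba,
 *                                      pring, saveq);
 *
 * which is how ELS and CT traffic reach their separate handlers.
 */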
9966
/**
 * lpfc_sli_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_setup sets up the rings of the SLI interface with the number
 * of iocbs per ring and the iotag ranges. This function is called during
 * driver attach to the HBA, before interrupts are enabled, so no locking
 * is needed.
 *
 * This function always returns 0. SLI-3 only.
 **/
9978 int
9979 lpfc_sli_setup(struct lpfc_hba *phba)
9980 {
9981         int i, totiocbsize = 0;
9982         struct lpfc_sli *psli = &phba->sli;
9983         struct lpfc_sli_ring *pring;
9984
9985         psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
9986         psli->sli_flag = 0;
9987
9988         psli->iocbq_lookup = NULL;
9989         psli->iocbq_lookup_len = 0;
9990         psli->last_iotag = 0;
9991
9992         for (i = 0; i < psli->num_rings; i++) {
9993                 pring = &psli->sli3_ring[i];
9994                 switch (i) {
9995                 case LPFC_FCP_RING:     /* ring 0 - FCP */
9996                         /* numCiocb and numRiocb are used in config_port */
9997                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
9998                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
9999                         pring->sli.sli3.numCiocb +=
10000                                 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10001                         pring->sli.sli3.numRiocb +=
10002                                 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10003                         pring->sli.sli3.numCiocb +=
10004                                 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10005                         pring->sli.sli3.numRiocb +=
10006                                 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10007                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10008                                                         SLI3_IOCB_CMD_SIZE :
10009                                                         SLI2_IOCB_CMD_SIZE;
10010                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10011                                                         SLI3_IOCB_RSP_SIZE :
10012                                                         SLI2_IOCB_RSP_SIZE;
10013                         pring->iotag_ctr = 0;
10014                         pring->iotag_max =
10015                             (phba->cfg_hba_queue_depth * 2);
10016                         pring->fast_iotag = pring->iotag_max;
10017                         pring->num_mask = 0;
10018                         break;
10019                 case LPFC_EXTRA_RING:   /* ring 1 - EXTRA */
10020                         /* numCiocb and numRiocb are used in config_port */
10021                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10022                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10023                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10024                                                         SLI3_IOCB_CMD_SIZE :
10025                                                         SLI2_IOCB_CMD_SIZE;
10026                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10027                                                         SLI3_IOCB_RSP_SIZE :
10028                                                         SLI2_IOCB_RSP_SIZE;
10029                         pring->iotag_max = phba->cfg_hba_queue_depth;
10030                         pring->num_mask = 0;
10031                         break;
10032                 case LPFC_ELS_RING:     /* ring 2 - ELS / CT */
10033                         /* numCiocb and numRiocb are used in config_port */
10034                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10035                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10036                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10037                                                         SLI3_IOCB_CMD_SIZE :
10038                                                         SLI2_IOCB_CMD_SIZE;
10039                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10040                                                         SLI3_IOCB_RSP_SIZE :
10041                                                         SLI2_IOCB_RSP_SIZE;
10042                         pring->fast_iotag = 0;
10043                         pring->iotag_ctr = 0;
10044                         pring->iotag_max = 4096;
10045                         pring->lpfc_sli_rcv_async_status =
10046                                 lpfc_sli_async_event_handler;
10047                         pring->num_mask = LPFC_MAX_RING_MASK;
10048                         pring->prt[0].profile = 0;      /* Mask 0 */
10049                         pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10050                         pring->prt[0].type = FC_TYPE_ELS;
10051                         pring->prt[0].lpfc_sli_rcv_unsol_event =
10052                             lpfc_els_unsol_event;
10053                         pring->prt[1].profile = 0;      /* Mask 1 */
10054                         pring->prt[1].rctl = FC_RCTL_ELS_REP;
10055                         pring->prt[1].type = FC_TYPE_ELS;
10056                         pring->prt[1].lpfc_sli_rcv_unsol_event =
10057                             lpfc_els_unsol_event;
10058                         pring->prt[2].profile = 0;      /* Mask 2 */
10059                         /* NameServer Inquiry */
10060                         pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10061                         /* NameServer */
10062                         pring->prt[2].type = FC_TYPE_CT;
10063                         pring->prt[2].lpfc_sli_rcv_unsol_event =
10064                             lpfc_ct_unsol_event;
10065                         pring->prt[3].profile = 0;      /* Mask 3 */
10066                         /* NameServer response */
10067                         pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10068                         /* NameServer */
10069                         pring->prt[3].type = FC_TYPE_CT;
10070                         pring->prt[3].lpfc_sli_rcv_unsol_event =
10071                             lpfc_ct_unsol_event;
10072                         break;
10073                 }
10074                 totiocbsize += (pring->sli.sli3.numCiocb *
10075                         pring->sli.sli3.sizeCiocb) +
10076                         (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
10077         }
10078         if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
10079                 /* Too many cmd / rsp ring entries in SLI2 SLIM */
10080                 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10081                        "SLI2 SLIM Data: x%x x%lx\n",
10082                        phba->brd_no, totiocbsize,
10083                        (unsigned long) MAX_SLIM_IOCB_SIZE);
10084         }
10085         if (phba->cfg_multi_ring_support == 2)
10086                 lpfc_extra_ring_setup(phba);
10087
10088         return 0;
10089 }
10090
/**
 * lpfc_sli4_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli4_queue_init sets up the mailbox queues and the iocb queues
 * of each work queue's ring, and initializes the ring indices of each
 * ring. This function is called with no lock held during the
 * initialization of the SLI interface of an HBA. It returns nothing.
 **/
10102 void
10103 lpfc_sli4_queue_init(struct lpfc_hba *phba)
10104 {
10105         struct lpfc_sli *psli;
10106         struct lpfc_sli_ring *pring;
10107         int i;
10108
10109         psli = &phba->sli;
10110         spin_lock_irq(&phba->hbalock);
10111         INIT_LIST_HEAD(&psli->mboxq);
10112         INIT_LIST_HEAD(&psli->mboxq_cmpl);
        /* Initialize list headers for txq and txcmplq as doubly linked lists */
10114         for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
10115                 pring = phba->sli4_hba.fcp_wq[i]->pring;
10116                 pring->flag = 0;
10117                 pring->ringno = LPFC_FCP_RING;
10118                 INIT_LIST_HEAD(&pring->txq);
10119                 INIT_LIST_HEAD(&pring->txcmplq);
10120                 INIT_LIST_HEAD(&pring->iocb_continueq);
10121                 spin_lock_init(&pring->ring_lock);
10122         }
10123         for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
10124                 pring = phba->sli4_hba.nvme_wq[i]->pring;
10125                 pring->flag = 0;
10126                 pring->ringno = LPFC_FCP_RING;
10127                 INIT_LIST_HEAD(&pring->txq);
10128                 INIT_LIST_HEAD(&pring->txcmplq);
10129                 INIT_LIST_HEAD(&pring->iocb_continueq);
10130                 spin_lock_init(&pring->ring_lock);
10131         }
10132         pring = phba->sli4_hba.els_wq->pring;
10133         pring->flag = 0;
10134         pring->ringno = LPFC_ELS_RING;
10135         INIT_LIST_HEAD(&pring->txq);
10136         INIT_LIST_HEAD(&pring->txcmplq);
10137         INIT_LIST_HEAD(&pring->iocb_continueq);
10138         spin_lock_init(&pring->ring_lock);
10139
10140         if (phba->cfg_nvme_io_channel) {
10141                 pring = phba->sli4_hba.nvmels_wq->pring;
10142                 pring->flag = 0;
10143                 pring->ringno = LPFC_ELS_RING;
10144                 INIT_LIST_HEAD(&pring->txq);
10145                 INIT_LIST_HEAD(&pring->txcmplq);
10146                 INIT_LIST_HEAD(&pring->iocb_continueq);
10147                 spin_lock_init(&pring->ring_lock);
10148         }
10149
10150         if (phba->cfg_fof) {
10151                 pring = phba->sli4_hba.oas_wq->pring;
10152                 pring->flag = 0;
10153                 pring->ringno = LPFC_FCP_RING;
10154                 INIT_LIST_HEAD(&pring->txq);
10155                 INIT_LIST_HEAD(&pring->txcmplq);
10156                 INIT_LIST_HEAD(&pring->iocb_continueq);
10157                 spin_lock_init(&pring->ring_lock);
10158         }
10159
10160         spin_unlock_irq(&phba->hbalock);
10161 }
10162
/**
 * lpfc_sli_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_queue_init sets up the mailbox queues and the iocb queues
 * of each SLI-3 ring, and initializes the ring indices of each ring.
 * This function is called with no lock held during the initialization
 * of the SLI interface of an HBA. It returns nothing.
 **/
10174 void
10175 lpfc_sli_queue_init(struct lpfc_hba *phba)
10176 {
10177         struct lpfc_sli *psli;
10178         struct lpfc_sli_ring *pring;
10179         int i;
10180
10181         psli = &phba->sli;
10182         spin_lock_irq(&phba->hbalock);
10183         INIT_LIST_HEAD(&psli->mboxq);
10184         INIT_LIST_HEAD(&psli->mboxq_cmpl);
        /* Initialize list headers for txq and txcmplq as doubly linked lists */
10186         for (i = 0; i < psli->num_rings; i++) {
10187                 pring = &psli->sli3_ring[i];
10188                 pring->ringno = i;
10189                 pring->sli.sli3.next_cmdidx  = 0;
10190                 pring->sli.sli3.local_getidx = 0;
10191                 pring->sli.sli3.cmdidx = 0;
10192                 INIT_LIST_HEAD(&pring->iocb_continueq);
10193                 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
10194                 INIT_LIST_HEAD(&pring->postbufq);
10195                 pring->flag = 0;
10196                 INIT_LIST_HEAD(&pring->txq);
10197                 INIT_LIST_HEAD(&pring->txcmplq);
10198                 spin_lock_init(&pring->ring_lock);
10199         }
10200         spin_unlock_irq(&phba->hbalock);
10201 }
10202
10203 /**
10204  * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
10205  * @phba: Pointer to HBA context object.
10206  *
 * This routine flushes the mailbox command subsystem. It will
 * unconditionally flush all the mailbox commands in the three possible
 * stages of the mailbox command sub-system: the pending mailbox command
 * queue, the outstanding mailbox command, and the completed mailbox
 * command queue. It is the caller's responsibility to make sure that
 * the driver is in the proper state to flush the mailbox command
 * sub-system. Namely, the posting of mailbox commands into the pending
 * mailbox command queue from the various clients must be stopped;
 * either the HBA is in a state in which it will never work on the
 * outstanding mailbox command (such as in EEH or ERATT conditions) or
 * the outstanding mailbox command has been completed.
10217  **/
10218 static void
10219 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
10220 {
10221         LIST_HEAD(completions);
10222         struct lpfc_sli *psli = &phba->sli;
10223         LPFC_MBOXQ_t *pmb;
10224         unsigned long iflag;
10225
10226         /* Flush all the mailbox commands in the mbox system */
10227         spin_lock_irqsave(&phba->hbalock, iflag);
10228         /* The pending mailbox command queue */
10229         list_splice_init(&phba->sli.mboxq, &completions);
10230         /* The outstanding active mailbox command */
10231         if (psli->mbox_active) {
10232                 list_add_tail(&psli->mbox_active->list, &completions);
10233                 psli->mbox_active = NULL;
10234                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10235         }
10236         /* The completed mailbox command queue */
10237         list_splice_init(&phba->sli.mboxq_cmpl, &completions);
10238         spin_unlock_irqrestore(&phba->hbalock, iflag);
10239
10240         /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
10241         while (!list_empty(&completions)) {
10242                 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
10243                 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
10244                 if (pmb->mbox_cmpl)
10245                         pmb->mbox_cmpl(phba, pmb);
10246         }
10247 }
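
/*
 * Illustrative idiom (not compiled): the flush above uses the usual
 * "splice under lock, complete outside the lock" pattern:
 *
 *      spin_lock_irqsave(&phba->hbalock, iflag);
 *      list_splice_init(&queue, &completions);
 *      spin_unlock_irqrestore(&phba->hbalock, iflag);
 *      (run the mbox_cmpl handlers on the spliced commands)
 *
 * Calling the completion handlers outside hbalock lets them safely
 * re-acquire the lock or post new mailbox commands.
 */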
10248
10249 /**
10250  * lpfc_sli_host_down - Vport cleanup function
10251  * @vport: Pointer to virtual port object.
10252  *
 * lpfc_sli_host_down is called to clean up the resources
 * associated with a vport before destroying the virtual
 * port data structures.
 * This function performs the following operations:
 * - Frees the discovery resources associated with this virtual
 *   port.
 * - Frees the iocbs associated with this virtual port in
 *   the txq.
 * - Sends an abort for every iocb command associated with this
 *   vport in the txcmplq.
 *
 * This function is called with no lock held and always returns 1.
10265  **/
10266 int
10267 lpfc_sli_host_down(struct lpfc_vport *vport)
10268 {
10269         LIST_HEAD(completions);
10270         struct lpfc_hba *phba = vport->phba;
10271         struct lpfc_sli *psli = &phba->sli;
10272         struct lpfc_queue *qp = NULL;
10273         struct lpfc_sli_ring *pring;
10274         struct lpfc_iocbq *iocb, *next_iocb;
10275         int i;
10276         unsigned long flags = 0;
10277         uint16_t prev_pring_flag;
10278
10279         lpfc_cleanup_discovery_resources(vport);
10280
10281         spin_lock_irqsave(&phba->hbalock, flags);
10282
10283         /*
10284          * Error everything on the txq since these iocbs
10285          * have not been given to the FW yet.
10286          * Also issue ABTS for everything on the txcmplq
10287          */
10288         if (phba->sli_rev != LPFC_SLI_REV4) {
10289                 for (i = 0; i < psli->num_rings; i++) {
10290                         pring = &psli->sli3_ring[i];
10291                         prev_pring_flag = pring->flag;
10292                         /* Only slow rings */
10293                         if (pring->ringno == LPFC_ELS_RING) {
10294                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10295                                 /* Set the lpfc data pending flag */
10296                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
10297                         }
10298                         list_for_each_entry_safe(iocb, next_iocb,
10299                                                  &pring->txq, list) {
10300                                 if (iocb->vport != vport)
10301                                         continue;
10302                                 list_move_tail(&iocb->list, &completions);
10303                         }
10304                         list_for_each_entry_safe(iocb, next_iocb,
10305                                                  &pring->txcmplq, list) {
10306                                 if (iocb->vport != vport)
10307                                         continue;
10308                                 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10309                         }
10310                         pring->flag = prev_pring_flag;
10311                 }
10312         } else {
10313                 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10314                         pring = qp->pring;
10315                         if (!pring)
10316                                 continue;
10317                         if (pring == phba->sli4_hba.els_wq->pring) {
10318                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10319                                 /* Set the lpfc data pending flag */
10320                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
10321                         }
10322                         prev_pring_flag = pring->flag;
10323                         spin_lock_irq(&pring->ring_lock);
10324                         list_for_each_entry_safe(iocb, next_iocb,
10325                                                  &pring->txq, list) {
10326                                 if (iocb->vport != vport)
10327                                         continue;
10328                                 list_move_tail(&iocb->list, &completions);
10329                         }
10330                         spin_unlock_irq(&pring->ring_lock);
10331                         list_for_each_entry_safe(iocb, next_iocb,
10332                                                  &pring->txcmplq, list) {
10333                                 if (iocb->vport != vport)
10334                                         continue;
10335                                 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10336                         }
10337                         pring->flag = prev_pring_flag;
10338                 }
10339         }
10340         spin_unlock_irqrestore(&phba->hbalock, flags);
10341
10342         /* Cancel all the IOCBs from the completions list */
10343         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10344                               IOERR_SLI_DOWN);
10345         return 1;
10346 }
10347
10348 /**
10349  * lpfc_sli_hba_down - Resource cleanup function for the HBA
10350  * @phba: Pointer to HBA context object.
10351  *
10352  * This function cleans up all iocb, buffers, mailbox commands
10353  * while shutting down the HBA. This function is called with no
10354  * lock held and always returns 1.
10355  * This function does the following to cleanup driver resources:
10356  * - Free discovery resources for each virtual port
10357  * - Cleanup any pending fabric iocbs
10358  * - Iterate through the iocb txq and free each entry
10359  *   in the list.
10360  * - Free up any buffer posted to the HBA
10361  * - Free mailbox commands in the mailbox queue.
10362  **/
10363 int
10364 lpfc_sli_hba_down(struct lpfc_hba *phba)
10365 {
10366         LIST_HEAD(completions);
10367         struct lpfc_sli *psli = &phba->sli;
10368         struct lpfc_queue *qp = NULL;
10369         struct lpfc_sli_ring *pring;
10370         struct lpfc_dmabuf *buf_ptr;
10371         unsigned long flags = 0;
10372         int i;
10373
10374         /* Shutdown the mailbox command sub-system */
10375         lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
10376
10377         lpfc_hba_down_prep(phba);
10378
10379         lpfc_fabric_abort_hba(phba);
10380
10381         spin_lock_irqsave(&phba->hbalock, flags);
10382
10383         /*
10384          * Error everything on the txq since these iocbs
10385          * have not been given to the FW yet.
10386          */
10387         if (phba->sli_rev != LPFC_SLI_REV4) {
10388                 for (i = 0; i < psli->num_rings; i++) {
10389                         pring = &psli->sli3_ring[i];
10390                         /* Only slow rings */
10391                         if (pring->ringno == LPFC_ELS_RING) {
10392                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10393                                 /* Set the lpfc data pending flag */
10394                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
10395                         }
10396                         list_splice_init(&pring->txq, &completions);
10397                 }
10398         } else {
10399                 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10400                         pring = qp->pring;
10401                         if (!pring)
10402                                 continue;
10403                         spin_lock_irq(&pring->ring_lock);
10404                         list_splice_init(&pring->txq, &completions);
10405                         spin_unlock_irq(&pring->ring_lock);
10406                         if (pring == phba->sli4_hba.els_wq->pring) {
10407                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10408                                 /* Set the lpfc data pending flag */
10409                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
10410                         }
10411                 }
10412         }
10413         spin_unlock_irqrestore(&phba->hbalock, flags);
10414
10415         /* Cancel all the IOCBs from the completions list */
10416         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10417                               IOERR_SLI_DOWN);
10418
10419         spin_lock_irqsave(&phba->hbalock, flags);
10420         list_splice_init(&phba->elsbuf, &completions);
10421         phba->elsbuf_cnt = 0;
10422         phba->elsbuf_prev_cnt = 0;
10423         spin_unlock_irqrestore(&phba->hbalock, flags);
10424
10425         while (!list_empty(&completions)) {
10426                 list_remove_head(&completions, buf_ptr,
10427                         struct lpfc_dmabuf, list);
10428                 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
10429                 kfree(buf_ptr);
10430         }
10431
10432         /* Return any active mbox cmds */
10433         del_timer_sync(&psli->mbox_tmo);
10434
10435         spin_lock_irqsave(&phba->pport->work_port_lock, flags);
10436         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
10437         spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
10438
10439         return 1;
10440 }
10441
10442 /**
10443  * lpfc_sli_pcimem_bcopy - SLI memory copy function
10444  * @srcp: Source memory pointer.
10445  * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy (a multiple of the 32-bit word size).
 *
 * This function is used for copying data between driver memory
 * and the SLI memory. This function also changes the endianness
 * of each word if the native endianness differs from the SLI
 * (little-endian) endianness. This function can be called with or
 * without a lock.
10453  **/
10454 void
10455 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
10456 {
10457         uint32_t *src = srcp;
10458         uint32_t *dest = destp;
10459         uint32_t ldata;
10460         int i;
10461
        for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10463                 ldata = *src;
10464                 ldata = le32_to_cpu(ldata);
10465                 *dest = ldata;
10466                 src++;
10467                 dest++;
10468         }
10469 }
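
/*
 * Usage sketch (illustrative only; src and dst are hypothetical):
 *
 *      uint32_t dst[4];
 *      lpfc_sli_pcimem_bcopy(src, dst, sizeof(dst));   (copies 16 bytes)
 *
 * Note that @cnt counts bytes, not 32-bit words; the loop advances one
 * word per iteration until @cnt bytes have been converted.
 */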
10470
10471
10472 /**
10473  * lpfc_sli_bemem_bcopy - SLI memory copy function
10474  * @srcp: Source memory pointer.
10475  * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy (a multiple of the 32-bit word size).
 *
 * This function is used for copying data from a data structure with
 * a big-endian representation to the local (native) endianness.
 * This function can be called with or without a lock.
10481  **/
10482 void
10483 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
10484 {
10485         uint32_t *src = srcp;
10486         uint32_t *dest = destp;
10487         uint32_t ldata;
10488         int i;
10489
10490         for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10491                 ldata = *src;
10492                 ldata = be32_to_cpu(ldata);
10493                 *dest = ldata;
10494                 src++;
10495                 dest++;
10496         }
10497 }
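
/*
 * Illustrative note: this helper mirrors lpfc_sli_pcimem_bcopy() but
 * converts from big-endian rather than little-endian words:
 *
 *      lpfc_sli_bemem_bcopy(be_src, dst, sizeof(dst));
 *
 * On a little-endian host be32_to_cpu() swaps each word; on a
 * big-endian host it is a no-op and the copy degenerates to a plain
 * word-by-word move. (be_src and dst are hypothetical.)
 */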
10498
10499 /**
10500  * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
10501  * @phba: Pointer to HBA context object.
10502  * @pring: Pointer to driver SLI ring object.
10503  * @mp: Pointer to driver buffer object.
10504  *
10505  * This function is called with no lock held.
 * It always returns zero after adding the buffer to the postbufq
10507  * buffer list.
10508  **/
10509 int
10510 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10511                          struct lpfc_dmabuf *mp)
10512 {
        /*
         * Stick struct lpfc_dmabuf at the end of postbufq so the driver
         * can look it up later.
         */
10515         spin_lock_irq(&phba->hbalock);
10516         list_add_tail(&mp->list, &pring->postbufq);
10517         pring->postbufq_cnt++;
10518         spin_unlock_irq(&phba->hbalock);
10519         return 0;
10520 }
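
/*
 * Illustrative pairing (not compiled): a buffer posted here is later
 * reclaimed by DMA address or by buffer tag:
 *
 *      lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *      ...
 *      mp = lpfc_sli_ringpostbuf_get(phba, pring, phys);
 *
 * so postbufq serves as a lookup list for buffers that have been
 * handed to the HBA.
 */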
10521
10522 /**
10523  * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
10524  * @phba: Pointer to HBA context object.
10525  *
 * When HBQ is enabled, buffers are searched based on tags. This function
 * allocates a tag for a buffer posted using a CMD_QUE_XRI64_CX iocb. The
 * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
 * does not conflict with the tags of buffers posted for unsolicited
 * events. The function returns the allocated tag. It is called with
 * no locks held.
10532  **/
10533 uint32_t
10534 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10535 {
10536         spin_lock_irq(&phba->hbalock);
10537         phba->buffer_tag_count++;
        /*
         * Always set QUE_BUFTAG_BIT to distinguish this tag from one
         * assigned by the HBQ.
         */
10542         phba->buffer_tag_count |= QUE_BUFTAG_BIT;
10543         spin_unlock_irq(&phba->hbalock);
10544         return phba->buffer_tag_count;
10545 }
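
/*
 * Illustrative check (not compiled): because QUE_BUFTAG_BIT is always
 * set here, a consumer can tell a QUE buffer tag from an HBQ tag:
 *
 *      if (tag & QUE_BUFTAG_BIT)
 *              mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
 *
 * HBQ-assigned tags never carry this bit.
 */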
10546
10547 /**
10548  * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
10549  * @phba: Pointer to HBA context object.
10550  * @pring: Pointer to driver SLI ring object.
10551  * @tag: Buffer tag.
10552  *
 * Buffers posted using a CMD_QUE_XRI64_CX iocb are put on the
 * pring->postbufq list. After the HBA DMAs data to these buffers, a
 * CMD_IOCB_RET_XRI64_CX iocb is posted to the response ring with the
 * tag of the buffer. This function searches the pring->postbufq list
 * using the tag to find the buffer associated with the
 * CMD_IOCB_RET_XRI64_CX iocb. If the buffer is found, its lpfc_dmabuf
 * object is returned to the caller; otherwise NULL is returned.
10560  * This function is called with no lock held.
10561  **/
10562 struct lpfc_dmabuf *
10563 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10564                         uint32_t tag)
10565 {
10566         struct lpfc_dmabuf *mp, *next_mp;
10567         struct list_head *slp = &pring->postbufq;
10568
10569         /* Search postbufq, from the beginning, looking for a match on tag */
10570         spin_lock_irq(&phba->hbalock);
10571         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10572                 if (mp->buffer_tag == tag) {
10573                         list_del_init(&mp->list);
10574                         pring->postbufq_cnt--;
10575                         spin_unlock_irq(&phba->hbalock);
10576                         return mp;
10577                 }
10578         }
10579
10580         spin_unlock_irq(&phba->hbalock);
10581         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10582                         "0402 Cannot find virtual addr for buffer tag on "
10583                         "ring %d Data x%lx x%p x%p x%x\n",
10584                         pring->ringno, (unsigned long) tag,
10585                         slp->next, slp->prev, pring->postbufq_cnt);
10586
10587         return NULL;
10588 }
10589
10590 /**
10591  * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
10592  * @phba: Pointer to HBA context object.
10593  * @pring: Pointer to driver SLI ring object.
10594  * @phys: DMA address of the buffer.
10595  *
 * This function searches the buffer list using the dma_address
 * of an unsolicited event to find the driver's lpfc_dmabuf object
 * corresponding to that dma_address. The function returns the
 * lpfc_dmabuf object if a buffer is found; otherwise it returns NULL.
10600  * This function is called by the ct and els unsolicited event
10601  * handlers to get the buffer associated with the unsolicited
10602  * event.
10603  *
10604  * This function is called with no lock held.
10605  **/
10606 struct lpfc_dmabuf *
10607 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10608                          dma_addr_t phys)
10609 {
10610         struct lpfc_dmabuf *mp, *next_mp;
10611         struct list_head *slp = &pring->postbufq;
10612
10613         /* Search postbufq, from the beginning, looking for a match on phys */
10614         spin_lock_irq(&phba->hbalock);
10615         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10616                 if (mp->phys == phys) {
10617                         list_del_init(&mp->list);
10618                         pring->postbufq_cnt--;
10619                         spin_unlock_irq(&phba->hbalock);
10620                         return mp;
10621                 }
10622         }
10623
10624         spin_unlock_irq(&phba->hbalock);
10625         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10626                         "0410 Cannot find virtual addr for mapped buf on "
10627                         "ring %d Data x%llx x%p x%p x%x\n",
10628                         pring->ringno, (unsigned long long)phys,
10629                         slp->next, slp->prev, pring->postbufq_cnt);
10630         return NULL;
10631 }
10632
10633 /**
10634  * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
10635  * @phba: Pointer to HBA context object.
10636  * @cmdiocb: Pointer to driver command iocb object.
10637  * @rspiocb: Pointer to driver response iocb object.
10638  *
10639  * This function is the completion handler for the abort iocbs for
10640  * ELS commands. This function is called from the ELS ring event
10641  * handler with no lock held. This function frees memory resources
10642  * associated with the abort iocb.
10643  **/
10644 static void
10645 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10646                         struct lpfc_iocbq *rspiocb)
10647 {
10648         IOCB_t *irsp = &rspiocb->iocb;
10649         uint16_t abort_iotag, abort_context;
10650         struct lpfc_iocbq *abort_iocb = NULL;
10651
10652         if (irsp->ulpStatus) {
10653
                /*
                 * Assume that the port already completed and returned, or
                 * will return the iocb. Just log the message.
                 */
10658                 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
10659                 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
10660
10661                 spin_lock_irq(&phba->hbalock);
10662                 if (phba->sli_rev < LPFC_SLI_REV4) {
10663                         if (abort_iotag != 0 &&
10664                                 abort_iotag <= phba->sli.last_iotag)
10665                                 abort_iocb =
10666                                         phba->sli.iocbq_lookup[abort_iotag];
                } else {
                        /* For SLI4 the abort_tag is the XRI,
                         * so the abort routine puts the iotag of the iocb
                         * being aborted in the context field of the abort
                         * IOCB.
                         */
                        abort_iocb = phba->sli.iocbq_lookup[abort_context];
                }
10674
10675                 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
10676                                 "0327 Cannot abort els iocb %p "
10677                                 "with tag %x context %x, abort status %x, "
10678                                 "abort code %x\n",
10679                                 abort_iocb, abort_iotag, abort_context,
10680                                 irsp->ulpStatus, irsp->un.ulpWord[4]);
10681
10682                 spin_unlock_irq(&phba->hbalock);
10683         }
10684         lpfc_sli_release_iocbq(phba, cmdiocb);
10685         return;
10686 }
10687
10688 /**
10689  * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
10690  * @phba: Pointer to HBA context object.
10691  * @cmdiocb: Pointer to driver command iocb object.
10692  * @rspiocb: Pointer to driver response iocb object.
10693  *
10694  * The function is called from SLI ring event handler with no
10695  * lock held. This function is the completion handler for ELS commands
10696  * which are aborted. The function frees memory resources used for
10697  * the aborted ELS commands.
10698  **/
10699 static void
10700 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10701                      struct lpfc_iocbq *rspiocb)
10702 {
10703         IOCB_t *irsp = &rspiocb->iocb;
10704
10705         /* ELS cmd tag <ulpIoTag> completes */
10706         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10707                         "0139 Ignoring ELS cmd tag x%x completion Data: "
10708                         "x%x x%x x%x\n",
10709                         irsp->ulpIoTag, irsp->ulpStatus,
10710                         irsp->un.ulpWord[4], irsp->ulpTimeout);
10711         if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
10712                 lpfc_ct_free_iocb(phba, cmdiocb);
10713         else
10714                 lpfc_els_free_iocb(phba, cmdiocb);
10715         return;
10716 }
10717
10718 /**
10719  * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
10720  * @phba: Pointer to HBA context object.
10721  * @pring: Pointer to driver SLI ring object.
10722  * @cmdiocb: Pointer to driver command iocb object.
10723  *
 * This function issues an abort iocb for the provided command iocb down to
 * the port. Unless the outstanding command iocb is itself an abort
 * request, this function issues the abort unconditionally. This function
 * is called with hbalock held. The function returns 0 when it fails due
 * to a memory allocation failure or when the command iocb is an abort
 * request.
10729  **/
10730 static int
10731 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10732                            struct lpfc_iocbq *cmdiocb)
10733 {
10734         struct lpfc_vport *vport = cmdiocb->vport;
10735         struct lpfc_iocbq *abtsiocbp;
10736         IOCB_t *icmd = NULL;
10737         IOCB_t *iabt = NULL;
10738         int retval;
10739         unsigned long iflags;
10740
10741         lockdep_assert_held(&phba->hbalock);
10742
10743         /*
10744          * There are certain command types we don't want to abort.  And we
10745          * don't want to abort commands that are already in the process of
10746          * being aborted.
10747          */
10748         icmd = &cmdiocb->iocb;
10749         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
10750             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
10751             (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10752                 return 0;
10753
10754         /* issue ABTS for this IOCB based on iotag */
10755         abtsiocbp = __lpfc_sli_get_iocbq(phba);
10756         if (abtsiocbp == NULL)
10757                 return 0;
10758
10759         /* This signals the response to set the correct status
10760          * before calling the completion handler
10761          */
10762         cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
10763
10764         iabt = &abtsiocbp->iocb;
10765         iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
10766         iabt->un.acxri.abortContextTag = icmd->ulpContext;
10767         if (phba->sli_rev == LPFC_SLI_REV4) {
10768                 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
10769                 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
10770         }
10771         else
10772                 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
10773         iabt->ulpLe = 1;
10774         iabt->ulpClass = icmd->ulpClass;
10775
10776         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
10777         abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
10778         if (cmdiocb->iocb_flag & LPFC_IO_FCP)
10779                 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
10780         if (cmdiocb->iocb_flag & LPFC_IO_FOF)
10781                 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
10782
10783         if (phba->link_state >= LPFC_LINK_UP)
10784                 iabt->ulpCommand = CMD_ABORT_XRI_CN;
10785         else
10786                 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
10787
10788         abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
10789         abtsiocbp->vport = vport;
10790
10791         lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
10792                          "0339 Abort xri x%x, original iotag x%x, "
10793                          "abort cmd iotag x%x\n",
10794                          iabt->un.acxri.abortIoTag,
10795                          iabt->un.acxri.abortContextTag,
10796                          abtsiocbp->iotag);
10797
10798         if (phba->sli_rev == LPFC_SLI_REV4) {
10799                 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
10800                 if (unlikely(pring == NULL))
10801                         return 0;
                /* Note: both hbalock and ring_lock need to be held here */
10803                 spin_lock_irqsave(&pring->ring_lock, iflags);
10804                 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10805                         abtsiocbp, 0);
10806                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10807         } else {
10808                 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10809                         abtsiocbp, 0);
10810         }
10811
10812         if (retval)
10813                 __lpfc_sli_release_iocbq(phba, abtsiocbp);
10814
        /*
         * The caller of this routine should check for IOCB_ERROR and
         * handle it properly. This routine no longer removes the iocb
         * from the txcmplq or calls the completion handler on IOCB_ERROR.
         */
10820         return retval;
10821 }
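
/*
 * Illustrative caller pattern (not compiled): per the note above, a
 * caller checks the return value instead of expecting cleanup here:
 *
 *      retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
 *      if (retval == IOCB_ERROR)
 *              (cmdiocb stays on the txcmplq; recovery handles it)
 *
 * lpfc_sli_issue_abort_iotag() below is the normal entry point.
 */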
10822
10823 /**
10824  * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
10825  * @phba: Pointer to HBA context object.
10826  * @pring: Pointer to driver SLI ring object.
10827  * @cmdiocb: Pointer to driver command iocb object.
10828  *
 * This function issues an abort iocb for the provided command iocb. When
 * unloading, no abort iocb is issued for commands on the ELS ring;
 * instead, the callback function of those commands is changed so that
 * nothing happens when they finish. This function is called with hbalock
 * held. The function returns 0 when the command iocb is an abort request.
10835  **/
10836 int
10837 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10838                            struct lpfc_iocbq *cmdiocb)
10839 {
10840         struct lpfc_vport *vport = cmdiocb->vport;
10841         int retval = IOCB_ERROR;
10842         IOCB_t *icmd = NULL;
10843
10844         lockdep_assert_held(&phba->hbalock);
10845
10846         /*
10847          * There are certain command types we don't want to abort.  And we
10848          * don't want to abort commands that are already in the process of
10849          * being aborted.
10850          */
10851         icmd = &cmdiocb->iocb;
10852         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
10853             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
10854             (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10855                 return 0;
10856
10857         if (!pring) {
10858                 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
10859                         cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
10860                 else
10861                         cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
10862                 goto abort_iotag_exit;
10863         }
10864
10865         /*
10866          * If we're unloading, don't abort iocb on the ELS ring, but change
10867          * the callback so that nothing happens when it finishes.
10868          */
10869         if ((vport->load_flag & FC_UNLOADING) &&
10870             (pring->ringno == LPFC_ELS_RING)) {
10871                 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
10872                         cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
10873                 else
10874                         cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
10875                 goto abort_iotag_exit;
10876         }
10877
10878         /* Now, we try to issue the abort to the cmdiocb out */
10879         retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
10880
10881 abort_iotag_exit:
        /*
         * The caller of this routine should check for IOCB_ERROR and
         * handle it properly. This routine no longer removes the iocb
         * from the txcmplq or calls the completion handler on IOCB_ERROR.
         */
10887         return retval;
10888 }
10889
10890 /**
10891  * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb
10892  * @phba: Pointer to HBA context object.
10893  * @pring: Pointer to driver SLI ring object.
10894  * @cmdiocb: Pointer to driver command iocb object.
10895  *
 * This function issues an abort iocb for the provided command iocb down to
 * the port. Unless the outstanding command iocb is itself an abort
 * request, this function issues the abort unconditionally. This function
 * is called with hbalock held. The function returns 0 when it fails due
 * to a memory allocation failure or when the command iocb is an abort
 * request.
10901  **/
10902 static int
10903 lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10904                         struct lpfc_iocbq *cmdiocb)
10905 {
10906         struct lpfc_vport *vport = cmdiocb->vport;
10907         struct lpfc_iocbq *abtsiocbp;
10908         union lpfc_wqe *abts_wqe;
10909         int retval;
10910
10911         /*
10912          * There are certain command types we don't want to abort.  And we
10913          * don't want to abort commands that are already in the process of
10914          * being aborted.
10915          */
10916         if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
10917             cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
10918             (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10919                 return 0;
10920
10921         /* issue ABTS for this io based on iotag */
10922         abtsiocbp = __lpfc_sli_get_iocbq(phba);
10923         if (abtsiocbp == NULL)
10924                 return 0;
10925
10926         /* This signals the response to set the correct status
10927          * before calling the completion handler
10928          */
10929         cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
10930
10931         /* Complete prepping the abort wqe and issue to the FW. */
10932         abts_wqe = &abtsiocbp->wqe;
10933         bf_set(abort_cmd_ia, &abts_wqe->abort_cmd, 0);
10934         bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
10935
10936         /* Explicitly set reserved fields to zero.*/
10937         abts_wqe->abort_cmd.rsrvd4 = 0;
10938         abts_wqe->abort_cmd.rsrvd5 = 0;
10939
10940         /* WQE Common - word 6.  Context is XRI tag.  Set 0. */
10941         bf_set(wqe_xri_tag, &abts_wqe->abort_cmd.wqe_com, 0);
10942         bf_set(wqe_ctxt_tag, &abts_wqe->abort_cmd.wqe_com, 0);
10943
10944         /* word 7 */
10945         bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
10946         bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
10947         bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
10948                cmdiocb->iocb.ulpClass);
10949
10950         /* word 8 - tell the FW to abort the IO associated with this
10951          * outstanding exchange ID.
10952          */
10953         abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag;
10954
10955         /* word 9 - this is the iotag for the abts_wqe completion. */
10956         bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
10957                abtsiocbp->iotag);
10958
10959         /* word 10 */
10960         bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, cmdiocb->hba_wqidx);
10961         bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
10962         bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
10963
10964         /* word 11 */
10965         bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
10966         bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
10967         bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10968
10969         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
10970         abtsiocbp->iocb_flag |= LPFC_IO_NVME;
10971         abtsiocbp->vport = vport;
10972         abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
10973         retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp);
10974         if (retval) {
10975                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
10976                                  "6147 Failed abts issue_wqe with status x%x "
10977                                  "for oxid x%x\n",
10978                                  retval, cmdiocb->sli4_xritag);
10979                 lpfc_sli_release_iocbq(phba, abtsiocbp);
10980                 return retval;
10981         }
10982
10983         lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
10984                          "6148 Drv Abort NVME Request Issued for "
10985                          "ox_id x%x on reqtag x%x\n",
10986                          cmdiocb->sli4_xritag,
10987                          abtsiocbp->iotag);
10988
10989         return retval;
10990 }
10991
10992 /**
10993  * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
10994  * @phba: pointer to lpfc HBA data structure.
10995  *
10996  * This routine will abort all pending and outstanding iocbs to an HBA.
10997  **/
10998 void
10999 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11000 {
11001         struct lpfc_sli *psli = &phba->sli;
11002         struct lpfc_sli_ring *pring;
11003         struct lpfc_queue *qp = NULL;
11004         int i;
11005
11006         if (phba->sli_rev != LPFC_SLI_REV4) {
11007                 for (i = 0; i < psli->num_rings; i++) {
11008                         pring = &psli->sli3_ring[i];
11009                         lpfc_sli_abort_iocb_ring(phba, pring);
11010                 }
11011                 return;
11012         }
11013         list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11014                 pring = qp->pring;
11015                 if (!pring)
11016                         continue;
11017                 lpfc_sli_abort_iocb_ring(phba, pring);
11018         }
11019 }
11020
11021 /**
11022  * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
11023  * @iocbq: Pointer to driver iocb object.
11024  * @vport: Pointer to driver virtual port object.
11025  * @tgt_id: SCSI ID of the target.
11026  * @lun_id: LUN ID of the scsi device.
11027  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
11028  *
 * This function acts as an iocb filter for functions which abort or count
 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It returns
 * 0 if the given iocb matches the filtering criteria and 1 if it
 * does not.
11033  * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
11034  * given iocb is for the SCSI device specified by vport, tgt_id and
11035  * lun_id parameter.
11036  * If ctx_cmd == LPFC_CTX_TGT,  the function returns 0 only if the
11037  * given iocb is for the SCSI target specified by vport and tgt_id
11038  * parameters.
11039  * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
11040  * given iocb is for the SCSI host associated with the given vport.
11041  * This function is called with no locks held.
11042  **/
11043 static int
11044 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11045                            uint16_t tgt_id, uint64_t lun_id,
11046                            lpfc_ctx_cmd ctx_cmd)
11047 {
11048         struct lpfc_scsi_buf *lpfc_cmd;
11049         int rc = 1;
11050
        if (!(iocbq->iocb_flag & LPFC_IO_FCP))
11052                 return rc;
11053
11054         if (iocbq->vport != vport)
11055                 return rc;
11056
11057         lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
11058
11059         if (lpfc_cmd->pCmd == NULL)
11060                 return rc;
11061
11062         switch (ctx_cmd) {
11063         case LPFC_CTX_LUN:
11064                 if ((lpfc_cmd->rdata->pnode) &&
11065                     (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11066                     (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
11067                         rc = 0;
11068                 break;
11069         case LPFC_CTX_TGT:
11070                 if ((lpfc_cmd->rdata->pnode) &&
11071                     (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
11072                         rc = 0;
11073                 break;
11074         case LPFC_CTX_HOST:
11075                 rc = 0;
11076                 break;
11077         default:
11078                 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
11079                         __func__, ctx_cmd);
11080                 break;
11081         }
11082
11083         return rc;
11084 }
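
/*
 * Illustrative use (not compiled): callers walk the iotag lookup table
 * and apply this filter, e.g. to act on a single LUN:
 *
 *      if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
 *                                     LPFC_CTX_LUN) == 0)
 *              (iocbq belongs to that vport/target/LUN)
 *
 * as lpfc_sli_sum_iocb() and lpfc_sli_abort_iocb() below do.
 */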
11085
11086 /**
11087  * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
11088  * @vport: Pointer to virtual port.
11089  * @tgt_id: SCSI ID of the target.
11090  * @lun_id: LUN ID of the scsi device.
11091  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11092  *
 * This function returns the number of FCP commands pending for the vport.
 * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
 * commands pending on the vport for the SCSI device specified by the
 * tgt_id and lun_id parameters.
 * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
 * commands pending on the vport for the SCSI target specified by the
 * tgt_id parameter.
 * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
 * commands pending on the vport.
11102  * This function returns the number of iocbs which satisfy the filter.
11103  * This function is called without any lock held.
11104  **/
11105 int
11106 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11107                   lpfc_ctx_cmd ctx_cmd)
11108 {
11109         struct lpfc_hba *phba = vport->phba;
11110         struct lpfc_iocbq *iocbq;
11111         int sum, i;
11112
11113         spin_lock_irq(&phba->hbalock);
11114         for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11115                 iocbq = phba->sli.iocbq_lookup[i];
11116
                if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
                                               ctx_cmd) == 0)
11119                         sum++;
11120         }
11121         spin_unlock_irq(&phba->hbalock);
11122
11123         return sum;
11124 }
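
/*
 * Illustrative use (assumption, not compiled): an error handler can
 * poll this count while waiting for outstanding I/O to drain:
 *
 *      while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN))
 *              msleep(20);
 *
 * bounded by a timeout in any real caller.
 */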
11125
11126 /**
11127  * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
11128  * @phba: Pointer to HBA context object
11129  * @cmdiocb: Pointer to command iocb object.
11130  * @rspiocb: Pointer to response iocb object.
11131  *
11132  * This function is called when an aborted FCP iocb completes. This
11133  * function is called by the ring event handler with no lock held.
11134  * This function frees the iocb.
11135  **/
11136 void
11137 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11138                         struct lpfc_iocbq *rspiocb)
11139 {
11140         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11141                         "3096 ABORT_XRI_CN completing on rpi x%x "
11142                         "original iotag x%x, abort cmd iotag x%x "
11143                         "status 0x%x, reason 0x%x\n",
11144                         cmdiocb->iocb.un.acxri.abortContextTag,
11145                         cmdiocb->iocb.un.acxri.abortIoTag,
11146                         cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11147                         rspiocb->iocb.un.ulpWord[4]);
11148         lpfc_sli_release_iocbq(phba, cmdiocb);
11149         return;
11150 }
11151
11152 /**
11153  * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
11154  * @vport: Pointer to virtual port.
11155  * @pring: Pointer to driver SLI ring object.
11156  * @tgt_id: SCSI ID of the target.
11157  * @lun_id: LUN ID of the scsi device.
11158  * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11159  *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring, as
 * filtered by lpfc_sli_validate_fcp_iocb().
 * When abort_cmd == LPFC_CTX_LUN, the function sends aborts only to the
 * FCP iocbs associated with the LUN specified by the tgt_id and lun_id
 * parameters.
 * When abort_cmd == LPFC_CTX_TGT, the function sends aborts only to the
 * FCP iocbs associated with the SCSI target specified by the tgt_id
 * parameter.
 * When abort_cmd == LPFC_CTX_HOST, the function sends aborts to all
 * FCP iocbs associated with the virtual port.
 * This function returns the number of iocbs it failed to abort.
 * This function is called with no locks held.
11172  **/
11173 int
11174 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11175                     uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
11176 {
11177         struct lpfc_hba *phba = vport->phba;
11178         struct lpfc_iocbq *iocbq;
11179         struct lpfc_iocbq *abtsiocb;
11180         struct lpfc_sli_ring *pring_s4;
11181         IOCB_t *cmd = NULL;
11182         int errcnt = 0, ret_val = 0;
11183         int i;
11184
11185         for (i = 1; i <= phba->sli.last_iotag; i++) {
11186                 iocbq = phba->sli.iocbq_lookup[i];
11187
11188                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11189                                                abort_cmd) != 0)
11190                         continue;
11191
11192                 /*
11193                  * If the iocbq is already being aborted, don't take a second
11194                  * action; just skip it.
11195                  */
11196                 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11197                         continue;
11198
11199                 /* issue ABTS for this IOCB based on iotag */
11200                 abtsiocb = lpfc_sli_get_iocbq(phba);
11201                 if (abtsiocb == NULL) {
11202                         errcnt++;
11203                         continue;
11204                 }
11205
11206                 /* indicate the IO is being aborted by the driver. */
11207                 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11208
11209                 cmd = &iocbq->iocb;
11210                 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11211                 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
11212                 if (phba->sli_rev == LPFC_SLI_REV4)
11213                         abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11214                 else
11215                         abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
11216                 abtsiocb->iocb.ulpLe = 1;
11217                 abtsiocb->iocb.ulpClass = cmd->ulpClass;
11218                 abtsiocb->vport = vport;
11219
11220                 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11221                 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
11222                 if (iocbq->iocb_flag & LPFC_IO_FCP)
11223                         abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
11224                 if (iocbq->iocb_flag & LPFC_IO_FOF)
11225                         abtsiocb->iocb_flag |= LPFC_IO_FOF;
11226
11227                 if (lpfc_is_link_up(phba))
11228                         abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11229                 else
11230                         abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11231
11232                 /* Setup callback routine and issue the command. */
11233                 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11234                 if (phba->sli_rev == LPFC_SLI_REV4) {
11235                         pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11236                         if (!pring_s4)
11237                                 continue;
11238                         ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11239                                                       abtsiocb, 0);
11240                 } else
11241                         ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11242                                                       abtsiocb, 0);
11243                 if (ret_val == IOCB_ERROR) {
11244                         lpfc_sli_release_iocbq(phba, abtsiocb);
11245                         errcnt++;
11246                         continue;
11247                 }
11248         }
11249
11250         return errcnt;
11251 }
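
/*
 * Usage sketch (illustrative only, not part of the driver): a target
 * reset path might abort every outstanding FCP iocb for a target and
 * treat a non-zero return as a partial failure. The vport, pring and
 * tgt_id values are assumed to come from the caller's context; lun_id
 * is not used for the LPFC_CTX_TGT filter.
 *
 *	int failed;
 *
 *	failed = lpfc_sli_abort_iocb(vport, pring, tgt_id, 0, LPFC_CTX_TGT);
 *	// failed > 0 means some aborts could not be issued; escalate.
 */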
11252
11253 /**
11254  * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
11255  * @vport: Pointer to virtual port.
11256  * @pring: Pointer to driver SLI ring object.
11257  * @tgt_id: SCSI ID of the target.
11258  * @lun_id: LUN ID of the scsi device.
11259  * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11260  *
11261  * This function sends an abort command for every SCSI command
11262  * associated with the given virtual port pending on the ring that
11263  * passes the lpfc_sli_validate_fcp_iocb filter.
11264  * When cmd == LPFC_CTX_LUN, the function sends an abort only to the
11265  * FCP iocbs associated with the LUN specified by the tgt_id and
11266  * lun_id parameters.
11267  * When cmd == LPFC_CTX_TGT, it sends an abort only to the FCP iocbs
11268  * associated with the SCSI target specified by the tgt_id parameter.
11269  * When cmd == LPFC_CTX_HOST, it sends an abort to all FCP iocbs
11270  * associated with the virtual port.
11271  * This function returns the number of iocbs it aborted.
11272  * This function is called with no locks held right after a taskmgmt
11273  * command is sent.
11274  **/
11275 int
11276 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11277                         uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
11278 {
11279         struct lpfc_hba *phba = vport->phba;
11280         struct lpfc_scsi_buf *lpfc_cmd;
11281         struct lpfc_iocbq *abtsiocbq;
11282         struct lpfc_nodelist *ndlp;
11283         struct lpfc_iocbq *iocbq;
11284         IOCB_t *icmd;
11285         int sum, i, ret_val;
11286         unsigned long iflags;
11287         struct lpfc_sli_ring *pring_s4;
11288
11289         spin_lock_irq(&phba->hbalock);
11290
11291         /* all I/Os are in the process of being flushed */
11292         if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
11293                 spin_unlock_irq(&phba->hbalock);
11294                 return 0;
11295         }
11296         sum = 0;
11297
11298         for (i = 1; i <= phba->sli.last_iotag; i++) {
11299                 iocbq = phba->sli.iocbq_lookup[i];
11300
11301                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11302                                                cmd) != 0)
11303                         continue;
11304
11305                 /*
11306                  * If the iocbq is already being aborted, don't take a second
11307                  * action; just skip it.
11308                  */
11309                 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11310                         continue;
11311
11312                 /* issue ABTS for this IOCB based on iotag */
11313                 abtsiocbq = __lpfc_sli_get_iocbq(phba);
11314                 if (abtsiocbq == NULL)
11315                         continue;
11316
11317                 icmd = &iocbq->iocb;
11318                 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11319                 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
11320                 if (phba->sli_rev == LPFC_SLI_REV4)
11321                         abtsiocbq->iocb.un.acxri.abortIoTag =
11322                                                          iocbq->sli4_xritag;
11323                 else
11324                         abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
11325                 abtsiocbq->iocb.ulpLe = 1;
11326                 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
11327                 abtsiocbq->vport = vport;
11328
11329                 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11330                 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
11331                 if (iocbq->iocb_flag & LPFC_IO_FCP)
11332                         abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
11333                 if (iocbq->iocb_flag & LPFC_IO_FOF)
11334                         abtsiocbq->iocb_flag |= LPFC_IO_FOF;
11335
11336                 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
11337                 ndlp = lpfc_cmd->rdata->pnode;
11338
11339                 if (lpfc_is_link_up(phba) &&
11340                     (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
11341                         abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11342                 else
11343                         abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11344
11345                 /* Setup callback routine and issue the command. */
11346                 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11347
11348                 /*
11349                  * Indicate the IO is being aborted by the driver and set
11350                  * the LPFC_DRIVER_ABORTED flag in the IO being aborted.
11351                  */
11352                 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11353
11354                 if (phba->sli_rev == LPFC_SLI_REV4) {
11355                         pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11356                         if (pring_s4 == NULL)
11357                                 continue;
11358                         /* Note: both hbalock and ring_lock must be held here */
11359                         spin_lock_irqsave(&pring_s4->ring_lock, iflags);
11360                         ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11361                                                         abtsiocbq, 0);
11362                         spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
11363                 } else {
11364                         ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11365                                                         abtsiocbq, 0);
11366                 }
11367
11369                 if (ret_val == IOCB_ERROR)
11370                         __lpfc_sli_release_iocbq(phba, abtsiocbq);
11371                 else
11372                         sum++;
11373         }
11374         spin_unlock_irq(&phba->hbalock);
11375         return sum;
11376 }
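
/*
 * Usage sketch (illustrative only, not part of the driver): right after
 * a task management command is sent, the caller can treat the return
 * value as the number of aborts issued and then wait for the aborted
 * I/O to drain, e.g. by polling lpfc_sli_sum_iocb(). The variables are
 * assumed to come from the caller's context.
 *
 *	int cnt;
 *
 *	cnt = lpfc_sli_abort_taskmgmt(vport, pring, tgt_id, lun_id,
 *				      LPFC_CTX_LUN);
 *	// cnt is the number of aborts issued; poll lpfc_sli_sum_iocb()
 *	// until the aborted commands complete.
 */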
11377
11378 /**
11379  * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
11380  * @phba: Pointer to HBA context object.
11381  * @cmdiocbq: Pointer to command iocb.
11382  * @rspiocbq: Pointer to response iocb.
11383  *
11384  * This function is the completion handler for iocbs issued using
11385  * the lpfc_sli_issue_iocb_wait function. It is called by the
11386  * ring event handler function without any lock held. It can be
11387  * called from both worker thread context and interrupt context,
11388  * as well as from another thread that cleans up the SLI layer
11389  * objects.
11390  * This function copies the contents of the response iocb to the
11391  * response iocb memory object provided by the caller of
11392  * lpfc_sli_issue_iocb_wait and then wakes up the thread that
11393  * sleeps waiting for the iocb completion.
11394  **/
11395 static void
11396 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11397                         struct lpfc_iocbq *cmdiocbq,
11398                         struct lpfc_iocbq *rspiocbq)
11399 {
11400         wait_queue_head_t *pdone_q;
11401         unsigned long iflags;
11402         struct lpfc_scsi_buf *lpfc_cmd;
11403
11404         spin_lock_irqsave(&phba->hbalock, iflags);
11405         if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
11406
11407                 /*
11408                  * A time out has occurred for the iocb.  If a time out
11409                  * completion handler has been supplied, call it.  Otherwise,
11410                  * just free the iocbq.
11411                  */
11412
11413                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11414                 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
11415                 cmdiocbq->wait_iocb_cmpl = NULL;
11416                 if (cmdiocbq->iocb_cmpl)
11417                         (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
11418                 else
11419                         lpfc_sli_release_iocbq(phba, cmdiocbq);
11420                 return;
11421         }
11422
11423         cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
11424         if (cmdiocbq->context2 && rspiocbq)
11425                 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
11426                        &rspiocbq->iocb, sizeof(IOCB_t));
11427
11428         /* Set the exchange busy flag for task management commands */
11429         if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
11430                 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
11431                 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
11432                         cur_iocbq);
11433                 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
11434         }
11435
11436         pdone_q = cmdiocbq->context_un.wait_queue;
11437         if (pdone_q)
11438                 wake_up(pdone_q);
11439         spin_unlock_irqrestore(&phba->hbalock, iflags);
11440         return;
11441 }
11442
11443 /**
11444  * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
11445  * @phba: Pointer to HBA context object.
11446  * @piocbq: Pointer to command iocb.
11447  * @flag: Flag to test.
11448  *
11449  * This routine grabs the hbalock and then tests the iocb_flag to
11450  * see if the passed in flag is set.
11451  * Returns:
11452  * 1 if flag is set.
11453  * 0 if flag is not set.
11454  **/
11455 static int
11456 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11457                  struct lpfc_iocbq *piocbq, uint32_t flag)
11458 {
11459         unsigned long iflags;
11460         int ret;
11461
11462         spin_lock_irqsave(&phba->hbalock, iflags);
11463         ret = piocbq->iocb_flag & flag;
11464         spin_unlock_irqrestore(&phba->hbalock, iflags);
11465         return ret;
11466
11467 }
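
/*
 * Illustrative note: this helper exists so that wait_event_timeout()
 * can test the flag under the hbalock, as in the caller below:
 *
 *	wait_event_timeout(done_q,
 *			   lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
 *			   timeout_req);
 */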
11468
11469 /**
11470  * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
11471  * @phba: Pointer to HBA context object.
11472  * @ring_number: SLI ring number to issue the iocb on.
11473  * @piocb: Pointer to command iocb.
11474  * @prspiocbq: Pointer to response iocb.
11475  * @timeout: Timeout in number of seconds.
11476  *
11477  * This function issues the iocb to firmware and waits for the
11478  * iocb to complete. The iocb_cmpl field of the iocb shall be used
11479  * to handle iocbs which time out. If the field is NULL, the
11480  * function shall free the iocbq structure.  If more clean up is
11481  * needed, the caller is expected to provide a completion function
11482  * that will provide the needed clean up.  If the iocb command is
11483  * not completed within timeout seconds, the function will either
11484  * free the iocbq structure (if iocb_cmpl == NULL) or execute the
11485  * completion function set in the iocb_cmpl field and then return
11486  * a status of IOCB_TIMEDOUT.  The caller should not free the iocb
11487  * resources if this function returns IOCB_TIMEDOUT.
11488  * The function waits for the iocb completion using an
11489  * non-interruptible wait.
11490  * This function will sleep while waiting for iocb completion.
11491  * So, this function should not be called from any context which
11492  * does not allow sleeping. Due to the same reason, this function
11493  * cannot be called with interrupt disabled.
11494  * This function assumes that the iocb completions occur while
11495  * this function sleep. So, this function cannot be called from
11496  * the thread which process iocb completion for this ring.
11497  * This function clears the iocb_flag of the iocb object before
11498  * issuing the iocb and the iocb completion handler sets this
11499  * flag and wakes this thread when the iocb completes.
11500  * The contents of the response iocb will be copied to prspiocbq
11501  * by the completion handler when the command completes.
11502  * This function returns IOCB_SUCCESS when success.
11503  * This function returns IOCB_SUCCESS on success.
11504  **/
11505 int
11506 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
11507                          uint32_t ring_number,
11508                          struct lpfc_iocbq *piocb,
11509                          struct lpfc_iocbq *prspiocbq,
11510                          uint32_t timeout)
11511 {
11512         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
11513         long timeleft, timeout_req = 0;
11514         int retval = IOCB_SUCCESS;
11515         uint32_t creg_val;
11516         struct lpfc_iocbq *iocb;
11517         int txq_cnt = 0;
11518         int txcmplq_cnt = 0;
11519         struct lpfc_sli_ring *pring;
11520         unsigned long iflags;
11521         bool iocb_completed = true;
11522
11523         if (phba->sli_rev >= LPFC_SLI_REV4)
11524                 pring = lpfc_sli4_calc_ring(phba, piocb);
11525         else
11526                 pring = &phba->sli.sli3_ring[ring_number];
11527         /*
11528          * If the caller has provided a response iocbq buffer, then context2
11529          * must be NULL or it is an error.
11530          */
11531         if (prspiocbq) {
11532                 if (piocb->context2)
11533                         return IOCB_ERROR;
11534                 piocb->context2 = prspiocbq;
11535         }
11536
11537         piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
11538         piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
11539         piocb->context_un.wait_queue = &done_q;
11540         piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
11541
11542         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
11543                 if (lpfc_readl(phba->HCregaddr, &creg_val))
11544                         return IOCB_ERROR;
11545                 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
11546                 writel(creg_val, phba->HCregaddr);
11547                 readl(phba->HCregaddr); /* flush */
11548         }
11549
11550         retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
11551                                      SLI_IOCB_RET_IOCB);
11552         if (retval == IOCB_SUCCESS) {
11553                 timeout_req = msecs_to_jiffies(timeout * 1000);
11554                 timeleft = wait_event_timeout(done_q,
11555                                 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
11556                                 timeout_req);
11557                 spin_lock_irqsave(&phba->hbalock, iflags);
11558                 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
11559
11560                         /*
11561                          * IOCB timed out.  Inform the wake iocb wait
11562                          * completion function and set local status
11563                          */
11564
11565                         iocb_completed = false;
11566                         piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
11567                 }
11568                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11569                 if (iocb_completed) {
11570                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11571                                         "0331 IOCB wake signaled\n");
11572                         /* Note: we are not indicating if the IOCB has a success
11573                          * status or not - that's for the caller to check.
11574                          * IOCB_SUCCESS means just that the command was sent and
11575                          * completed. Not that it completed successfully.
11576                          */
11577                 } else if (timeleft == 0) {
11578                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11579                                         "0338 IOCB wait timeout error - no "
11580                                         "wake response Data x%x\n", timeout);
11581                         retval = IOCB_TIMEDOUT;
11582                 } else {
11583                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11584                                         "0330 IOCB wake NOT set, "
11585                                         "Data x%x x%lx\n",
11586                                         timeout, (timeleft / jiffies));
11587                         retval = IOCB_TIMEDOUT;
11588                 }
11589         } else if (retval == IOCB_BUSY) {
11590                 if (phba->cfg_log_verbose & LOG_SLI) {
11591                         list_for_each_entry(iocb, &pring->txq, list) {
11592                                 txq_cnt++;
11593                         }
11594                         list_for_each_entry(iocb, &pring->txcmplq, list) {
11595                                 txcmplq_cnt++;
11596                         }
11597                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11598                                 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
11599                                 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
11600                 }
11601                 return retval;
11602         } else {
11603                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11604                                 "0332 IOCB wait issue failed, Data x%x\n",
11605                                 retval);
11606                 retval = IOCB_ERROR;
11607         }
11608
11609         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
11610                 if (lpfc_readl(phba->HCregaddr, &creg_val))
11611                         return IOCB_ERROR;
11612                 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
11613                 writel(creg_val, phba->HCregaddr);
11614                 readl(phba->HCregaddr); /* flush */
11615         }
11616
11617         if (prspiocbq)
11618                 piocb->context2 = NULL;
11619
11620         piocb->context_un.wait_queue = NULL;
11621         piocb->iocb_cmpl = NULL;
11622         return retval;
11623 }
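
/*
 * Usage sketch (illustrative only, not part of the driver): a
 * synchronous caller provides a response iocbq and a timeout in
 * seconds, and must not free the command iocb when IOCB_TIMEDOUT is
 * returned. The piocb, prspiocbq and timeout values are assumed to
 * come from the caller's context.
 *
 *	retval = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, piocb,
 *					  prspiocbq, timeout);
 *	if (retval == IOCB_SUCCESS) {
 *		// command was sent and completed; check
 *		// prspiocbq->iocb.ulpStatus for the actual outcome.
 *	} else if (retval == IOCB_TIMEDOUT) {
 *		// the completion handler now owns piocb; do not free it.
 *	}
 */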
11624
11625 /**
11626  * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
11627  * @phba: Pointer to HBA context object.
11628  * @pmboxq: Pointer to driver mailbox object.
11629  * @timeout: Timeout in number of seconds.
11630  *
11631  * This function issues the mailbox to firmware and waits for the
11632  * mailbox command to complete. If the mailbox command is not
11633  * completed within timeout seconds, it returns MBX_TIMEOUT.
11634  * The function waits for the mailbox completion using an
11635  * interruptible wait. If the thread is woken up due to a
11636  * signal, an MBX_TIMEOUT error is returned to the caller. The
11637  * caller should not free the mailbox resources if this function
11638  * returns MBX_TIMEOUT.
11639  * This function will sleep while waiting for the mailbox completion.
11640  * So, this function should not be called from any context that
11641  * does not allow sleeping. For the same reason, this function
11642  * cannot be called with interrupts disabled.
11643  * This function assumes that the mailbox completion occurs while
11644  * this function sleeps. So, this function cannot be called from
11645  * the worker thread that processes mailbox completions.
11646  * This function is called in the context of HBA management
11647  * applications.
11648  * This function returns MBX_SUCCESS when successful.
11649  * This function is called with no lock held.
11650  **/
11651 int
11652 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
11653                          uint32_t timeout)
11654 {
11655         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
11656         MAILBOX_t *mb = NULL;
11657         int retval;
11658         unsigned long flag;
11659
11660         /* The caller might set context1 for extended buffer */
11661         if (pmboxq->context1)
11662                 mb = (MAILBOX_t *)pmboxq->context1;
11663
11664         pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
11665         /* setup wake call as mailbox callback */
11666         pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
11667         /* setup context field to pass wait_queue pointer to wake function  */
11668         pmboxq->context1 = &done_q;
11669
11670         /* now issue the command */
11671         retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
11672         if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
11673                 wait_event_interruptible_timeout(done_q,
11674                                 pmboxq->mbox_flag & LPFC_MBX_WAKE,
11675                                 msecs_to_jiffies(timeout * 1000));
11676
11677                 spin_lock_irqsave(&phba->hbalock, flag);
11678                 /* restore the possible extended buffer for free resource */
11679                 pmboxq->context1 = (uint8_t *)mb;
11680                 /*
11681                  * if LPFC_MBX_WAKE flag is set the mailbox is completed
11682                  * else do not free the resources.
11683                  */
11684                 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
11685                         retval = MBX_SUCCESS;
11686                 } else {
11687                         retval = MBX_TIMEOUT;
11688                         pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
11689                 }
11690                 spin_unlock_irqrestore(&phba->hbalock, flag);
11691         } else {
11692                 /* restore the possible extended buffer for free resource */
11693                 pmboxq->context1 = (uint8_t *)mb;
11694         }
11695
11696         return retval;
11697 }
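
/*
 * Usage sketch (illustrative only, not part of the driver): HBA
 * management paths issue a mailbox command synchronously and must
 * leave the mailbox resources alone on MBX_TIMEOUT, since the command
 * may still complete later. The pmboxq setup is assumed to have been
 * done by the caller.
 *
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
 *	if (rc == MBX_SUCCESS) {
 *		// mailbox completed; safe to examine and free pmboxq.
 *	} else if (rc == MBX_TIMEOUT) {
 *		// do not free pmboxq; the default completion handler
 *		// installed above will release it if it ever completes.
 *	}
 */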
11698
11699 /**
11700  * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
11701  * @phba: Pointer to HBA context.
11702  *
11703  * This function is called to shutdown the driver's mailbox sub-system.
11704  * It first marks the mailbox sub-system as blocked to prevent any
11705  * asynchronous mailbox command from being issued off the pending mailbox
11706  * command queue. If the mailbox command sub-system shutdown is due to
11707  * HBA error conditions such as EEH or ERATT, this routine shall invoke
11708  * the mailbox sub-system flush routine to forcefully bring down the
11709  * mailbox sub-system. Otherwise, if it is due to normal condition (such
11710  * as with offline or HBA function reset), this routine will wait for the
11711  * outstanding mailbox command to complete before invoking the mailbox
11712  * sub-system flush routine to gracefully bring down mailbox sub-system.
11713  **/
11714 void
11715 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
11716 {
11717         struct lpfc_sli *psli = &phba->sli;
11718         unsigned long timeout;
11719
11720         if (mbx_action == LPFC_MBX_NO_WAIT) {
11721                 /* delay 100ms for port state */
11722                 msleep(100);
11723                 lpfc_sli_mbox_sys_flush(phba);
11724                 return;
11725         }
11726         timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
11727
11728         spin_lock_irq(&phba->hbalock);
11729         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
11730
11731         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
11732                 /* Determine how long we might wait for the active mailbox
11733                  * command to be gracefully completed by firmware.
11734                  */
11735                 if (phba->sli.mbox_active)
11736                         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
11737                                                 phba->sli.mbox_active) *
11738                                                 1000) + jiffies;
11739                 spin_unlock_irq(&phba->hbalock);
11740
11741                 while (phba->sli.mbox_active) {
11742                         /* Check active mailbox complete status every 2ms */
11743                         msleep(2);
11744                         if (time_after(jiffies, timeout))
11745                                 /* Timeout, let the mailbox flush routine
11746                                  * forcefully release the active mailbox command
11747                                  */
11748                                 break;
11749                 }
11750         } else
11751                 spin_unlock_irq(&phba->hbalock);
11752
11753         lpfc_sli_mbox_sys_flush(phba);
11754 }
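
/*
 * Usage sketch (illustrative only, not part of the driver): error
 * recovery paths such as EEH or ERATT handling shut the mailbox
 * sub-system down without waiting, while graceful offline paths pass
 * an action other than LPFC_MBX_NO_WAIT so the active mailbox command
 * can complete first.
 *
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_NO_WAIT);
 */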
11755
11756 /**
11757  * lpfc_sli_eratt_read - read sli-3 error attention events
11758  * @phba: Pointer to HBA context.
11759  *
11760  * This function is called to read the SLI3 device error attention registers
11761  * for possible error attention events. The caller must hold the hostlock
11762  * for possible error attention events. The caller must hold the hbalock
11763  *
11764  * This function returns 1 when there is Error Attention in the Host Attention
11765  * Register and returns 0 otherwise.
11766  **/
11767 static int
11768 lpfc_sli_eratt_read(struct lpfc_hba *phba)
11769 {
11770         uint32_t ha_copy;
11771
11772         /* Read chip Host Attention (HA) register */
11773         if (lpfc_readl(phba->HAregaddr, &ha_copy))
11774                 goto unplug_err;
11775
11776         if (ha_copy & HA_ERATT) {
11777                 /* Read host status register to retrieve error event */
11778                 if (lpfc_sli_read_hs(phba))
11779                         goto unplug_err;
11780
11781                 /* Check if a deferred error condition is active */
11782                 if ((HS_FFER1 & phba->work_hs) &&
11783                     ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
11784                       HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
11785                         phba->hba_flag |= DEFER_ERATT;
11786                         /* Clear all interrupt enable conditions */
11787                         writel(0, phba->HCregaddr);
11788                         readl(phba->HCregaddr);
11789                 }
11790
11791                 /* Set the driver HA work bitmap */
11792                 phba->work_ha |= HA_ERATT;
11793                 /* Indicate polling handles this ERATT */
11794                 phba->hba_flag |= HBA_ERATT_HANDLED;
11795                 return 1;
11796         }
11797         return 0;
11798
11799 unplug_err:
11800         /* Set the driver HS work bitmap */
11801         phba->work_hs |= UNPLUG_ERR;
11802         /* Set the driver HA work bitmap */
11803         phba->work_ha |= HA_ERATT;
11804         /* Indicate polling handles this ERATT */
11805         phba->hba_flag |= HBA_ERATT_HANDLED;
11806         return 1;
11807 }
11808
11809 /**
11810  * lpfc_sli4_eratt_read - read sli-4 error attention events
11811  * @phba: Pointer to HBA context.
11812  *
11813  * This function is called to read the SLI4 device error attention registers
11814  * for possible error attention events. The caller must hold the hbalock
11815  * with spin_lock_irq().
11816  *
11817  * This function returns 1 when there is Error Attention in the Host Attention
11818  * Register and returns 0 otherwise.
11819  **/
11820 static int
11821 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
11822 {
11823         uint32_t uerr_sta_hi, uerr_sta_lo;
11824         uint32_t if_type, portsmphr;
11825         struct lpfc_register portstat_reg;
11826
11827         /*
11828          * For now, use the SLI4 device internal unrecoverable error
11829          * registers for error attention. This can be changed later.
11830          */
11831         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11832         switch (if_type) {
11833         case LPFC_SLI_INTF_IF_TYPE_0:
11834                 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
11835                         &uerr_sta_lo) ||
11836                         lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
11837                         &uerr_sta_hi)) {
11838                         phba->work_hs |= UNPLUG_ERR;
11839                         phba->work_ha |= HA_ERATT;
11840                         phba->hba_flag |= HBA_ERATT_HANDLED;
11841                         return 1;
11842                 }
11843                 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
11844                     (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
11845                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11846                                         "1423 HBA Unrecoverable error: "
11847                                         "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
11848                                         "ue_mask_lo_reg=0x%x, "
11849                                         "ue_mask_hi_reg=0x%x\n",
11850                                         uerr_sta_lo, uerr_sta_hi,
11851                                         phba->sli4_hba.ue_mask_lo,
11852                                         phba->sli4_hba.ue_mask_hi);
11853                         phba->work_status[0] = uerr_sta_lo;
11854                         phba->work_status[1] = uerr_sta_hi;
11855                         phba->work_ha |= HA_ERATT;
11856                         phba->hba_flag |= HBA_ERATT_HANDLED;
11857                         return 1;
11858                 }
11859                 break;
11860         case LPFC_SLI_INTF_IF_TYPE_2:
11861         case LPFC_SLI_INTF_IF_TYPE_6:
11862                 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
11863                         &portstat_reg.word0) ||
11864                         lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
11865                         &portsmphr)){
11866                         phba->work_hs |= UNPLUG_ERR;
11867                         phba->work_ha |= HA_ERATT;
11868                         phba->hba_flag |= HBA_ERATT_HANDLED;
11869                         return 1;
11870                 }
11871                 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
11872                         phba->work_status[0] =
11873                                 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
11874                         phba->work_status[1] =
11875                                 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
11876                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11877                                         "2885 Port Status Event: "
11878                                         "port status reg 0x%x, "
11879                                         "port smphr reg 0x%x, "
11880                                         "error 1=0x%x, error 2=0x%x\n",
11881                                         portstat_reg.word0,
11882                                         portsmphr,
11883                                         phba->work_status[0],
11884                                         phba->work_status[1]);
11885                         phba->work_ha |= HA_ERATT;
11886                         phba->hba_flag |= HBA_ERATT_HANDLED;
11887                         return 1;
11888                 }
11889                 break;
11890         case LPFC_SLI_INTF_IF_TYPE_1:
11891         default:
11892                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11893                                 "2886 HBA Error Attention on unsupported "
11894                                 "if type %d.\n", if_type);
11895                 return 1;
11896         }
11897
11898         return 0;
11899 }
11900
11901 /**
11902  * lpfc_sli_check_eratt - check error attention events
11903  * @phba: Pointer to HBA context.
11904  *
11905  * This function is called from timer soft interrupt context to check HBA's
11906  * error attention register bit for error attention events.
11907  *
11908  * This function returns 1 when there is Error Attention in the Host Attention
11909  * Register and returns 0 otherwise.
11910  **/
11911 int
11912 lpfc_sli_check_eratt(struct lpfc_hba *phba)
11913 {
11914         uint32_t ha_copy;
11915
11916         /* If somebody is waiting to handle an eratt, don't process it
11917          * here. The brdkill function will do this.
11918          */
11919         if (phba->link_flag & LS_IGNORE_ERATT)
11920                 return 0;
11921
11922         /* Check if interrupt handler handles this ERATT */
11923         spin_lock_irq(&phba->hbalock);
11924         if (phba->hba_flag & HBA_ERATT_HANDLED) {
11925                 /* Interrupt handler has handled ERATT */
11926                 spin_unlock_irq(&phba->hbalock);
11927                 return 0;
11928         }
11929
11930         /*
11931          * If there is deferred error attention, do not check for error
11932          * attention
11933          */
11934         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11935                 spin_unlock_irq(&phba->hbalock);
11936                 return 0;
11937         }
11938
11939         /* If PCI channel is offline, don't process it */
11940         if (unlikely(pci_channel_offline(phba->pcidev))) {
11941                 spin_unlock_irq(&phba->hbalock);
11942                 return 0;
11943         }
11944
11945         switch (phba->sli_rev) {
11946         case LPFC_SLI_REV2:
11947         case LPFC_SLI_REV3:
11948                 /* Read chip Host Attention (HA) register */
11949                 ha_copy = lpfc_sli_eratt_read(phba);
11950                 break;
11951         case LPFC_SLI_REV4:
11952                 /* Read device Unrecoverable Error (UERR) registers */
11953                 ha_copy = lpfc_sli4_eratt_read(phba);
11954                 break;
11955         default:
11956                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11957                                 "0299 Invalid SLI revision (%d)\n",
11958                                 phba->sli_rev);
11959                 ha_copy = 0;
11960                 break;
11961         }
11962         spin_unlock_irq(&phba->hbalock);
11963
11964         return ha_copy;
11965 }
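
/*
 * Usage sketch (illustrative only, not part of the driver): the
 * error attention polling path can call lpfc_sli_check_eratt() from
 * timer context and wake the worker thread when it returns 1.
 *
 *	if (lpfc_sli_check_eratt(phba))
 *		lpfc_worker_wake_up(phba);
 */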
11966
11967 /**
11968  * lpfc_intr_state_check - Check device state for interrupt handling
11969  * @phba: Pointer to HBA context.
11970  *
11971  * This inline routine checks whether a device or its PCI slot is in a state
11972  * in which the interrupt should be handled.
11973  *
11974  * This function returns 0 if the device or the PCI slot is in a state in
11975  * which the interrupt should be handled, otherwise -EIO.
11976  */
11977 static inline int
11978 lpfc_intr_state_check(struct lpfc_hba *phba)
11979 {
11980         /* If the pci channel is offline, ignore all the interrupts */
11981         if (unlikely(pci_channel_offline(phba->pcidev)))
11982                 return -EIO;
11983
11984         /* Update device level interrupt statistics */
11985         phba->sli.slistat.sli_intr++;
11986
11987         /* Ignore all interrupts during initialization. */
11988         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
11989                 return -EIO;
11990
11991         return 0;
11992 }
11993
11994 /**
11995  * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
11996  * @irq: Interrupt number.
11997  * @dev_id: The device context pointer.
11998  *
11999  * This function is directly called from the PCI layer as an interrupt
12000  * service routine when device with SLI-3 interface spec is enabled with
12001  * MSI-X multi-message interrupt mode and there are slow-path events in
12002  * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
12003  * interrupt mode, this function is called as part of the device-level
12004  * interrupt handler. When the PCI slot is in error recovery or the HBA
12005  * is undergoing initialization, the interrupt handler will not process
12006  * the interrupt. The link attention and ELS ring attention events are
12007  * handled by the worker thread. The interrupt handler signals the worker
12008  * thread and returns for these events. This function is called without
12009  * any lock held. It gets the hbalock to access and update SLI data
12010  * structures.
12011  *
12012  * This function returns IRQ_HANDLED when interrupt is handled else it
12013  * returns IRQ_NONE.
12014  **/
12015 irqreturn_t
12016 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
12017 {
12018         struct lpfc_hba  *phba;
12019         uint32_t ha_copy, hc_copy;
12020         uint32_t work_ha_copy;
12021         unsigned long status;
12022         unsigned long iflag;
12023         uint32_t control;
12024
12025         MAILBOX_t *mbox, *pmbox;
12026         struct lpfc_vport *vport;
12027         struct lpfc_nodelist *ndlp;
12028         struct lpfc_dmabuf *mp;
12029         LPFC_MBOXQ_t *pmb;
12030         int rc;
12031
12032         /*
12033          * Get the driver's phba structure from the dev_id and
12034          * assume the HBA is not interrupting.
12035          */
12036         phba = (struct lpfc_hba *)dev_id;
12037
12038         if (unlikely(!phba))
12039                 return IRQ_NONE;
12040
12041         /*
12042          * Extra attention is needed when this function is invoked as an
12043          * individual interrupt handler in MSI-X multi-message interrupt mode.
12044          */
12045         if (phba->intr_type == MSIX) {
12046                 /* Check device state for handling interrupt */
12047                 if (lpfc_intr_state_check(phba))
12048                         return IRQ_NONE;
12049                 /* Need to read HA REG for slow-path events */
12050                 spin_lock_irqsave(&phba->hbalock, iflag);
12051                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12052                         goto unplug_error;
12053                 /* If somebody is waiting to handle an eratt don't process it
12054                  * here. The brdkill function will do this.
12055                  */
12056                 if (phba->link_flag & LS_IGNORE_ERATT)
12057                         ha_copy &= ~HA_ERATT;
12058                 /* Check the need for handling ERATT in interrupt handler */
12059                 if (ha_copy & HA_ERATT) {
12060                         if (phba->hba_flag & HBA_ERATT_HANDLED)
12061                                 /* ERATT polling has handled ERATT */
12062                                 ha_copy &= ~HA_ERATT;
12063                         else
12064                                 /* Indicate interrupt handler handles ERATT */
12065                                 phba->hba_flag |= HBA_ERATT_HANDLED;
12066                 }
12067
12068                 /*
12069                  * If there is deferred error attention, do not check for any
12070                  * interrupt.
12071                  */
12072                 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12073                         spin_unlock_irqrestore(&phba->hbalock, iflag);
12074                         return IRQ_NONE;
12075                 }
12076
12077                 /* Clear up only attention source related to slow-path */
12078                 if (lpfc_readl(phba->HCregaddr, &hc_copy))
12079                         goto unplug_error;
12080
12081                 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12082                         HC_LAINT_ENA | HC_ERINT_ENA),
12083                         phba->HCregaddr);
12084                 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12085                         phba->HAregaddr);
12086                 writel(hc_copy, phba->HCregaddr);
12087                 readl(phba->HAregaddr); /* flush */
12088                 spin_unlock_irqrestore(&phba->hbalock, iflag);
12089         } else
12090                 ha_copy = phba->ha_copy;
12091
12092         work_ha_copy = ha_copy & phba->work_ha_mask;
12093
12094         if (work_ha_copy) {
12095                 if (work_ha_copy & HA_LATT) {
12096                         if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
12097                                 /*
12098                                  * Turn off Link Attention interrupts
12099                                  * until CLEAR_LA done
12100                                  */
12101                                 spin_lock_irqsave(&phba->hbalock, iflag);
12102                                 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
12103                                 if (lpfc_readl(phba->HCregaddr, &control))
12104                                         goto unplug_error;
12105                                 control &= ~HC_LAINT_ENA;
12106                                 writel(control, phba->HCregaddr);
12107                                 readl(phba->HCregaddr); /* flush */
12108                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
12109                         }
12110                         else
12111                                 work_ha_copy &= ~HA_LATT;
12112                 }
12113
12114                 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
12115                         /*
12116                          * Turn off Slow Rings interrupts, LPFC_ELS_RING is
12117                          * the only slow ring.
12118                          */
12119                         status = (work_ha_copy &
12120                                 (HA_RXMASK  << (4*LPFC_ELS_RING)));
12121                         status >>= (4*LPFC_ELS_RING);
12122                         if (status & HA_RXMASK) {
12123                                 spin_lock_irqsave(&phba->hbalock, iflag);
12124                                 if (lpfc_readl(phba->HCregaddr, &control))
12125                                         goto unplug_error;
12126
12127                                 lpfc_debugfs_slow_ring_trc(phba,
12128                                 "ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
12129                                 control, status,
12130                                 (uint32_t)phba->sli.slistat.sli_intr);
12131
12132                                 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
12133                                         lpfc_debugfs_slow_ring_trc(phba,
12134                                                 "ISR Disable ring:"
12135                                                 "pwork:x%x hawork:x%x wait:x%x",
12136                                                 phba->work_ha, work_ha_copy,
12137                                                 (uint32_t)((unsigned long)
12138                                                 &phba->work_waitq));
12139
12140                                         control &=
12141                                             ~(HC_R0INT_ENA << LPFC_ELS_RING);
12142                                         writel(control, phba->HCregaddr);
12143                                         readl(phba->HCregaddr); /* flush */
12144                                 }
12145                                 else {
12146                                         lpfc_debugfs_slow_ring_trc(phba,
12147                                                 "ISR slow ring:   pwork:"
12148                                                 "x%x hawork:x%x wait:x%x",
12149                                                 phba->work_ha, work_ha_copy,
12150                                                 (uint32_t)((unsigned long)
12151                                                 &phba->work_waitq));
12152                                 }
12153                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
12154                         }
12155                 }
12156                 spin_lock_irqsave(&phba->hbalock, iflag);
12157                 if (work_ha_copy & HA_ERATT) {
12158                         if (lpfc_sli_read_hs(phba))
12159                                 goto unplug_error;
12160                         /*
12161                          * Check if a deferred error condition
12162                          * is active
12163                          */
12164                         if ((HS_FFER1 & phba->work_hs) &&
12165                                 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12166                                   HS_FFER6 | HS_FFER7 | HS_FFER8) &
12167                                   phba->work_hs)) {
12168                                 phba->hba_flag |= DEFER_ERATT;
12169                                 /* Clear all interrupt enable conditions */
12170                                 writel(0, phba->HCregaddr);
12171                                 readl(phba->HCregaddr);
12172                         }
12173                 }
12174
12175                 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
12176                         pmb = phba->sli.mbox_active;
12177                         pmbox = &pmb->u.mb;
12178                         mbox = phba->mbox;
12179                         vport = pmb->vport;
12180
12181                         /* First check out the status word */
12182                         lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12183                         if (pmbox->mbxOwner != OWN_HOST) {
12184                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
12185                                 /*
12186                                  * Stray Mailbox Interrupt, mbxCommand <cmd>
12187                                  * mbxStatus <status>
12188                                  */
12189                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12190                                                 LOG_SLI,
12191                                                 "(%d):0304 Stray Mailbox "
12192                                                 "Interrupt mbxCommand x%x "
12193                                                 "mbxStatus x%x\n",
12194                                                 (vport ? vport->vpi : 0),
12195                                                 pmbox->mbxCommand,
12196                                                 pmbox->mbxStatus);
12197                                 /* clear mailbox attention bit */
12198                                 work_ha_copy &= ~HA_MBATT;
12199                         } else {
12200                                 phba->sli.mbox_active = NULL;
12201                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
12202                                 phba->last_completion_time = jiffies;
12203                                 del_timer(&phba->sli.mbox_tmo);
12204                                 if (pmb->mbox_cmpl) {
12205                                         lpfc_sli_pcimem_bcopy(mbox, pmbox,
12206                                                         MAILBOX_CMD_SIZE);
12207                                         if (pmb->out_ext_byte_len &&
12208                                                 pmb->context2)
12209                                                 lpfc_sli_pcimem_bcopy(
12210                                                 phba->mbox_ext,
12211                                                 pmb->context2,
12212                                                 pmb->out_ext_byte_len);
12213                                 }
12214                                 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12215                                         pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12216
12217                                         lpfc_debugfs_disc_trc(vport,
12218                                                 LPFC_DISC_TRC_MBOX_VPORT,
12219                                                 "MBOX dflt rpi: "
12220                                                 "status:x%x rpi:x%x",
12221                                                 (uint32_t)pmbox->mbxStatus,
12222                                                 pmbox->un.varWords[0], 0);
12223
12224                                         if (!pmbox->mbxStatus) {
12225                                                 mp = (struct lpfc_dmabuf *)
12226                                                         (pmb->context1);
12227                                                 ndlp = (struct lpfc_nodelist *)
12228                                                         pmb->context2;
12229
12230                                                 /* Reg_LOGIN of dflt RPI was
12231                                                  * successful. Now let's get
12232                                                  * rid of the RPI using the
12233                                                  * same mbox buffer.
12234                                                  */
12235                                                 lpfc_unreg_login(phba,
12236                                                         vport->vpi,
12237                                                         pmbox->un.varWords[0],
12238                                                         pmb);
12239                                                 pmb->mbox_cmpl =
12240                                                         lpfc_mbx_cmpl_dflt_rpi;
12241                                                 pmb->context1 = mp;
12242                                                 pmb->context2 = ndlp;
12243                                                 pmb->vport = vport;
12244                                                 rc = lpfc_sli_issue_mbox(phba,
12245                                                                 pmb,
12246                                                                 MBX_NOWAIT);
12247                                                 if (rc != MBX_BUSY)
12248                                                         lpfc_printf_log(phba,
12249                                                         KERN_ERR,
12250                                                         LOG_MBOX | LOG_SLI,
12251                                                         "0350 rc should have "
12252                                                         "been MBX_BUSY\n");
12253                                                 if (rc != MBX_NOT_FINISHED)
12254                                                         goto send_current_mbox;
12255                                         }
12256                                 }
12257                                 spin_lock_irqsave(
12258                                                 &phba->pport->work_port_lock,
12259                                                 iflag);
12260                                 phba->pport->work_port_events &=
12261                                         ~WORKER_MBOX_TMO;
12262                                 spin_unlock_irqrestore(
12263                                                 &phba->pport->work_port_lock,
12264                                                 iflag);
12265                                 lpfc_mbox_cmpl_put(phba, pmb);
12266                         }
12267                 } else
12268                         spin_unlock_irqrestore(&phba->hbalock, iflag);
12269
12270                 if ((work_ha_copy & HA_MBATT) &&
12271                     (phba->sli.mbox_active == NULL)) {
12272 send_current_mbox:
12273                         /* Process next mailbox command if there is one */
12274                         do {
12275                                 rc = lpfc_sli_issue_mbox(phba, NULL,
12276                                                          MBX_NOWAIT);
12277                         } while (rc == MBX_NOT_FINISHED);
12278                         if (rc != MBX_SUCCESS)
12279                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12280                                                 LOG_SLI, "0349 rc should be "
12281                                                 "MBX_SUCCESS\n");
12282                 }
12283
12284                 spin_lock_irqsave(&phba->hbalock, iflag);
12285                 phba->work_ha |= work_ha_copy;
12286                 spin_unlock_irqrestore(&phba->hbalock, iflag);
12287                 lpfc_worker_wake_up(phba);
12288         }
12289         return IRQ_HANDLED;
12290 unplug_error:
12291         spin_unlock_irqrestore(&phba->hbalock, iflag);
12292         return IRQ_HANDLED;
12293
12294 } /* lpfc_sli_sp_intr_handler */
12295
12296 /**
12297  * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
12298  * @irq: Interrupt number.
12299  * @dev_id: The device context pointer.
12300  *
12301  * This function is directly called from the PCI layer as an interrupt
12302  * service routine when device with SLI-3 interface spec is enabled with
12303  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
12304  * ring event in the HBA. However, when the device is enabled with either
12305  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12306  * device-level interrupt handler. When the PCI slot is in error recovery
12307  * or the HBA is undergoing initialization, the interrupt handler will not
12308  * process the interrupt. The SCSI FCP fast-path ring events are handled in
12309  * the interrupt context. This function is called without any lock held.
12310  * It gets the hbalock to access and update SLI data structures.
12311  *
12312  * This function returns IRQ_HANDLED when interrupt is handled else it
12313  * returns IRQ_NONE.
12314  **/
12315 irqreturn_t
12316 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
12317 {
12318         struct lpfc_hba  *phba;
12319         uint32_t ha_copy;
12320         unsigned long status;
12321         unsigned long iflag;
12322         struct lpfc_sli_ring *pring;
12323
12324         /* Get the driver's phba structure from the dev_id and
12325          * assume the HBA is not interrupting.
12326          */
12327         phba = (struct lpfc_hba *) dev_id;
12328
12329         if (unlikely(!phba))
12330                 return IRQ_NONE;
12331
12332         /*
12333          * Extra handling is needed when this function is invoked as an
12334          * individual interrupt handler in MSI-X multi-message interrupt mode
12335          */
12336         if (phba->intr_type == MSIX) {
12337                 /* Check device state for handling interrupt */
12338                 if (lpfc_intr_state_check(phba))
12339                         return IRQ_NONE;
12340                 /* Need to read HA REG for FCP ring and other ring events */
12341                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12342                         return IRQ_HANDLED;
12343                 /* Clear up only attention source related to fast-path */
12344                 spin_lock_irqsave(&phba->hbalock, iflag);
12345                 /*
12346                  * If there is deferred error attention, do not check for
12347                  * any interrupt.
12348                  */
12349                 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12350                         spin_unlock_irqrestore(&phba->hbalock, iflag);
12351                         return IRQ_NONE;
12352                 }
12353                 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
12354                         phba->HAregaddr);
12355                 readl(phba->HAregaddr); /* flush */
12356                 spin_unlock_irqrestore(&phba->hbalock, iflag);
12357         } else
12358                 ha_copy = phba->ha_copy;
12359
12360         /*
12361          * Process all events on FCP ring. Take the optimized path for FCP IO.
12362          */
12363         ha_copy &= ~(phba->work_ha_mask);
12364
12365         status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12366         status >>= (4*LPFC_FCP_RING);
12367         pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12368         if (status & HA_RXMASK)
12369                 lpfc_sli_handle_fast_ring_event(phba, pring, status);
12370
12371         if (phba->cfg_multi_ring_support == 2) {
12372                 /*
12373                  * Process all events on extra ring. Take the optimized path
12374                  * for extra ring IO.
12375                  */
12376                 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12377                 status >>= (4*LPFC_EXTRA_RING);
12378                 if (status & HA_RXMASK) {
12379                         lpfc_sli_handle_fast_ring_event(phba,
12380                                         &phba->sli.sli3_ring[LPFC_EXTRA_RING],
12381                                         status);
12382                 }
12383         }
12384         return IRQ_HANDLED;
12385 }  /* lpfc_sli_fp_intr_handler */
12386
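/*
 * Illustrative sketch only, not part of the driver: both interrupt
 * handlers above isolate one ring's events by masking and shifting the
 * HA register copy, which packs four attention bits per ring. A
 * hypothetical helper showing just that nibble arithmetic:
 */
static inline unsigned long
lpfc_example_ring_status(uint32_t ha_copy, int ring)
{
        /* Select the ring's 4-bit attention nibble, then shift it down */
        return (ha_copy & (HA_RXMASK << (4 * ring))) >> (4 * ring);
}
/* e.g. status = lpfc_example_ring_status(ha_copy, LPFC_FCP_RING); */
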
12387 /**
12388  * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
12389  * @irq: Interrupt number.
12390  * @dev_id: The device context pointer.
12391  *
12392  * This function is the HBA device-level interrupt handler for a device with
12393  * the SLI-3 interface spec, called from the PCI layer when either MSI or
12394  * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
12395  * requires driver attention. This function invokes the slow-path interrupt
12396  * attention handling function and fast-path interrupt attention handling
12397  * function in turn to process the relevant HBA attention events. This
12398  * function is called without any lock held. It gets the hbalock to access
12399  * and update SLI data structures.
12400  *
12401  * This function returns IRQ_HANDLED when interrupt is handled, else it
12402  * returns IRQ_NONE.
12403  **/
12404 irqreturn_t
12405 lpfc_sli_intr_handler(int irq, void *dev_id)
12406 {
12407         struct lpfc_hba  *phba;
12408         irqreturn_t sp_irq_rc, fp_irq_rc;
12409         unsigned long status1, status2;
12410         uint32_t hc_copy;
12411
12412         /*
12413          * Get the driver's phba structure from the dev_id and
12414          * assume the HBA is not interrupting.
12415          */
12416         phba = (struct lpfc_hba *) dev_id;
12417
12418         if (unlikely(!phba))
12419                 return IRQ_NONE;
12420
12421         /* Check device state for handling interrupt */
12422         if (lpfc_intr_state_check(phba))
12423                 return IRQ_NONE;
12424
12425         spin_lock(&phba->hbalock);
12426         if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
12427                 spin_unlock(&phba->hbalock);
12428                 return IRQ_HANDLED;
12429         }
12430
12431         if (unlikely(!phba->ha_copy)) {
12432                 spin_unlock(&phba->hbalock);
12433                 return IRQ_NONE;
12434         } else if (phba->ha_copy & HA_ERATT) {
12435                 if (phba->hba_flag & HBA_ERATT_HANDLED)
12436                         /* ERATT polling has handled ERATT */
12437                         phba->ha_copy &= ~HA_ERATT;
12438                 else
12439                         /* Indicate interrupt handler handles ERATT */
12440                         phba->hba_flag |= HBA_ERATT_HANDLED;
12441         }
12442
12443         /*
12444          * If there is deferred error attention, do not check for any interrupt.
12445          */
12446         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12447                 spin_unlock(&phba->hbalock);
12448                 return IRQ_NONE;
12449         }
12450
12451         /* Clear attention sources except link and error attentions */
12452         if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
12453                 spin_unlock(&phba->hbalock);
12454                 return IRQ_HANDLED;
12455         }
12456         writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
12457                 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
12458                 phba->HCregaddr);
12459         writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
12460         writel(hc_copy, phba->HCregaddr);
12461         readl(phba->HAregaddr); /* flush */
12462         spin_unlock(&phba->hbalock);
12463
12464         /*
12465          * Invokes slow-path host attention interrupt handling as appropriate.
12466          */
12467
12468         /* status of events with mailbox and link attention */
12469         status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
12470
12471         /* status of events with ELS ring */
12472         status2 = (phba->ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
12473         status2 >>= (4*LPFC_ELS_RING);
12474
12475         if (status1 || (status2 & HA_RXMASK))
12476                 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
12477         else
12478                 sp_irq_rc = IRQ_NONE;
12479
12480         /*
12481          * Invoke fast-path host attention interrupt handling as appropriate.
12482          */
12483
12484         /* status of events with FCP ring */
12485         status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12486         status1 >>= (4*LPFC_FCP_RING);
12487
12488         /* status of events with extra ring */
12489         if (phba->cfg_multi_ring_support == 2) {
12490                 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12491                 status2 >>= (4*LPFC_EXTRA_RING);
12492         } else
12493                 status2 = 0;
12494
12495         if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
12496                 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
12497         else
12498                 fp_irq_rc = IRQ_NONE;
12499
12500         /* Return device-level interrupt handling status */
12501         return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
12502 }  /* lpfc_sli_intr_handler */
12503
12504 /**
12505  * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
12506  * @phba: pointer to lpfc hba data structure.
12507  *
12508  * This routine is invoked by the worker thread to process all the pending
12509  * SLI4 FCP abort XRI events.
12510  **/
12511 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
12512 {
12513         struct lpfc_cq_event *cq_event;
12514
12515         /* First, declare the fcp xri abort event has been handled */
12516         spin_lock_irq(&phba->hbalock);
12517         phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
12518         spin_unlock_irq(&phba->hbalock);
12519         /* Now, handle all the fcp xri abort events */
12520         while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
12521                 /* Get the first event from the head of the event queue */
12522                 spin_lock_irq(&phba->hbalock);
12523                 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
12524                                  cq_event, struct lpfc_cq_event, list);
12525                 spin_unlock_irq(&phba->hbalock);
12526                 /* Notify aborted XRI for FCP work queue */
12527                 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12528                 /* Free the event processed back to the free pool */
12529                 lpfc_sli4_cq_event_release(phba, cq_event);
12530         }
12531 }
12532
12533 /**
12534  * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
12535  * @phba: pointer to lpfc hba data structure.
12536  *
12537  * This routine is invoked by the worker thread to process all the pending
12538  * SLI4 els abort xri events.
12539  **/
12540 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
12541 {
12542         struct lpfc_cq_event *cq_event;
12543
12544         /* First, declare the els xri abort event has been handled */
12545         spin_lock_irq(&phba->hbalock);
12546         phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
12547         spin_unlock_irq(&phba->hbalock);
12548         /* Now, handle all the els xri abort events */
12549         while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
12550                 /* Get the first event from the head of the event queue */
12551                 spin_lock_irq(&phba->hbalock);
12552                 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
12553                                  cq_event, struct lpfc_cq_event, list);
12554                 spin_unlock_irq(&phba->hbalock);
12555                 /* Notify aborted XRI for ELS work queue */
12556                 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12557                 /* Free the event processed back to the free pool */
12558                 lpfc_sli4_cq_event_release(phba, cq_event);
12559         }
12560 }
12561
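/*
 * Illustrative sketch only: the two drain routines above share one
 * idiom -- detach the head event from the work queue with the hbalock
 * held, then process and release it with the lock dropped so the
 * notification callback never runs under the lock. A condensed,
 * hypothetical form of that pattern (lpfc_example_drain_xri_events is
 * not a driver function):
 */
static inline void
lpfc_example_drain_xri_events(struct lpfc_hba *phba,
                              struct list_head *workq,
                              void (*aborted)(struct lpfc_hba *,
                                              struct sli4_wcqe_xri_aborted *))
{
        struct lpfc_cq_event *cq_event;

        while (!list_empty(workq)) {
                /* Detach the head entry under the lock... */
                spin_lock_irq(&phba->hbalock);
                list_remove_head(workq, cq_event, struct lpfc_cq_event, list);
                spin_unlock_irq(&phba->hbalock);
                /* ...then notify and free it outside the lock */
                aborted(phba, &cq_event->cqe.wcqe_axri);
                lpfc_sli4_cq_event_release(phba, cq_event);
        }
}
/* e.g. used with lpfc_sli4_fcp_xri_aborted or lpfc_sli4_els_xri_aborted */
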
12562 /**
12563  * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
12564  * @phba: pointer to lpfc hba data structure
12565  * @pIocbIn: pointer to the rspiocbq
12566  * @pIocbOut: pointer to the cmdiocbq
12567  * @wcqe: pointer to the complete wcqe
12568  *
12569  * This routine transfers the fields of a command iocbq to a response iocbq
12570  * by copying all the IOCB fields from command iocbq and transferring the
12571  * completion status information from the complete wcqe.
12572  **/
12573 static void
12574 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
12575                               struct lpfc_iocbq *pIocbIn,
12576                               struct lpfc_iocbq *pIocbOut,
12577                               struct lpfc_wcqe_complete *wcqe)
12578 {
12579         int numBdes, i;
12580         unsigned long iflags;
12581         uint32_t status, max_response;
12582         struct lpfc_dmabuf *dmabuf;
12583         struct ulp_bde64 *bpl, bde;
12584         size_t offset = offsetof(struct lpfc_iocbq, iocb);
12585
12586         memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
12587                sizeof(struct lpfc_iocbq) - offset);
12588         /* Map WCQE parameters into irspiocb parameters */
12589         status = bf_get(lpfc_wcqe_c_status, wcqe);
12590         pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
12591         if (pIocbOut->iocb_flag & LPFC_IO_FCP) {
12592                 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
12593                         pIocbIn->iocb.un.fcpi.fcpi_parm =
12594                                         pIocbOut->iocb.un.fcpi.fcpi_parm -
12595                                         wcqe->total_data_placed;
12596                 else
12597                         pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12598         } else {
12599                 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12600                 switch (pIocbOut->iocb.ulpCommand) {
12601                 case CMD_ELS_REQUEST64_CR:
12602                         dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12603                         bpl  = (struct ulp_bde64 *)dmabuf->virt;
12604                         bde.tus.w = le32_to_cpu(bpl[1].tus.w);
12605                         max_response = bde.tus.f.bdeSize;
12606                         break;
12607                 case CMD_GEN_REQUEST64_CR:
12608                         max_response = 0;
12609                         if (!pIocbOut->context3)
12610                                 break;
12611                         numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
12612                                         sizeof(struct ulp_bde64);
12613                         dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12614                         bpl = (struct ulp_bde64 *)dmabuf->virt;
12615                         for (i = 0; i < numBdes; i++) {
12616                                 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
12617                                 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
12618                                         max_response += bde.tus.f.bdeSize;
12619                         }
12620                         break;
12621                 default:
12622                         max_response = wcqe->total_data_placed;
12623                         break;
12624                 }
12625                 if (max_response < wcqe->total_data_placed)
12626                         pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
12627                 else
12628                         pIocbIn->iocb.un.genreq64.bdl.bdeSize =
12629                                 wcqe->total_data_placed;
12630         }
12631
12632         /* Convert BG errors for completion status */
12633         if (status == CQE_STATUS_DI_ERROR) {
12634                 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
12635
12636                 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
12637                         pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
12638                 else
12639                         pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
12640
12641                 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
12642                 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
12643                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12644                                 BGS_GUARD_ERR_MASK;
12645                 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
12646                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12647                                 BGS_APPTAG_ERR_MASK;
12648                 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
12649                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12650                                 BGS_REFTAG_ERR_MASK;
12651
12652                 /* Check to see if there was any good data before the error */
12653                 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
12654                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12655                                 BGS_HI_WATER_MARK_PRESENT_MASK;
12656                         pIocbIn->iocb.unsli3.sli3_bg.bghm =
12657                                 wcqe->total_data_placed;
12658                 }
12659
12660                 /*
12661                  * Set ALL the error bits to indicate we don't know what
12662                  * type of error it is.
12663                  */
12664                 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
12665                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12666                                 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
12667                                 BGS_GUARD_ERR_MASK);
12668         }
12669
12670         /* Pick up HBA exchange busy condition */
12671         if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
12672                 spin_lock_irqsave(&phba->hbalock, iflags);
12673                 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
12674                 spin_unlock_irqrestore(&phba->hbalock, iflags);
12675         }
12676 }
12677
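/*
 * Illustrative sketch only: the CMD_GEN_REQUEST64_CR branch above
 * totals the receivable length by walking the buffer pointer list
 * (BPL) of little-endian ulp_bde64 entries. The same walk as a
 * hypothetical stand-alone helper:
 */
static inline uint32_t
lpfc_example_bpl_response_len(struct ulp_bde64 *bpl, int numBdes)
{
        struct ulp_bde64 bde;
        uint32_t len = 0;
        int i;

        for (i = 0; i < numBdes; i++) {
                /* BDE words are little-endian; swap before decoding */
                bde.tus.w = le32_to_cpu(bpl[i].tus.w);
                if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
                        len += bde.tus.f.bdeSize;
        }
        return len;
}
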
12678 /**
12679  * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
12680  * @phba: Pointer to HBA context object.
12681  * @irspiocbq: Pointer to the response IOCBQ that embeds the ELS WCQE.
12682  *
12683  * This routine handles an ELS work-queue completion event and constructs
12684  * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
12685  * discovery engine to handle.
12686  *
12687  * Return: Pointer to the receive IOCBQ, NULL otherwise.
12688  **/
12689 static struct lpfc_iocbq *
12690 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
12691                                struct lpfc_iocbq *irspiocbq)
12692 {
12693         struct lpfc_sli_ring *pring;
12694         struct lpfc_iocbq *cmdiocbq;
12695         struct lpfc_wcqe_complete *wcqe;
12696         unsigned long iflags;
12697
12698         pring = lpfc_phba_elsring(phba);
12699         if (unlikely(!pring))
12700                 return NULL;
12701
12702         wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
12703         spin_lock_irqsave(&pring->ring_lock, iflags);
12704         pring->stats.iocb_event++;
12705         /* Look up the ELS command IOCB and create pseudo response IOCB */
12706         cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
12707                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
12708         if (unlikely(!cmdiocbq)) {
12709                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
12710                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12711                                 "0386 ELS complete with no corresponding "
12712                                 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
12713                                 wcqe->word0, wcqe->total_data_placed,
12714                                 wcqe->parameter, wcqe->word3);
12715                 lpfc_sli_release_iocbq(phba, irspiocbq);
12716                 return NULL;
12717         }
12718
12719         /* Put the iocb back on the txcmplq */
12720         lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
12721         spin_unlock_irqrestore(&pring->ring_lock, iflags);
12722
12723         /* Fake the irspiocbq and copy necessary response information */
12724         lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
12725
12726         return irspiocbq;
12727 }
12728
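/**
 * lpfc_cq_event_setup - Allocate a CQ event and copy in a CQE
 * @phba: Pointer to HBA context object.
 * @entry: Pointer to the source completion queue entry.
 * @size: Number of bytes of the entry to copy.
 *
 * This routine allocates an internal CQ_EVENT entry and copies the given
 * completion queue entry into it for deferred processing.
 *
 * Return: Pointer to the new CQ event, or NULL on allocation failure.
 **/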
12729 inline struct lpfc_cq_event *
12730 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
12731 {
12732         struct lpfc_cq_event *cq_event;
12733
12734         /* Allocate a new internal CQ_EVENT entry */
12735         cq_event = lpfc_sli4_cq_event_alloc(phba);
12736         if (!cq_event) {
12737                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12738                                 "0602 Failed to alloc CQ_EVENT entry\n");
12739                 return NULL;
12740         }
12741
12742         /* Move the CQE into the event */
12743         memcpy(&cq_event->cqe, entry, size);
12744         return cq_event;
12745 }
12746
12747 /**
12748  * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
12749  * @phba: Pointer to HBA context object.
12750  * @mcqe: Pointer to mailbox completion queue entry.
12751  *
12752  * This routine processes a mailbox completion queue entry with an
12753  * asynchronous event.
12754  *
12755  * Return: true if work posted to worker thread, otherwise false.
12756  **/
12757 static bool
12758 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
12759 {
12760         struct lpfc_cq_event *cq_event;
12761         unsigned long iflags;
12762
12763         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12764                         "0392 Async Event: word0:x%x, word1:x%x, "
12765                         "word2:x%x, word3:x%x\n", mcqe->word0,
12766                         mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
12767
12768         cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
12769         if (!cq_event)
12770                 return false;
12771         spin_lock_irqsave(&phba->hbalock, iflags);
12772         list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
12773         /* Set the async event flag */
12774         phba->hba_flag |= ASYNC_EVENT;
12775         spin_unlock_irqrestore(&phba->hbalock, iflags);
12776
12777         return true;
12778 }
12779
12780 /**
12781  * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
12782  * @phba: Pointer to HBA context object.
12783  * @mcqe: Pointer to mailbox completion queue entry.
12784  *
12785  * This routine processes a mailbox completion queue entry with a mailbox
12786  * completion event.
12787  *
12788  * Return: true if work posted to worker thread, otherwise false.
12789  **/
12790 static bool
12791 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
12792 {
12793         uint32_t mcqe_status;
12794         MAILBOX_t *mbox, *pmbox;
12795         struct lpfc_mqe *mqe;
12796         struct lpfc_vport *vport;
12797         struct lpfc_nodelist *ndlp;
12798         struct lpfc_dmabuf *mp;
12799         unsigned long iflags;
12800         LPFC_MBOXQ_t *pmb;
12801         bool workposted = false;
12802         int rc;
12803
12804         /* Not a mailbox completion MCQE; bail out after checking mailbox consume */
12805         if (!bf_get(lpfc_trailer_completed, mcqe))
12806                 goto out_no_mqe_complete;
12807
12808         /* Get the reference to the active mbox command */
12809         spin_lock_irqsave(&phba->hbalock, iflags);
12810         pmb = phba->sli.mbox_active;
12811         if (unlikely(!pmb)) {
12812                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
12813                                 "1832 No pending MBOX command to handle\n");
12814                 spin_unlock_irqrestore(&phba->hbalock, iflags);
12815                 goto out_no_mqe_complete;
12816         }
12817         spin_unlock_irqrestore(&phba->hbalock, iflags);
12818         mqe = &pmb->u.mqe;
12819         pmbox = (MAILBOX_t *)&pmb->u.mqe;
12820         mbox = phba->mbox;
12821         vport = pmb->vport;
12822
12823         /* Reset heartbeat timer */
12824         phba->last_completion_time = jiffies;
12825         del_timer(&phba->sli.mbox_tmo);
12826
12827         /* Move mbox data to caller's mailbox region, do endian swapping */
12828         if (pmb->mbox_cmpl && mbox)
12829                 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
12830
12831         /*
12832          * For mcqe errors, conditionally move a modified error code to
12833          * the mbox so that the error will not be missed.
12834          */
12835         mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
12836         if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
12837                 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
12838                         bf_set(lpfc_mqe_status, mqe,
12839                                (LPFC_MBX_ERROR_RANGE | mcqe_status));
12840         }
12841         if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12842                 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12843                 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
12844                                       "MBOX dflt rpi: status:x%x rpi:x%x",
12845                                       mcqe_status,
12846                                       pmbox->un.varWords[0], 0);
12847                 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
12848                         mp = (struct lpfc_dmabuf *)(pmb->context1);
12849                         ndlp = (struct lpfc_nodelist *)pmb->context2;
12850                         /* Reg_LOGIN of dflt RPI was successful. Now let's
12851                          * get rid of the RPI using the same mbox buffer.
12852                          */
12853                         lpfc_unreg_login(phba, vport->vpi,
12854                                          pmbox->un.varWords[0], pmb);
12855                         pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
12856                         pmb->context1 = mp;
12857                         pmb->context2 = ndlp;
12858                         pmb->vport = vport;
12859                         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
12860                         if (rc != MBX_BUSY)
12861                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12862                                                 LOG_SLI, "0385 rc should "
12863                                                 "have been MBX_BUSY\n");
12864                         if (rc != MBX_NOT_FINISHED)
12865                                 goto send_current_mbox;
12866                 }
12867         }
12868         spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
12869         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
12870         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
12871
12872         /* There is mailbox completion work to do */
12873         spin_lock_irqsave(&phba->hbalock, iflags);
12874         __lpfc_mbox_cmpl_put(phba, pmb);
12875         phba->work_ha |= HA_MBATT;
12876         spin_unlock_irqrestore(&phba->hbalock, iflags);
12877         workposted = true;
12878
12879 send_current_mbox:
12880         spin_lock_irqsave(&phba->hbalock, iflags);
12881         /* Release the mailbox command posting token */
12882         phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
12883         /* Clearing the active mailbox pointer must be in sync with the flag clear */
12884         phba->sli.mbox_active = NULL;
12885         spin_unlock_irqrestore(&phba->hbalock, iflags);
12886         /* Wake up worker thread to post the next pending mailbox command */
12887         lpfc_worker_wake_up(phba);
12888 out_no_mqe_complete:
12889         if (bf_get(lpfc_trailer_consumed, mcqe))
12890                 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
12891         return workposted;
12892 }
12893
12894 /**
12895  * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
12896  * @phba: Pointer to HBA context object.
12897  * @cqe: Pointer to mailbox completion queue entry.
12898  *
12899  * This routine processes a mailbox completion queue entry; it invokes the
12900  * proper mailbox completion or asynchronous event handling routine
12901  * according to the MCQE's async bit.
12902  *
12903  * Return: true if work posted to worker thread, otherwise false.
12904  **/
12905 static bool
12906 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
12907 {
12908         struct lpfc_mcqe mcqe;
12909         bool workposted;
12910
12911         /* Copy the mailbox MCQE and convert endian order as needed */
12912         lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
12913
12914         /* Invoke the proper event handling routine */
12915         if (!bf_get(lpfc_trailer_async, &mcqe))
12916                 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
12917         else
12918                 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
12919         return workposted;
12920 }
12921
12922 /**
12923  * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
12924  * @phba: Pointer to HBA context object.
12925  * @cq: Pointer to associated CQ
12926  * @wcqe: Pointer to work-queue completion queue entry.
12927  *
12928  * This routine handles an ELS work-queue completion event.
12929  *
12930  * Return: true if work posted to worker thread, otherwise false.
12931  **/
12932 static bool
12933 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12934                              struct lpfc_wcqe_complete *wcqe)
12935 {
12936         struct lpfc_iocbq *irspiocbq;
12937         unsigned long iflags;
12938         struct lpfc_sli_ring *pring = cq->pring;
12939         int txq_cnt = 0;
12940         int txcmplq_cnt = 0;
12941         int fcp_txcmplq_cnt = 0;
12942
12943         /* Get an irspiocbq for later ELS response processing use */
12944         irspiocbq = lpfc_sli_get_iocbq(phba);
12945         if (!irspiocbq) {
12946                 if (!list_empty(&pring->txq))
12947                         txq_cnt++;
12948                 if (!list_empty(&pring->txcmplq))
12949                         txcmplq_cnt++;
12950                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12951                         "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
12952                         "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
12953                         txq_cnt, phba->iocb_cnt,
12954                         fcp_txcmplq_cnt,
12955                         txcmplq_cnt);
12956                 return false;
12957         }
12958
12959         /* Save off the slow-path queue event for work thread to process */
12960         memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
12961         spin_lock_irqsave(&phba->hbalock, iflags);
12962         list_add_tail(&irspiocbq->cq_event.list,
12963                       &phba->sli4_hba.sp_queue_event);
12964         phba->hba_flag |= HBA_SP_QUEUE_EVT;
12965         spin_unlock_irqrestore(&phba->hbalock, iflags);
12966
12967         return true;
12968 }
12969
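/*
 * Illustrative sketch only: the routine above defers completion work to
 * the worker thread by stashing the WCQE inside the iocbq's embedded
 * cq_event, queuing that event, and flagging the worker. The idiom in
 * isolation (lpfc_example_defer_sp_event is hypothetical):
 */
static inline void
lpfc_example_defer_sp_event(struct lpfc_hba *phba,
                            struct lpfc_iocbq *irspiocbq,
                            struct lpfc_wcqe_complete *wcqe)
{
        unsigned long iflags;

        memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_add_tail(&irspiocbq->cq_event.list,
                      &phba->sli4_hba.sp_queue_event);
        phba->hba_flag |= HBA_SP_QUEUE_EVT;
        spin_unlock_irqrestore(&phba->hbalock, iflags);
}
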
12970 /**
12971  * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
12972  * @phba: Pointer to HBA context object.
12973  * @wcqe: Pointer to work-queue completion queue entry.
12974  *
12975  * This routine handles a slow-path WQ entry consumed event by invoking the
12976  * proper WQ release routine to the slow-path WQ.
12977  **/
12978 static void
12979 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
12980                              struct lpfc_wcqe_release *wcqe)
12981 {
12982         /* sanity check on queue memory */
12983         if (unlikely(!phba->sli4_hba.els_wq))
12984                 return;
12985         /* Check for the slow-path ELS work queue */
12986         if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
12987                 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
12988                                      bf_get(lpfc_wcqe_r_wqe_index, wcqe));
12989         else
12990                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12991                                 "2579 Slow-path wqe consume event carries "
12992                                 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
12993                                 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
12994                                 phba->sli4_hba.els_wq->queue_id);
12995 }
12996
12997 /**
12998  * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an XRI abort event
12999  * @phba: Pointer to HBA context object.
13000  * @cq: Pointer to a WQ completion queue.
13001  * @wcqe: Pointer to work-queue completion queue entry.
13002  *
13003  * This routine handles an XRI abort event.
13004  *
13005  * Return: true if work posted to worker thread, otherwise false.
13006  **/
13007 static bool
13008 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13009                                    struct lpfc_queue *cq,
13010                                    struct sli4_wcqe_xri_aborted *wcqe)
13011 {
13012         bool workposted = false;
13013         struct lpfc_cq_event *cq_event;
13014         unsigned long iflags;
13015
13016         switch (cq->subtype) {
13017         case LPFC_FCP:
13018                 cq_event = lpfc_cq_event_setup(
13019                         phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
13020                 if (!cq_event)
13021                         return false;
13022                 spin_lock_irqsave(&phba->hbalock, iflags);
13023                 list_add_tail(&cq_event->list,
13024                               &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
13025                 /* Set the fcp xri abort event flag */
13026                 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
13027                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13028                 workposted = true;
13029                 break;
13030         case LPFC_NVME_LS: /* NVME LS uses ELS resources */
13031         case LPFC_ELS:
13032                 cq_event = lpfc_cq_event_setup(
13033                         phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
13034                 if (!cq_event)
13035                         return false;
13036                 spin_lock_irqsave(&phba->hbalock, iflags);
13037                 list_add_tail(&cq_event->list,
13038                               &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13039                 /* Set the els xri abort event flag */
13040                 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13041                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13042                 workposted = true;
13043                 break;
13044         case LPFC_NVME:
13045                 /* Notify aborted XRI for NVME work queue */
13046                 if (phba->nvmet_support)
13047                         lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13048                 else
13049                         lpfc_sli4_nvme_xri_aborted(phba, wcqe);
13050
13051                 workposted = false;
13052                 break;
13053         default:
13054                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13055                                 "0603 Invalid CQ subtype %d: "
13056                                 "%08x %08x %08x %08x\n",
13057                                 cq->subtype, wcqe->word0, wcqe->parameter,
13058                                 wcqe->word2, wcqe->word3);
13059                 workposted = false;
13060                 break;
13061         }
13062         return workposted;
13063 }
13064
13065 /**
13066  * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
13067  * @phba: Pointer to HBA context object.
13068  * @rcqe: Pointer to receive-queue completion queue entry.
13069  *
13070  * This routine processes a receive-queue completion queue entry.
13071  *
13072  * Return: true if work posted to worker thread, otherwise false.
13073  **/
13074 static bool
13075 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13076 {
13077         bool workposted = false;
13078         struct fc_frame_header *fc_hdr;
13079         struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13080         struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
13081         struct lpfc_nvmet_tgtport *tgtp;
13082         struct hbq_dmabuf *dma_buf;
13083         uint32_t status, rq_id;
13084         unsigned long iflags;
13085
13086         /* sanity check on queue memory */
13087         if (unlikely(!hrq) || unlikely(!drq))
13088                 return workposted;
13089
13090         if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13091                 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13092         else
13093                 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13094         if (rq_id != hrq->queue_id)
13095                 goto out;
13096
13097         status = bf_get(lpfc_rcqe_status, rcqe);
13098         switch (status) {
13099         case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13100                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13101                                 "2537 Receive Frame Truncated!!\n");
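                /* fallthrough */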
13102         case FC_STATUS_RQ_SUCCESS:
13103                 spin_lock_irqsave(&phba->hbalock, iflags);
13104                 lpfc_sli4_rq_release(hrq, drq);
13105                 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13106                 if (!dma_buf) {
13107                         hrq->RQ_no_buf_found++;
13108                         spin_unlock_irqrestore(&phba->hbalock, iflags);
13109                         goto out;
13110                 }
13111                 hrq->RQ_rcv_buf++;
13112                 hrq->RQ_buf_posted--;
13113                 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
13114
13115                 /* If an NVME LS event (type 0x28), treat it as fast path */
13116                 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13117
13118                 /* save off the frame for the worker thread to process */
13119                 list_add_tail(&dma_buf->cq_event.list,
13120                               &phba->sli4_hba.sp_queue_event);
13121                 /* Frame received */
13122                 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13123                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13124                 workposted = true;
13125                 break;
13126         case FC_STATUS_INSUFF_BUF_FRM_DISC:
13127                 if (phba->nvmet_support) {
13128                         tgtp = phba->targetport->private;
13129                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13130                                         "6402 RQE Error x%x, posted %d err_cnt "
13131                                         "%d: %x %x %x\n",
13132                                         status, hrq->RQ_buf_posted,
13133                                         hrq->RQ_no_posted_buf,
13134                                         atomic_read(&tgtp->rcv_fcp_cmd_in),
13135                                         atomic_read(&tgtp->rcv_fcp_cmd_out),
13136                                         atomic_read(&tgtp->xmt_fcp_release));
13137                 }
13138                 /* fallthrough */
13139
13140         case FC_STATUS_INSUFF_BUF_NEED_BUF:
13141                 hrq->RQ_no_posted_buf++;
13142                 /* Post more buffers if possible */
13143                 spin_lock_irqsave(&phba->hbalock, iflags);
13144                 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13145                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13146                 workposted = true;
13147                 break;
13148         }
13149 out:
13150         return workposted;
13151 }
13152
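/*
 * Illustrative sketch only: V1 receive CQEs carry the RQ id in a
 * different field than the original layout, which is why the routine
 * above (and lpfc_sli4_nvmet_handle_rcqe below) pick the accessor by
 * CQE code. As a hypothetical helper:
 */
static inline uint32_t
lpfc_example_rcqe_rq_id(struct lpfc_rcqe *rcqe)
{
        if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
                return bf_get(lpfc_rcqe_rq_id_v1, rcqe);
        return bf_get(lpfc_rcqe_rq_id, rcqe);
}
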
13153 /**
13154  * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
13155  * @phba: Pointer to HBA context object.
13156  * @cq: Pointer to the completion queue.
13157  * @wcqe: Pointer to a completion queue entry.
13158  *
13159  * This routine processes a slow-path work-queue or receive-queue completion
13160  * queue entry.
13161  *
13162  * Return: true if work posted to worker thread, otherwise false.
13163  **/
13164 static bool
13165 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13166                          struct lpfc_cqe *cqe)
13167 {
13168         struct lpfc_cqe cqevt;
13169         bool workposted = false;
13170
13171         /* Copy the work queue CQE and convert endian order if needed */
13172         lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
13173
13174         /* Check and process for different type of WCQE and dispatch */
13175         switch (bf_get(lpfc_cqe_code, &cqevt)) {
13176         case CQE_CODE_COMPL_WQE:
13177                 /* Process the WQ/RQ complete event */
13178                 phba->last_completion_time = jiffies;
13179                 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
13180                                 (struct lpfc_wcqe_complete *)&cqevt);
13181                 break;
13182         case CQE_CODE_RELEASE_WQE:
13183                 /* Process the WQ release event */
13184                 lpfc_sli4_sp_handle_rel_wcqe(phba,
13185                                 (struct lpfc_wcqe_release *)&cqevt);
13186                 break;
13187         case CQE_CODE_XRI_ABORTED:
13188                 /* Process the WQ XRI abort event */
13189                 phba->last_completion_time = jiffies;
13190                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13191                                 (struct sli4_wcqe_xri_aborted *)&cqevt);
13192                 break;
13193         case CQE_CODE_RECEIVE:
13194         case CQE_CODE_RECEIVE_V1:
13195                 /* Process the RQ event */
13196                 phba->last_completion_time = jiffies;
13197                 workposted = lpfc_sli4_sp_handle_rcqe(phba,
13198                                 (struct lpfc_rcqe *)&cqevt);
13199                 break;
13200         default:
13201                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13202                                 "0388 Not a valid WCQE code: x%x\n",
13203                                 bf_get(lpfc_cqe_code, &cqevt));
13204                 break;
13205         }
13206         return workposted;
13207 }
13208
13209 /**
13210  * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
13211  * @phba: Pointer to HBA context object.
13212  * @eqe: Pointer to fast-path event queue entry.
13213  *
13214  * This routine processes an event queue entry from the slow-path event
13215  * queue. It looks up the completion queue referenced by the entry's
13216  * resource id; if no matching completion queue exists, an error is
13217  * logged and the routine returns. Otherwise, it schedules the slow-path
13218  * work handler, which consumes all the entries on that completion queue
13219  * and rearms it.
13220  *
13221  **/
13222 static void
13223 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13224         struct lpfc_queue *speq)
13225 {
13226         struct lpfc_queue *cq = NULL, *childq;
13227         uint16_t cqid;
13228
13229         /* Get the reference to the corresponding CQ */
13230         cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13231
13232         list_for_each_entry(childq, &speq->child_list, list) {
13233                 if (childq->queue_id == cqid) {
13234                         cq = childq;
13235                         break;
13236                 }
13237         }
13238         if (unlikely(!cq)) {
13239                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13240                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13241                                         "0365 Slow-path CQ identifier "
13242                                         "(%d) does not exist\n", cqid);
13243                 return;
13244         }
13245
13246         /* Save EQ associated with this CQ */
13247         cq->assoc_qp = speq;
13248
13249         if (!queue_work(phba->wq, &cq->spwork))
13250                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13251                                 "0390 Cannot schedule soft IRQ "
13252                                 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13253                                 cqid, cq->queue_id, smp_processor_id());
13254 }
13255
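/*
 * Illustrative sketch only: the lookup above scans the event queue's
 * child list for the completion queue whose id matches the EQE's
 * resource id. The same scan as a hypothetical helper:
 */
static inline struct lpfc_queue *
lpfc_example_find_child_cq(struct lpfc_queue *eq, uint16_t cqid)
{
        struct lpfc_queue *childq;

        list_for_each_entry(childq, &eq->child_list, list)
                if (childq->queue_id == cqid)
                        return childq;
        return NULL;
}
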
13256 /**
13257  * lpfc_sli4_sp_process_cq - Process entries on a slow-path completion queue
13258  * @work: Pointer to the work_struct embedded in the completion queue.
13259  *
13260  * This routine runs from the worker queue and consumes all the entries
13261  * on a slow-path completion queue. It dispatches each entry to the
13262  * proper mailbox or work-queue completion handler according to the
13263  * queue type, tracks the maximum number of entries consumed, then
13264  * rearms the completion queue and wakes up the worker thread if any
13265  * work was posted.
13266  *
13267  **/
13268 static void
13269 lpfc_sli4_sp_process_cq(struct work_struct *work)
13270 {
13271         struct lpfc_queue *cq =
13272                 container_of(work, struct lpfc_queue, spwork);
13273         struct lpfc_hba *phba = cq->phba;
13274         struct lpfc_cqe *cqe;
13275         bool workposted = false;
13276         int ccount = 0;
13277
13278         /* Process all the entries to the CQ */
13279         switch (cq->type) {
13280         case LPFC_MCQ:
13281                 while ((cqe = lpfc_sli4_cq_get(cq))) {
13282                         workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
13283                         if (!(++ccount % cq->entry_repost))
13284                                 break;
13285                         cq->CQ_mbox++;
13286                 }
13287                 break;
13288         case LPFC_WCQ:
13289                 while ((cqe = lpfc_sli4_cq_get(cq))) {
13290                         if (cq->subtype == LPFC_FCP ||
13291                             cq->subtype == LPFC_NVME) {
13292 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13293                                 if (phba->ktime_on)
13294                                         cq->isr_timestamp = ktime_get_ns();
13295                                 else
13296                                         cq->isr_timestamp = 0;
13297 #endif
13298                                 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq,
13299                                                                        cqe);
13300                         } else {
13301                                 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
13302                                                                       cqe);
13303                         }
13304                         if (!(++ccount % cq->entry_repost))
13305                                 break;
13306                 }
13307
13308                 /* Track the max number of CQEs processed in 1 EQ */
13309                 if (ccount > cq->CQ_max_cqe)
13310                         cq->CQ_max_cqe = ccount;
13311                 break;
13312         default:
13313                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13314                                 "0370 Invalid completion queue type (%d)\n",
13315                                 cq->type);
13316                 return;
13317         }
13318
13319         /* Catch the no cq entry condition, log an error */
13320         if (unlikely(ccount == 0))
13321                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13322                                 "0371 No entry from the CQ: identifier "
13323                                 "(x%x), type (%d)\n", cq->queue_id, cq->type);
13324
13325         /* In any case, flush and re-arm the CQ */
13326         phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);
13327
13328         /* wake up worker thread if there is work to be done */
13329         if (workposted)
13330                 lpfc_worker_wake_up(phba);
13331 }
13332
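/*
 * Illustrative sketch only: the consume loops above bound the work done
 * per invocation by breaking once the entry count hits a multiple of
 * cq->entry_repost, leaving the remainder for the next pass after the
 * queue is re-armed. The budgeting idiom in isolation (hypothetical
 * helper, never called by the driver):
 */
static inline int
lpfc_example_budgeted_poll(struct lpfc_queue *cq)
{
        struct lpfc_cqe *cqe;
        int ccount = 0;

        while ((cqe = lpfc_sli4_cq_get(cq))) {
                /* ...dispatch the CQE here... */
                if (!(++ccount % cq->entry_repost))
                        break;  /* yield; the CQ is re-armed afterwards */
        }
        return ccount;
}
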
13333 /**
13334  * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
13335  * @phba: Pointer to HBA context object.
13336  * @cq: Pointer to associated CQ
13337  * @wcqe: Pointer to work-queue completion queue entry.
13338  *
13339  * This routine processes a fast-path work queue completion entry from a
13340  * fast-path event queue for FCP command response completion.
13341  **/
13342 static void
13343 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13344                              struct lpfc_wcqe_complete *wcqe)
13345 {
13346         struct lpfc_sli_ring *pring = cq->pring;
13347         struct lpfc_iocbq *cmdiocbq;
13348         struct lpfc_iocbq irspiocbq;
13349         unsigned long iflags;
13350
13351         /* Check for response status */
13352         if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13353                 /* If resource errors reported from HBA, reduce queue
13354                  * depth of the SCSI device.
13355                  */
13356                 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
13357                      IOSTAT_LOCAL_REJECT)) &&
13358                     ((wcqe->parameter & IOERR_PARAM_MASK) ==
13359                      IOERR_NO_RESOURCES))
13360                         phba->lpfc_rampdown_queue_depth(phba);
13361
13362                 /* Log the error status */
13363                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13364                                 "0373 FCP complete error: status=x%x, "
13365                                 "hw_status=x%x, total_data_specified=%d, "
13366                                 "parameter=x%x, word3=x%x\n",
13367                                 bf_get(lpfc_wcqe_c_status, wcqe),
13368                                 bf_get(lpfc_wcqe_c_hw_status, wcqe),
13369                                 wcqe->total_data_placed, wcqe->parameter,
13370                                 wcqe->word3);
13371         }
13372
13373         /* Look up the FCP command IOCB and create pseudo response IOCB */
13374         spin_lock_irqsave(&pring->ring_lock, iflags);
13375         pring->stats.iocb_event++;
13376         cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13377                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13378         spin_unlock_irqrestore(&pring->ring_lock, iflags);
13379         if (unlikely(!cmdiocbq)) {
13380                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13381                                 "0374 FCP complete with no corresponding "
13382                                 "cmdiocb: iotag (%d)\n",
13383                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13384                 return;
13385         }
13386 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13387         cmdiocbq->isr_timestamp = cq->isr_timestamp;
13388 #endif
13389         if (cmdiocbq->iocb_cmpl == NULL) {
13390                 if (cmdiocbq->wqe_cmpl) {
13391                         if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13392                                 spin_lock_irqsave(&phba->hbalock, iflags);
13393                                 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13394                                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13395                         }
13396
13397                         /* Pass the cmd_iocb and the wcqe to the upper layer */
13398                         (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
13399                         return;
13400                 }
13401                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13402                                 "0375 FCP cmdiocb not callback function "
13403                                 "iotag: (%d)\n",
13404                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13405                 return;
13406         }
13407
13408         /* Fake the irspiocb and copy necessary response information */
13409         lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
13410
13411         if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13412                 spin_lock_irqsave(&phba->hbalock, iflags);
13413                 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13414                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13415         }
13416
13417         /* Pass the cmd_iocb and the rsp state to the upper layer */
13418         (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
13419 }
13420
13421 /**
13422  * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
13423  * @phba: Pointer to HBA context object.
13424  * @cq: Pointer to completion queue.
13425  * @wcqe: Pointer to work-queue completion queue entry.
13426  *
13427  * This routine handles a fast-path WQ entry consumed event by invoking the
13428  * proper WQ release routine for the fast-path WQ.
13429  **/
13430 static void
13431 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13432                              struct lpfc_wcqe_release *wcqe)
13433 {
13434         struct lpfc_queue *childwq;
13435         bool wqid_matched = false;
13436         uint16_t hba_wqid;
13437
13438         /* Check for fast-path FCP work queue release */
13439         hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
13440         list_for_each_entry(childwq, &cq->child_list, list) {
13441                 if (childwq->queue_id == hba_wqid) {
13442                         lpfc_sli4_wq_release(childwq,
13443                                         bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13444                         if (childwq->q_flag & HBA_NVMET_WQFULL)
13445                                 lpfc_nvmet_wqfull_process(phba, childwq);
13446                         wqid_matched = true;
13447                         break;
13448                 }
13449         }
13450         /* Report warning log message if no match found */
13451         if (!wqid_matched)
13452                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13453                                 "2580 Fast-path wqe consume event carries "
13454                                 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
13455 }
13456
13457 /**
13458  * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
13459  * @phba: Pointer to HBA context object.
13460  * @rcqe: Pointer to receive-queue completion queue entry.
13461  *
13462  * This routine processes a receive-queue completion queue entry.
13463  *
13464  * Return: true if work posted to worker thread, otherwise false.
13465  **/
13466 static bool
13467 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13468                             struct lpfc_rcqe *rcqe)
13469 {
13470         bool workposted = false;
13471         struct lpfc_queue *hrq;
13472         struct lpfc_queue *drq;
13473         struct rqb_dmabuf *dma_buf;
13474         struct fc_frame_header *fc_hdr;
13475         struct lpfc_nvmet_tgtport *tgtp;
13476         uint32_t status, rq_id;
13477         unsigned long iflags;
13478         uint32_t fctl, idx;
13479
13480         if ((phba->nvmet_support == 0) ||
13481             (phba->sli4_hba.nvmet_cqset == NULL))
13482                 return workposted;
13483
13484         idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
13485         hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
13486         drq = phba->sli4_hba.nvmet_mrq_data[idx];
13487
13488         /* sanity check on queue memory */
13489         if (unlikely(!hrq) || unlikely(!drq))
13490                 return workposted;
13491
13492         if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13493                 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13494         else
13495                 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13496
13497         if ((phba->nvmet_support == 0) ||
13498             (rq_id != hrq->queue_id))
13499                 return workposted;
13500
13501         status = bf_get(lpfc_rcqe_status, rcqe);
13502         switch (status) {
13503         case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13504                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13505                                 "6126 Receive Frame Truncated!!\n");
13506                 /* Drop thru */
13507         case FC_STATUS_RQ_SUCCESS:
13508                 spin_lock_irqsave(&phba->hbalock, iflags);
13509                 lpfc_sli4_rq_release(hrq, drq);
13510                 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
13511                 if (!dma_buf) {
13512                         hrq->RQ_no_buf_found++;
13513                         spin_unlock_irqrestore(&phba->hbalock, iflags);
13514                         goto out;
13515                 }
13516                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13517                 hrq->RQ_rcv_buf++;
13518                 hrq->RQ_buf_posted--;
13519                 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13520
13521                 /* Just some basic sanity checks on FCP Command frame */
13522                 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
13523                         fc_hdr->fh_f_ctl[1] << 8 |
13524                         fc_hdr->fh_f_ctl[2]);
13525                 if (((fctl &
13526                     (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
13527                     (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
13528                     (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
13529                         goto drop;
13530
13531                 if (fc_hdr->fh_type == FC_TYPE_FCP) {
13532                         dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
13533                         lpfc_nvmet_unsol_fcp_event(
13534                                 phba, idx, dma_buf,
13535                                 cq->isr_timestamp);
13536                         return false;
13537                 }
13538 drop:
13539                 lpfc_in_buf_free(phba, &dma_buf->dbuf);
13540                 break;
13541         case FC_STATUS_INSUFF_BUF_FRM_DISC:
13542                 if (phba->nvmet_support) {
13543                         tgtp = phba->targetport->private;
13544                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13545                                         "6401 RQE Error x%x, posted %d err_cnt "
13546                                         "%d: %x %x %x\n",
13547                                         status, hrq->RQ_buf_posted,
13548                                         hrq->RQ_no_posted_buf,
13549                                         atomic_read(&tgtp->rcv_fcp_cmd_in),
13550                                         atomic_read(&tgtp->rcv_fcp_cmd_out),
13551                                         atomic_read(&tgtp->xmt_fcp_release));
13552                 }
13553                 /* fallthrough */
13554
13555         case FC_STATUS_INSUFF_BUF_NEED_BUF:
13556                 hrq->RQ_no_posted_buf++;
13557                 /* Post more buffers if possible */
13558                 break;
13559         }
13560 out:
13561         return workposted;
13562 }
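
/*
 * A minimal, self-contained sketch (not driver code; the "example_" name
 * is hypothetical) of the frame sanity check performed above: the three
 * F_CTL bytes of the FC header fold into one 24-bit value, and a
 * single-frame FCP command must carry FIRST_SEQ, END_SEQ and SEQ_INIT
 * with a zero sequence count.
 */
static inline bool example_fcp_cmd_frame_ok(const struct fc_frame_header *fh)
{
        uint32_t fctl = (fh->fh_f_ctl[0] << 16 |
                         fh->fh_f_ctl[1] << 8 |
                         fh->fh_f_ctl[2]);
        uint32_t need = FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;

        /* 0 is 0 in either byte order, so fh_seq_cnt needs no swap */
        return ((fctl & need) == need) && (fh->fh_seq_cnt == 0);
}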
13563
13564 /**
13565  * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
13566  * @phba: Pointer to HBA context object.
13567  * @cq: Pointer to the completion queue.
13568  * @cqe: Pointer to fast-path completion queue entry.
13569  * This routine processes a fast-path work queue completion entry from a
13570  * fast-path event queue. Returns true if work was posted, else false.
13571  **/
13572 static int
13573 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13574                          struct lpfc_cqe *cqe)
13575 {
13576         struct lpfc_wcqe_release wcqe;
13577         bool workposted = false;
13578
13579         /* Copy the work queue CQE and convert endian order if needed */
13580         lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
13581
13582         /* Check and process for different type of WCQE and dispatch */
13583         switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
13584         case CQE_CODE_COMPL_WQE:
13585         case CQE_CODE_NVME_ERSP:
13586                 cq->CQ_wq++;
13587                 /* Process the WQ complete event */
13588                 phba->last_completion_time = jiffies;
13589                 if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
13590                         lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
13591                                 (struct lpfc_wcqe_complete *)&wcqe);
13592                 if (cq->subtype == LPFC_NVME_LS)
13593                         lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
13594                                 (struct lpfc_wcqe_complete *)&wcqe);
13595                 break;
13596         case CQE_CODE_RELEASE_WQE:
13597                 cq->CQ_release_wqe++;
13598                 /* Process the WQ release event */
13599                 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
13600                                 (struct lpfc_wcqe_release *)&wcqe);
13601                 break;
13602         case CQE_CODE_XRI_ABORTED:
13603                 cq->CQ_xri_aborted++;
13604                 /* Process the WQ XRI abort event */
13605                 phba->last_completion_time = jiffies;
13606                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13607                                 (struct sli4_wcqe_xri_aborted *)&wcqe);
13608                 break;
13609         case CQE_CODE_RECEIVE_V1:
13610         case CQE_CODE_RECEIVE:
13611                 phba->last_completion_time = jiffies;
13612                 if (cq->subtype == LPFC_NVMET) {
13613                         workposted = lpfc_sli4_nvmet_handle_rcqe(
13614                                 phba, cq, (struct lpfc_rcqe *)&wcqe);
13615                 }
13616                 break;
13617         default:
13618                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13619                                 "0144 Not a valid CQE code: x%x\n",
13620                                 bf_get(lpfc_wcqe_c_code, &wcqe));
13621                 break;
13622         }
13623         return workposted;
13624 }
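
/*
 * Illustrative sketch (hypothetical "example_" helper, not driver code)
 * of the snapshot-then-decode pattern used above: the hardware CQE lives
 * in DMA memory, so lpfc_sli_pcimem_bcopy() first makes a host-order
 * copy, and every bf_get() then decodes a stable snapshot instead of
 * racing with the device.
 */
static inline uint32_t example_cqe_code(struct lpfc_cqe *hw_cqe)
{
        struct lpfc_wcqe_release snap;

        /* copy and byte-swap (as needed) before any field decode */
        lpfc_sli_pcimem_bcopy(hw_cqe, &snap, sizeof(struct lpfc_cqe));
        return bf_get(lpfc_wcqe_c_code, &snap);
}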
13625
13626 /**
13627  * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
13628  * @phba: Pointer to HBA context object.
13629  * @eqe: Pointer to fast-path event queue entry.
13630  * @qidx: Index of the fast-path event queue the entry arrived on.
13631  *
13632  * This routine processes an event queue entry from the fast-path event
13633  * queue. It checks the MajorCode and MinorCode to determine whether this
13634  * is a completion event on a completion queue; if not, an error is logged
13635  * and the routine returns. Otherwise, it looks up the corresponding
13636  * completion queue and queues work to process its entries and rearm it.
13637  **/
13638 static void
13639 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13640                         uint32_t qidx)
13641 {
13642         struct lpfc_queue *cq = NULL;
13643         uint16_t cqid, id;
13644
13645         if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
13646                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13647                                 "0366 Not a valid completion "
13648                                 "event: majorcode=x%x, minorcode=x%x\n",
13649                                 bf_get_le32(lpfc_eqe_major_code, eqe),
13650                                 bf_get_le32(lpfc_eqe_minor_code, eqe));
13651                 return;
13652         }
13653
13654         /* Get the reference to the corresponding CQ */
13655         cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13656
13657         if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
13658                 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
13659                 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
13660                         /* Process NVMET unsol rcv */
13661                         cq = phba->sli4_hba.nvmet_cqset[cqid - id];
13662                         goto process_cq;
13663                 }
13664         }
13665
13666         if (phba->sli4_hba.nvme_cq_map &&
13667             (cqid == phba->sli4_hba.nvme_cq_map[qidx])) {
13668                 /* Process NVME / NVMET command completion */
13669                 cq = phba->sli4_hba.nvme_cq[qidx];
13670                 goto process_cq;
13671         }
13672
13673         if (phba->sli4_hba.fcp_cq_map &&
13674             (cqid == phba->sli4_hba.fcp_cq_map[qidx])) {
13675                 /* Process FCP command completion */
13676                 cq = phba->sli4_hba.fcp_cq[qidx];
13677                 goto process_cq;
13678         }
13679
13680         if (phba->sli4_hba.nvmels_cq &&
13681             (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
13682                 /* Process NVME unsol rcv */
13683                 cq = phba->sli4_hba.nvmels_cq;
13684         }
13685
13686         /* Otherwise this is a slow-path event */
13687         if (cq == NULL) {
13688                 lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
13689                 return;
13690         }
13691
13692 process_cq:
13693         if (unlikely(cqid != cq->queue_id)) {
13694                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13695                                 "0368 Mismatched fast-path completion "
13696                                 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
13697                                 cqid, cq->queue_id);
13698                 return;
13699         }
13700
13701         /* Save EQ associated with this CQ */
13702         cq->assoc_qp = phba->sli4_hba.hba_eq[qidx];
13703
13704         if (!queue_work(phba->wq, &cq->irqwork))
13705                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13706                                 "0363 Cannot schedule soft IRQ "
13707                                 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13708                                 cqid, cq->queue_id, smp_processor_id());
13709 }
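
/*
 * A hedged sketch (hypothetical "example_" helper) of the NVMET CQ-set
 * lookup above: the set occupies a contiguous block of queue ids, so
 * membership is a simple range test and the array index is the offset
 * from the first id. The caller is assumed to have verified
 * phba->sli4_hba.nvmet_cqset, as the code above does.
 */
static inline struct lpfc_queue *
example_nvmet_cq_lookup(struct lpfc_hba *phba, uint16_t cqid)
{
        uint16_t first = phba->sli4_hba.nvmet_cqset[0]->queue_id;

        if (cqid >= first && cqid < first + phba->cfg_nvmet_mrq)
                return phba->sli4_hba.nvmet_cqset[cqid - first];
        return NULL;    /* not an NVMET unsolicited-receive CQ */
}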
13710
13711 /**
13712  * lpfc_sli4_hba_process_cq - Process a fast-path completion queue
13713  * @work: Pointer to the work_struct embedded in a completion queue.
13714  *
13715  * This routine is the workqueue handler that services a fast-path
13716  * completion queue. It recovers the lpfc_queue from the work_struct,
13717  * consumes up to cq->entry_repost entries, dispatching each CQE to
13718  * lpfc_sli4_fp_handle_cqe(), and tracks the maximum number of CQEs
13719  * handled in one invocation. It then flushes and rearms the completion
13720  * queue and, if any of the CQE handlers posted work for the worker
13721  * thread, wakes it up.
13722  **/
13723 static void
13724 lpfc_sli4_hba_process_cq(struct work_struct *work)
13725 {
13726         struct lpfc_queue *cq =
13727                 container_of(work, struct lpfc_queue, irqwork);
13728         struct lpfc_hba *phba = cq->phba;
13729         struct lpfc_cqe *cqe;
13730         bool workposted = false;
13731         int ccount = 0;
13732
13733         /* Process all the entries to the CQ */
13734         while ((cqe = lpfc_sli4_cq_get(cq))) {
13735 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13736                 if (phba->ktime_on)
13737                         cq->isr_timestamp = ktime_get_ns();
13738                 else
13739                         cq->isr_timestamp = 0;
13740 #endif
13741                 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
13742                 if (!(++ccount % cq->entry_repost))
13743                         break;
13744         }
13745
13746         /* Track the max number of CQEs processed in 1 EQ */
13747         if (ccount > cq->CQ_max_cqe)
13748                 cq->CQ_max_cqe = ccount;
13749         cq->assoc_qp->EQ_cqe_cnt += ccount;
13750
13751         /* Catch the no cq entry condition */
13752         if (unlikely(ccount == 0))
13753                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13754                                 "0369 No entry from fast-path completion "
13755                                 "queue fcpcqid=%d\n", cq->queue_id);
13756
13757         /* In any case, flush and re-arm the CQ */
13758         phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);
13759
13760         /* wake up worker thread if there are works to be done */
13761         if (workposted)
13762                 lpfc_worker_wake_up(phba);
13763 }
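
/*
 * A minimal sketch of the bounded-drain idiom used above, with a
 * hypothetical example_get() standing in for lpfc_sli4_cq_get(): consume
 * at most "budget" entries per pass so one busy queue cannot starve the
 * rest; the rearm path picks up whatever remains.
 */
static inline int
example_bounded_drain(struct lpfc_queue *q, int budget,
                      struct lpfc_cqe *(*example_get)(struct lpfc_queue *))
{
        struct lpfc_cqe *cqe;
        int count = 0;

        while ((cqe = example_get(q))) {
                /* dispatch cqe here, as lpfc_sli4_fp_handle_cqe() does */
                if (!(++count % budget))
                        break;  /* budget exhausted for this pass */
        }
        return count;
}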
13764
13765 static void
13766 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
13767 {
13768         struct lpfc_eqe *eqe;
13769
13770         /* walk all the EQ entries and drop on the floor */
13771         while ((eqe = lpfc_sli4_eq_get(eq)))
13772                 ;
13773
13774         /* Clear and re-arm the EQ */
13775         phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM);
13776 }
13777
13778
13779 /**
13780  * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
13781  *                           entry
13782  * @phba: Pointer to HBA context object.
13783  * @eqe: Pointer to fast-path event queue entry.
13784  *
13785  * This routine processes an event queue entry from the Flash Optimized
13786  * Fabric event queue. It checks the MajorCode and MinorCode to determine
13787  * whether this is a completion event on a completion queue; if not, an
13788  * error is logged and the routine returns. Otherwise, it validates the
13789  * OAS completion queue referenced by the entry and queues work to drain
13790  * and rearm that completion queue.
13791  **/
13792 static void
13793 lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
13794 {
13795         struct lpfc_queue *cq;
13796         uint16_t cqid;
13797
13798         if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
13799                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13800                                 "9147 Not a valid completion "
13801                                 "event: majorcode=x%x, minorcode=x%x\n",
13802                                 bf_get_le32(lpfc_eqe_major_code, eqe),
13803                                 bf_get_le32(lpfc_eqe_minor_code, eqe));
13804                 return;
13805         }
13806
13807         /* Get the reference to the corresponding CQ */
13808         cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13809
13810         /* Next check for OAS */
13811         cq = phba->sli4_hba.oas_cq;
13812         if (unlikely(!cq)) {
13813                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13814                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13815                                         "9148 OAS completion queue "
13816                                         "does not exist\n");
13817                 return;
13818         }
13819
13820         if (unlikely(cqid != cq->queue_id)) {
13821                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13822                                 "9149 Mismatched fast-path compl "
13823                                 "queue id: eqcqid=%d, fcpcqid=%d\n",
13824                                 cqid, cq->queue_id);
13825                 return;
13826         }
13827
13828         /* Save EQ associated with this CQ */
13829         cq->assoc_qp = phba->sli4_hba.fof_eq;
13830
13831         /* CQ work will be processed on CPU affinitized to this IRQ */
13832         if (!queue_work(phba->wq, &cq->irqwork))
13833                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13834                                 "0367 Cannot schedule soft IRQ "
13835                                 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13836                                 cqid, cq->queue_id, smp_processor_id());
13837 }
13838
13839 /**
13840  * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
13841  * @irq: Interrupt number.
13842  * @dev_id: The device context pointer.
13843  *
13844  * This function is directly called from the PCI layer as an interrupt
13845  * service routine when device with SLI-4 interface spec is enabled with
13846  * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
13847  * IOCB ring event in the HBA. However, when the device is enabled with either
13848  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13849  * device-level interrupt handler. When the PCI slot is in error recovery
13850  * or the HBA is undergoing initialization, the interrupt handler will not
13851  * process the interrupt. Flash Optimized Fabric ring events are handled in
13852  * the interrupt context. This function is called without any lock held.
13853  * It gets the hbalock to access and update SLI data structures. Note that
13854  * the EQs and CQs are mapped one-to-one, so the EQ index is equal to the
13855  * corresponding CQ index.
13856  *
13857  * This function returns IRQ_HANDLED when interrupt is handled else it
13858  * returns IRQ_NONE.
13859  **/
13860 irqreturn_t
13861 lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
13862 {
13863         struct lpfc_hba *phba;
13864         struct lpfc_hba_eq_hdl *hba_eq_hdl;
13865         struct lpfc_queue *eq;
13866         struct lpfc_eqe *eqe;
13867         unsigned long iflag;
13868         int ecount = 0;
13869
13870         /* Get the driver's phba structure from the dev_id */
13871         hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
13872         phba = hba_eq_hdl->phba;
13873
13874         if (unlikely(!phba))
13875                 return IRQ_NONE;
13876
13877         /* Get to the EQ struct associated with this vector */
13878         eq = phba->sli4_hba.fof_eq;
13879         if (unlikely(!eq))
13880                 return IRQ_NONE;
13881
13882         /* Check device state for handling interrupt */
13883         if (unlikely(lpfc_intr_state_check(phba))) {
13884                 /* Check again for link_state with lock held */
13885                 spin_lock_irqsave(&phba->hbalock, iflag);
13886                 if (phba->link_state < LPFC_LINK_DOWN)
13887                         /* Flush, clear interrupt, and rearm the EQ */
13888                         lpfc_sli4_eq_flush(phba, eq);
13889                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13890                 return IRQ_NONE;
13891         }
13892
13893         /*
13894          * Process all the events on the FCP fast-path EQ
13895          */
13896         while ((eqe = lpfc_sli4_eq_get(eq))) {
13897                 lpfc_sli4_fof_handle_eqe(phba, eqe);
13898                 if (!(++ecount % eq->entry_repost))
13899                         break;
13900                 eq->EQ_processed++;
13901         }
13902
13903         /* Track the max number of EQEs processed in 1 intr */
13904         if (ecount > eq->EQ_max_eqe)
13905                 eq->EQ_max_eqe = ecount;
13906
13907
13908         if (unlikely(ecount == 0)) {
13909                 eq->EQ_no_entry++;
13910
13911                 if (phba->intr_type == MSIX)
13912                         /* MSI-X vector is dedicated; no EQE is unexpected */
13913                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13914                                         "9145 MSI-X interrupt with no EQE\n");
13915                 else {
13916                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13917                                         "9146 ISR interrupt with no EQE\n");
13918                         /* INTx/MSI may be a shared line; not ours to handle */
13919                         return IRQ_NONE;
13920                 }
13921         }
13922         /* Always clear and re-arm the fast-path EQ */
13923         phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM);
13924         return IRQ_HANDLED;
13925 }
13926
13927 /**
13928  * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
13929  * @irq: Interrupt number.
13930  * @dev_id: The device context pointer.
13931  *
13932  * This function is directly called from the PCI layer as an interrupt
13933  * service routine when device with SLI-4 interface spec is enabled with
13934  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
13935  * ring event in the HBA. However, when the device is enabled with either
13936  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13937  * device-level interrupt handler. When the PCI slot is in error recovery
13938  * or the HBA is undergoing initialization, the interrupt handler will not
13939  * process the interrupt. SCSI FCP fast-path ring events are handled in
13940  * the interrupt context. This function is called without any lock held.
13941  * It gets the hbalock to access and update SLI data structures. Note that
13942  * the FCP EQs and FCP CQs are mapped one-to-one, so the FCP EQ index is
13943  * equal to the corresponding FCP CQ index.
13944  *
13945  * The link attention and ELS ring attention events are handled
13946  * by the worker thread. The interrupt handler signals the worker thread
13947  * and returns for these events. This function is called without any lock
13948  * held. It gets the hbalock to access and update SLI data structures.
13949  *
13950  * This function returns IRQ_HANDLED when interrupt is handled else it
13951  * returns IRQ_NONE.
13952  **/
13953 irqreturn_t
13954 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
13955 {
13956         struct lpfc_hba *phba;
13957         struct lpfc_hba_eq_hdl *hba_eq_hdl;
13958         struct lpfc_queue *fpeq;
13959         struct lpfc_eqe *eqe;
13960         unsigned long iflag;
13961         int ecount = 0;
13962         int hba_eqidx;
13963
13964         /* Get the driver's phba structure from the dev_id */
13965         hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
13966         phba = hba_eq_hdl->phba;
13967         hba_eqidx = hba_eq_hdl->idx;
13968
13969         if (unlikely(!phba))
13970                 return IRQ_NONE;
13971         if (unlikely(!phba->sli4_hba.hba_eq))
13972                 return IRQ_NONE;
13973
13974         /* Get to the EQ struct associated with this vector */
13975         fpeq = phba->sli4_hba.hba_eq[hba_eqidx];
13976         if (unlikely(!fpeq))
13977                 return IRQ_NONE;
13978
13979         if (lpfc_fcp_look_ahead) {
13980                 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
13981                         phba->sli4_hba.sli4_eq_clr_intr(fpeq);
13982                 else {
13983                         atomic_inc(&hba_eq_hdl->hba_eq_in_use);
13984                         return IRQ_NONE;
13985                 }
13986         }
13987
13988         /* Check device state for handling interrupt */
13989         if (unlikely(lpfc_intr_state_check(phba))) {
13990                 /* Check again for link_state with lock held */
13991                 spin_lock_irqsave(&phba->hbalock, iflag);
13992                 if (phba->link_state < LPFC_LINK_DOWN)
13993                         /* Flush, clear interrupt, and rearm the EQ */
13994                         lpfc_sli4_eq_flush(phba, fpeq);
13995                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13996                 if (lpfc_fcp_look_ahead)
13997                         atomic_inc(&hba_eq_hdl->hba_eq_in_use);
13998                 return IRQ_NONE;
13999         }
14000
14001         /*
14002          * Process all the events on the FCP fast-path EQ
14003          */
14004         while ((eqe = lpfc_sli4_eq_get(fpeq))) {
14005                 lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
14006                 if (!(++ecount % fpeq->entry_repost))
14007                         break;
14008                 fpeq->EQ_processed++;
14009         }
14010
14011         /* Track the max number of EQEs processed in 1 intr */
14012         if (ecount > fpeq->EQ_max_eqe)
14013                 fpeq->EQ_max_eqe = ecount;
14014
14015         /* Always clear and re-arm the fast-path EQ */
14016         phba->sli4_hba.sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
14017
14018         if (unlikely(ecount == 0)) {
14019                 fpeq->EQ_no_entry++;
14020
14021                 if (lpfc_fcp_look_ahead) {
14022                         atomic_inc(&hba_eq_hdl->hba_eq_in_use);
14023                         return IRQ_NONE;
14024                 }
14025
14026                 if (phba->intr_type == MSIX)
14027                         /* MSI-X vector is dedicated; no EQE is unexpected */
14028                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14029                                         "0358 MSI-X interrupt with no EQE\n");
14030                 else
14031                         /* INTx/MSI may be a shared line; not ours to handle */
14032                         return IRQ_NONE;
14033         }
14034
14035         if (lpfc_fcp_look_ahead)
14036                 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
14037
14038         return IRQ_HANDLED;
14039 } /* lpfc_sli4_hba_intr_handler */
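
/*
 * Sketch of the lpfc_fcp_look_ahead gating used above (hypothetical
 * "example_" helper): hba_eq_in_use starts at 1, atomic_dec_and_test()
 * lets exactly one context claim the EQ, and a loser restores the count
 * and backs off; the winner must re-increment on every exit path.
 */
static inline bool example_claim_eq(atomic_t *eq_in_use)
{
        if (atomic_dec_and_test(eq_in_use))
                return true;            /* counter went 1 -> 0: claimed */
        atomic_inc(eq_in_use);          /* lost the race; undo the dec */
        return false;
}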
14040
14041 /**
14042  * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
14043  * @irq: Interrupt number.
14044  * @dev_id: The device context pointer.
14045  *
14046  * This function is the device-level interrupt handler to device with SLI-4
14047  * interface spec, called from the PCI layer when either MSI or Pin-IRQ
14048  * interrupt mode is enabled and there is an event in the HBA which requires
14049  * driver attention. This function invokes the slow-path interrupt attention
14050  * handling function and fast-path interrupt attention handling function in
14051  * turn to process the relevant HBA attention events. This function is called
14052  * without any lock held. It gets the hbalock to access and update SLI data
14053  * structures.
14054  *
14055  * This function returns IRQ_HANDLED when interrupt is handled, else it
14056  * returns IRQ_NONE.
14057  **/
14058 irqreturn_t
14059 lpfc_sli4_intr_handler(int irq, void *dev_id)
14060 {
14061         struct lpfc_hba  *phba;
14062         irqreturn_t hba_irq_rc;
14063         bool hba_handled = false;
14064         int qidx;
14065
14066         /* Get the driver's phba structure from the dev_id */
14067         phba = (struct lpfc_hba *)dev_id;
14068
14069         if (unlikely(!phba))
14070                 return IRQ_NONE;
14071
14072         /*
14073          * Invoke fast-path host attention interrupt handling as appropriate.
14074          */
14075         for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
14076                 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
14077                                         &phba->sli4_hba.hba_eq_hdl[qidx]);
14078                 if (hba_irq_rc == IRQ_HANDLED)
14079                         hba_handled = true;
14080         }
14081
14082         if (phba->cfg_fof) {
14083                 hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
14084                                         &phba->sli4_hba.hba_eq_hdl[qidx]);
14085                 if (hba_irq_rc == IRQ_HANDLED)
14086                         hba_handled = true;
14087         }
14088
14089         return hba_handled ? IRQ_HANDLED : IRQ_NONE;
14090 } /* lpfc_sli4_intr_handler */
14091
14092 /**
14093  * lpfc_sli4_queue_free - free a queue structure and associated memory
14094  * @queue: The queue structure to free.
14095  *
14096  * This function frees a queue structure and the DMAable memory used for
14097  * the host resident queue. This function must be called after destroying the
14098  * queue on the HBA.
14099  **/
14100 void
14101 lpfc_sli4_queue_free(struct lpfc_queue *queue)
14102 {
14103         struct lpfc_dmabuf *dmabuf;
14104
14105         if (!queue)
14106                 return;
14107
14108         while (!list_empty(&queue->page_list)) {
14109                 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
14110                                  list);
14111                 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
14112                                   dmabuf->virt, dmabuf->phys);
14113                 kfree(dmabuf);
14114         }
14115         if (queue->rqbp) {
14116                 lpfc_free_rq_buffer(queue->phba, queue);
14117                 kfree(queue->rqbp);
14118         }
14119
14120         if (!list_empty(&queue->wq_list))
14121                 list_del(&queue->wq_list);
14122
14123         kfree(queue);
14124         return;
14125 }
14126
14127 /**
14128  * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
14129  * @phba: The HBA that this queue is being created on.
14130  * @page_size: The size of a queue page
14131  * @entry_size: The size of each queue entry for this queue.
14132  * @entry_count: The number of entries that this queue will handle.
14133  *
14134  * This function allocates a queue structure and the DMAable memory used for
14135  * the host resident queue. This function must be called before creating the
14136  * queue on the HBA.
14137  **/
14138 struct lpfc_queue *
14139 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
14140                       uint32_t entry_size, uint32_t entry_count)
14141 {
14142         struct lpfc_queue *queue;
14143         struct lpfc_dmabuf *dmabuf;
14144         int x, total_qe_count;
14145         void *dma_pointer;
14146         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14147
14148         if (!phba->sli4_hba.pc_sli4_params.supported)
14149                 hw_page_size = page_size;
14150
14151         queue = kzalloc(sizeof(struct lpfc_queue) +
14152                         (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
14153         if (!queue)
14154                 return NULL;
14155         queue->page_count = (ALIGN(entry_size * entry_count,
14156                         hw_page_size))/hw_page_size;
14157
14158         /* If needed, adjust page count to match the max the adapter supports */
14159         if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)
14160                 queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
14161
14162         INIT_LIST_HEAD(&queue->list);
14163         INIT_LIST_HEAD(&queue->wq_list);
14164         INIT_LIST_HEAD(&queue->wqfull_list);
14165         INIT_LIST_HEAD(&queue->page_list);
14166         INIT_LIST_HEAD(&queue->child_list);
14167
14168         /* Set queue parameters now.  If the system cannot provide memory
14169          * resources, the free routine needs to know what was allocated.
14170          */
14171         queue->entry_size = entry_size;
14172         queue->entry_count = entry_count;
14173         queue->page_size = hw_page_size;
14174         queue->phba = phba;
14175
14176         for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
14177                 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
14178                 if (!dmabuf)
14179                         goto out_fail;
14180                 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
14181                                                    hw_page_size, &dmabuf->phys,
14182                                                    GFP_KERNEL);
14183                 if (!dmabuf->virt) {
14184                         kfree(dmabuf);
14185                         goto out_fail;
14186                 }
14187                 dmabuf->buffer_tag = x;
14188                 list_add_tail(&dmabuf->list, &queue->page_list);
14189                 /* initialize queue's entry array */
14190                 dma_pointer = dmabuf->virt;
14191                 for (; total_qe_count < entry_count &&
14192                      dma_pointer < (hw_page_size + dmabuf->virt);
14193                      total_qe_count++, dma_pointer += entry_size) {
14194                         queue->qe[total_qe_count].address = dma_pointer;
14195                 }
14196         }
14197         INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
14198         INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
14199
14200         /* entry_repost will be set during q creation */
14201
14202         return queue;
14203 out_fail:
14204         lpfc_sli4_queue_free(queue);
14205         return NULL;
14206 }
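
/*
 * The page-count arithmetic above as a standalone sketch (hypothetical
 * "example_" helper): round the queue's total entry footprint up to
 * whole hardware pages, e.g. 1024 entries of 64 bytes on 4 KiB pages
 * yields 16 pages.
 */
static inline uint32_t example_queue_pages(uint32_t entry_size,
                                           uint32_t entry_count,
                                           uint32_t hw_page_size)
{
        return ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
}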
14207
14208 /**
14209  * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
14210  * @phba: HBA structure that indicates port to create a queue on.
14211  * @pci_barset: PCI BAR set flag.
14212  *
14213  * This function shall perform iomap of the specified PCI BAR address to host
14214  * memory address if not already done so and return it. The returned host
14215  * memory address can be NULL.
14216  */
14217 static void __iomem *
14218 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14219 {
14220         if (!phba->pcidev)
14221                 return NULL;
14222
14223         switch (pci_barset) {
14224         case WQ_PCI_BAR_0_AND_1:
14225                 return phba->pci_bar0_memmap_p;
14226         case WQ_PCI_BAR_2_AND_3:
14227                 return phba->pci_bar2_memmap_p;
14228         case WQ_PCI_BAR_4_AND_5:
14229                 return phba->pci_bar4_memmap_p;
14230         default:
14231                 break;
14232         }
14233         return NULL;
14234 }
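
/*
 * Hedged usage sketch (hypothetical "example_" helper and db_offset
 * parameter): queue-create response data carries a BAR-set selector plus
 * a doorbell offset, and callers combine the two into a doorbell address
 * roughly like this.
 */
static inline void __iomem *
example_db_addr(struct lpfc_hba *phba, uint16_t pci_barset,
                uint32_t db_offset)
{
        void __iomem *base;

        base = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
        return base ? base + db_offset : NULL;
}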
14235
14236 /**
14237  * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs
14238  * @phba: HBA structure that indicates port to create a queue on.
14239  * @startq: The starting FCP EQ to modify
14240  * @numq: The number of EQs to update; @imax: the max interrupts per second
14241  * This function sends a MODIFY_EQ_DELAY mailbox command to the HBA.
14242  * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be
14243  * updated in one mailbox command.
14244  *
14245  * The @phba struct is used to send mailbox command to HBA. The @startq
14246  * is used to get the starting FCP EQ to change.
14247  * This function is synchronous; it waits for the mailbox
14248  * command to finish before returning.
14249  *
14250  * On success this function will return a zero. If unable to allocate enough
14251  * memory this function will return -ENOMEM. If the queue create mailbox command
14252  * fails this function will return -ENXIO.
14253  **/
14254 int
14255 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
14256                          uint32_t numq, uint32_t imax)
14257 {
14258         struct lpfc_mbx_modify_eq_delay *eq_delay;
14259         LPFC_MBOXQ_t *mbox;
14260         struct lpfc_queue *eq;
14261         int cnt, rc, length, status = 0;
14262         uint32_t shdr_status, shdr_add_status;
14263         uint32_t result, val;
14264         int qidx;
14265         union lpfc_sli4_cfg_shdr *shdr;
14266         uint16_t dmult;
14267
14268         if (startq >= phba->io_channel_irqs)
14269                 return 0;
14270
14271         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14272         if (!mbox)
14273                 return -ENOMEM;
14274         length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
14275                   sizeof(struct lpfc_sli4_cfg_mhdr));
14276         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14277                          LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
14278                          length, LPFC_SLI4_MBX_EMBED);
14279         eq_delay = &mbox->u.mqe.un.eq_delay;
14280
14281         /* Calculate delay multiplier from maximum interrupts per second */
14282         result = imax / phba->io_channel_irqs;
14283         if (result > LPFC_DMULT_CONST || result == 0)
14284                 dmult = 0;
14285         else
14286                 dmult = LPFC_DMULT_CONST/result - 1;
14287         if (dmult > LPFC_DMULT_MAX)
14288                 dmult = LPFC_DMULT_MAX;
14289
14290         cnt = 0;
14291         for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) {
14292                 eq = phba->sli4_hba.hba_eq[qidx];
14293                 if (!eq)
14294                         continue;
14295                 eq->q_mode = imax;
14296                 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
14297                 eq_delay->u.request.eq[cnt].phase = 0;
14298                 eq_delay->u.request.eq[cnt].delay_multi = dmult;
14299                 cnt++;
14300
14301                 /* q_mode is only used for auto_imax */
14302                 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
14303                         /* Use EQ Delay Register method for q_mode */
14304
14305                         /* Convert for EQ Delay register */
14306                         val =  phba->cfg_fcp_imax;
14307                         if (val) {
14308                                 /* First, interrupts per sec per EQ */
14309                                 val = phba->cfg_fcp_imax /
14310                                         phba->io_channel_irqs;
14311
14312                                 /* us delay between each interrupt */
14313                                 val = LPFC_SEC_TO_USEC / val;
14314                         }
14315                         eq->q_mode = val;
14316                 } else {
14317                         eq->q_mode = imax;
14318                 }
14319
14320                 if (cnt >= numq)
14321                         break;
14322         }
14323         eq_delay->u.request.num_eq = cnt;
14324
14325         mbox->vport = phba->pport;
14326         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14327         mbox->context1 = NULL;
14328         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14329         shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
14330         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14331         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14332         if (shdr_status || shdr_add_status || rc) {
14333                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14334                                 "2512 MODIFY_EQ_DELAY mailbox failed with "
14335                                 "status x%x add_status x%x, mbx status x%x\n",
14336                                 shdr_status, shdr_add_status, rc);
14337                 status = -ENXIO;
14338         }
14339         mempool_free(mbox, phba->mbox_mem_pool);
14340         return status;
14341 }
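
/*
 * The delay-multiplier arithmetic above as a standalone sketch
 * (hypothetical "example_" helper): imax is the target interrupt rate
 * across all channels, so the per-EQ rate is imax / nirqs and dmult is
 * derived from it, where 0 means no interrupt moderation. E.g. an imax
 * of 150000 over 6 IRQs gives 25000 interrupts/sec per EQ.
 */
static inline uint16_t example_eq_dmult(uint32_t imax, uint32_t nirqs)
{
        uint32_t rate = nirqs ? imax / nirqs : 0;

        if (rate == 0 || rate > LPFC_DMULT_CONST)
                return 0;       /* out of range: disable moderation */
        return min_t(uint32_t, LPFC_DMULT_CONST / rate - 1, LPFC_DMULT_MAX);
}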
14342
14343 /**
14344  * lpfc_eq_create - Create an Event Queue on the HBA
14345  * @phba: HBA structure that indicates port to create a queue on.
14346  * @eq: The queue structure to use to create the event queue.
14347  * @imax: The maximum interrupt per second limit.
14348  *
14349  * This function creates an event queue, as detailed in @eq, on a port,
14350  * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
14351  *
14352  * The @phba struct is used to send mailbox command to HBA. The @eq struct
14353  * is used to get the entry count and entry size that are necessary to
14354  * determine the number of pages to allocate and use for this queue. This
14355  * function will send the EQ_CREATE mailbox command to the HBA to setup the
14356  * event queue. This function is synchronous; it waits for the mailbox
14357  * command to finish before returning.
14358  *
14359  * On success this function will return a zero. If unable to allocate enough
14360  * memory this function will return -ENOMEM. If the queue create mailbox command
14361  * fails this function will return -ENXIO.
14362  **/
14363 int
14364 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
14365 {
14366         struct lpfc_mbx_eq_create *eq_create;
14367         LPFC_MBOXQ_t *mbox;
14368         int rc, length, status = 0;
14369         struct lpfc_dmabuf *dmabuf;
14370         uint32_t shdr_status, shdr_add_status;
14371         union lpfc_sli4_cfg_shdr *shdr;
14372         uint16_t dmult;
14373         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14374
14375         /* sanity check on queue memory */
14376         if (!eq)
14377                 return -ENODEV;
14378         if (!phba->sli4_hba.pc_sli4_params.supported)
14379                 hw_page_size = SLI4_PAGE_SIZE;
14380
14381         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14382         if (!mbox)
14383                 return -ENOMEM;
14384         length = (sizeof(struct lpfc_mbx_eq_create) -
14385                   sizeof(struct lpfc_sli4_cfg_mhdr));
14386         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14387                          LPFC_MBOX_OPCODE_EQ_CREATE,
14388                          length, LPFC_SLI4_MBX_EMBED);
14389         eq_create = &mbox->u.mqe.un.eq_create;
14390         shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
14391         bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
14392                eq->page_count);
14393         bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
14394                LPFC_EQE_SIZE);
14395         bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
14396
14397         /* Use version 2 of CREATE_EQ if eqav is set */
14398         if (phba->sli4_hba.pc_sli4_params.eqav) {
14399                 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14400                        LPFC_Q_CREATE_VERSION_2);
14401                 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
14402                        phba->sli4_hba.pc_sli4_params.eqav);
14403         }
14404
14405         /* don't setup delay multiplier using EQ_CREATE */
14406         dmult = 0;
14407         bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
14408                dmult);
14409         switch (eq->entry_count) {
14410         default:
14411                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14412                                 "0360 Unsupported EQ count. (%d)\n",
14413                                 eq->entry_count);
14414                 if (eq->entry_count < 256)
14415                         return -EINVAL;
14416                 /* otherwise default to smallest count (drop through) */
14417         case 256:
14418                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14419                        LPFC_EQ_CNT_256);
14420                 break;
14421         case 512:
14422                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14423                        LPFC_EQ_CNT_512);
14424                 break;
14425         case 1024:
14426                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14427                        LPFC_EQ_CNT_1024);
14428                 break;
14429         case 2048:
14430                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14431                        LPFC_EQ_CNT_2048);
14432                 break;
14433         case 4096:
14434                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14435                        LPFC_EQ_CNT_4096);
14436                 break;
14437         }
14438         list_for_each_entry(dmabuf, &eq->page_list, list) {
14439                 memset(dmabuf->virt, 0, hw_page_size);
14440                 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14441                                         putPaddrLow(dmabuf->phys);
14442                 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14443                                         putPaddrHigh(dmabuf->phys);
14444         }
14445         mbox->vport = phba->pport;
14446         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14447         mbox->context1 = NULL;
14448         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14449         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14450         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14451         if (shdr_status || shdr_add_status || rc) {
14452                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14453                                 "2500 EQ_CREATE mailbox failed with "
14454                                 "status x%x add_status x%x, mbx status x%x\n",
14455                                 shdr_status, shdr_add_status, rc);
14456                 status = -ENXIO;
14457         }
14458         eq->type = LPFC_EQ;
14459         eq->subtype = LPFC_NONE;
14460         eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
14461         if (eq->queue_id == 0xFFFF)
14462                 status = -ENXIO;
14463         eq->host_index = 0;
14464         eq->hba_index = 0;
14465         eq->entry_repost = LPFC_EQ_REPOST;
14466
14467         mempool_free(mbox, phba->mbox_mem_pool);
14468         return status;
14469 }
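
/*
 * A hedged usage sketch pairing the helpers in this file (hypothetical
 * "example_" function; LPFC_DEFAULT_PAGE_SIZE and the eq_esize field are
 * assumed to be the EQ sizing values this driver uses elsewhere):
 * allocate host memory for a 1024-entry EQ, create it on the port, and
 * free the memory if creation fails.
 */
static inline int
example_setup_eq(struct lpfc_hba *phba, struct lpfc_queue **eqp,
                 uint32_t imax)
{
        struct lpfc_queue *eq;
        int rc;

        eq = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
                                   phba->sli4_hba.eq_esize, 1024);
        if (!eq)
                return -ENOMEM;
        rc = lpfc_eq_create(phba, eq, imax);
        if (rc) {
                lpfc_sli4_queue_free(eq);
                return rc;
        }
        *eqp = eq;
        return 0;
}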
14470
14471 /**
14472  * lpfc_cq_create - Create a Completion Queue on the HBA
14473  * @phba: HBA structure that indicates port to create a queue on.
14474  * @cq: The queue structure to use to create the completion queue.
14475  * @eq: The event queue to bind this completion queue to.
14476  *
14477  * This function creates a completion queue, as detailed in @cq, on a port,
14478  * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
14479  *
14480  * The @phba struct is used to send mailbox command to HBA. The @cq struct
14481  * is used to get the entry count and entry size that are necessary to
14482  * determine the number of pages to allocate and use for this queue. The @eq
14483  * is used to indicate which event queue to bind this completion queue to. This
14484  * function will send the CQ_CREATE mailbox command to the HBA to setup the
14485  * completion queue. This function is synchronous; it waits for the mailbox
14486  * command to finish before returning.
14487  *
14488  * On success this function will return a zero. If unable to allocate enough
14489  * memory this function will return -ENOMEM. If the queue create mailbox command
14490  * fails this function will return -ENXIO.
14491  **/
14492 int
14493 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14494                struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
14495 {
14496         struct lpfc_mbx_cq_create *cq_create;
14497         struct lpfc_dmabuf *dmabuf;
14498         LPFC_MBOXQ_t *mbox;
14499         int rc, length, status = 0;
14500         uint32_t shdr_status, shdr_add_status;
14501         union lpfc_sli4_cfg_shdr *shdr;
14502         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14503
14504         /* sanity check on queue memory */
14505         if (!cq || !eq)
14506                 return -ENODEV;
14507         if (!phba->sli4_hba.pc_sli4_params.supported)
14508                 hw_page_size = cq->page_size;
14509
14510         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14511         if (!mbox)
14512                 return -ENOMEM;
14513         length = (sizeof(struct lpfc_mbx_cq_create) -
14514                   sizeof(struct lpfc_sli4_cfg_mhdr));
14515         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14516                          LPFC_MBOX_OPCODE_CQ_CREATE,
14517                          length, LPFC_SLI4_MBX_EMBED);
14518         cq_create = &mbox->u.mqe.un.cq_create;
14519         shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
14520         bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
14521                     cq->page_count);
14522         bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
14523         bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
14524         bf_set(lpfc_mbox_hdr_version, &shdr->request,
14525                phba->sli4_hba.pc_sli4_params.cqv);
14526         if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
14527                 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
14528                        (cq->page_size / SLI4_PAGE_SIZE));
14529                 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
14530                        eq->queue_id);
14531                 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
14532                        phba->sli4_hba.pc_sli4_params.cqav);
14533         } else {
14534                 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
14535                        eq->queue_id);
14536         }
14537         switch (cq->entry_count) {
14538         case 2048:
14539         case 4096:
14540                 if (phba->sli4_hba.pc_sli4_params.cqv ==
14541                     LPFC_Q_CREATE_VERSION_2) {
14542                         cq_create->u.request.context.lpfc_cq_context_count =
14543                                 cq->entry_count;
14544                         bf_set(lpfc_cq_context_count,
14545                                &cq_create->u.request.context,
14546                                LPFC_CQ_CNT_WORD7);
14547                         break;
14548                 }
14549                 /* Fall Thru */
14550         default:
14551                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14552                                 "0361 Unsupported CQ count: "
14553                                 "entry cnt %d sz %d pg cnt %d\n",
14554                                 cq->entry_count, cq->entry_size,
14555                                 cq->page_count);
14556                 if (cq->entry_count < 256) {
14557                         status = -EINVAL;
14558                         goto out;
14559                 }
14560                 /* otherwise default to smallest count (drop through) */
14561         case 256:
14562                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14563                        LPFC_CQ_CNT_256);
14564                 break;
14565         case 512:
14566                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14567                        LPFC_CQ_CNT_512);
14568                 break;
14569         case 1024:
14570                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14571                        LPFC_CQ_CNT_1024);
14572                 break;
14573         }
14574         list_for_each_entry(dmabuf, &cq->page_list, list) {
14575                 memset(dmabuf->virt, 0, cq->page_size);
14576                 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14577                                         putPaddrLow(dmabuf->phys);
14578                 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14579                                         putPaddrHigh(dmabuf->phys);
14580         }
14581         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14582
14583         /* The IOCTL status is embedded in the mailbox subheader. */
14584         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14585         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14586         if (shdr_status || shdr_add_status || rc) {
14587                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14588                                 "2501 CQ_CREATE mailbox failed with "
14589                                 "status x%x add_status x%x, mbx status x%x\n",
14590                                 shdr_status, shdr_add_status, rc);
14591                 status = -ENXIO;
14592                 goto out;
14593         }
14594         cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14595         if (cq->queue_id == 0xFFFF) {
14596                 status = -ENXIO;
14597                 goto out;
14598         }
14599         /* link the cq onto the parent eq child list */
14600         list_add_tail(&cq->list, &eq->child_list);
14601         /* Set up completion queue's type and subtype */
14602         cq->type = type;
14603         cq->subtype = subtype;
14604         cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14605         cq->assoc_qid = eq->queue_id;
14606         cq->host_index = 0;
14607         cq->hba_index = 0;
14608         cq->entry_repost = LPFC_CQ_REPOST;
14609
14610 out:
14611         mempool_free(mbox, phba->mbox_mem_pool);
14612         return status;
14613 }
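
/*
 * The mailbox status-check pattern repeated throughout this file, as a
 * hedged sketch (hypothetical "example_" helper): the IOCTL status is
 * embedded in the config-mailbox subheader, and any of the three error
 * indications maps to -ENXIO.
 */
static inline int
example_check_cfg_shdr(union lpfc_sli4_cfg_shdr *shdr, int mbx_rc)
{
        uint32_t status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
        uint32_t add_status = bf_get(lpfc_mbox_hdr_add_status,
                                     &shdr->response);

        return (status || add_status || mbx_rc) ? -ENXIO : 0;
}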
14614
14615 /**
14616  * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
14617  * @phba: HBA structure that indicates port to create a queue on.
14618  * @cqp: The queue structure array to use to create the completion queues.
14619  * @eqp: The event queue array to bind these completion queues to.
14620  *
14621  * This function creates a set of completion queues to support MRQ,
14622  * as detailed in @cqp, on a port described by @phba, by sending a
14623  * CREATE_CQ_SET mailbox command to the HBA.
14624  *
14625  * The @phba struct is used to send mailbox command to HBA. The @cqp array
14626  * is used to get the entry count and entry size that are necessary to
14627  * determine the number of pages to allocate and use for these queues. The
14628  * @eqp array indicates which event queue to bind each completion queue to. This
14629  * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the
14630  * completion queue. This function is synchronous; it waits for the mailbox
14631  * command to finish before returning.
14632  *
14633  * On success this function will return a zero. If unable to allocate enough
14634  * memory this function will return -ENOMEM. If the queue create mailbox command
14635  * fails this function will return -ENXIO.
14636  **/
14637 int
14638 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
14639                    struct lpfc_queue **eqp, uint32_t type, uint32_t subtype)
14640 {
14641         struct lpfc_queue *cq;
14642         struct lpfc_queue *eq;
14643         struct lpfc_mbx_cq_create_set *cq_set;
14644         struct lpfc_dmabuf *dmabuf;
14645         LPFC_MBOXQ_t *mbox;
14646         int rc, length, alloclen, status = 0;
14647         int cnt, idx, numcq, page_idx = 0;
14648         uint32_t shdr_status, shdr_add_status;
14649         union lpfc_sli4_cfg_shdr *shdr;
14650         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14651
14652         /* sanity check on queue memory */
14653         numcq = phba->cfg_nvmet_mrq;
14654         if (!cqp || !eqp || !numcq)
14655                 return -ENODEV;
14656
14657         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14658         if (!mbox)
14659                 return -ENOMEM;
14660
14661         length = sizeof(struct lpfc_mbx_cq_create_set);
14662         length += ((numcq * cqp[0]->page_count) *
14663                    sizeof(struct dma_address));
14664         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14665                         LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
14666                         LPFC_SLI4_MBX_NEMBED);
14667         if (alloclen < length) {
14668                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14669                                 "3098 Allocated DMA memory size (%d) is "
14670                                 "less than the requested DMA memory size "
14671                                 "(%d)\n", alloclen, length);
14672                 status = -ENOMEM;
14673                 goto out;
14674         }
14675         cq_set = mbox->sge_array->addr[0];
14676         shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
14677         bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
14678
14679         for (idx = 0; idx < numcq; idx++) {
14680                 cq = cqp[idx];
14681                 eq = eqp[idx];
14682                 if (!cq || !eq) {
14683                         status = -ENOMEM;
14684                         goto out;
14685                 }
14686                 if (!phba->sli4_hba.pc_sli4_params.supported)
14687                         hw_page_size = cq->page_size;
14688
14689                 switch (idx) {
14690                 case 0:
14691                         bf_set(lpfc_mbx_cq_create_set_page_size,
14692                                &cq_set->u.request,
14693                                (hw_page_size / SLI4_PAGE_SIZE));
14694                         bf_set(lpfc_mbx_cq_create_set_num_pages,
14695                                &cq_set->u.request, cq->page_count);
14696                         bf_set(lpfc_mbx_cq_create_set_evt,
14697                                &cq_set->u.request, 1);
14698                         bf_set(lpfc_mbx_cq_create_set_valid,
14699                                &cq_set->u.request, 1);
14700                         bf_set(lpfc_mbx_cq_create_set_cqe_size,
14701                                &cq_set->u.request, 0);
14702                         bf_set(lpfc_mbx_cq_create_set_num_cq,
14703                                &cq_set->u.request, numcq);
14704                         bf_set(lpfc_mbx_cq_create_set_autovalid,
14705                                &cq_set->u.request,
14706                                phba->sli4_hba.pc_sli4_params.cqav);
14707                         switch (cq->entry_count) {
14708                         case 2048:
14709                         case 4096:
14710                                 if (phba->sli4_hba.pc_sli4_params.cqv ==
14711                                     LPFC_Q_CREATE_VERSION_2) {
14712                                         bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14713                                                &cq_set->u.request,
14714                                                 cq->entry_count);
14715                                         bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14716                                                &cq_set->u.request,
14717                                                LPFC_CQ_CNT_WORD7);
14718                                         break;
14719                                 }
14720                                 /* Fall Thru */
14721                         default:
14722                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14723                                                 "3118 Bad CQ count. (%d)\n",
14724                                                 cq->entry_count);
14725                                 if (cq->entry_count < 256) {
14726                                         status = -EINVAL;
14727                                         goto out;
14728                                 }
14729                                 /* otherwise default to smallest (drop thru) */
14730                         case 256:
14731                                 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14732                                        &cq_set->u.request, LPFC_CQ_CNT_256);
14733                                 break;
14734                         case 512:
14735                                 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14736                                        &cq_set->u.request, LPFC_CQ_CNT_512);
14737                                 break;
14738                         case 1024:
14739                                 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14740                                        &cq_set->u.request, LPFC_CQ_CNT_1024);
14741                                 break;
14742                         }
14743                         bf_set(lpfc_mbx_cq_create_set_eq_id0,
14744                                &cq_set->u.request, eq->queue_id);
14745                         break;
14746                 case 1:
14747                         bf_set(lpfc_mbx_cq_create_set_eq_id1,
14748                                &cq_set->u.request, eq->queue_id);
14749                         break;
14750                 case 2:
14751                         bf_set(lpfc_mbx_cq_create_set_eq_id2,
14752                                &cq_set->u.request, eq->queue_id);
14753                         break;
14754                 case 3:
14755                         bf_set(lpfc_mbx_cq_create_set_eq_id3,
14756                                &cq_set->u.request, eq->queue_id);
14757                         break;
14758                 case 4:
14759                         bf_set(lpfc_mbx_cq_create_set_eq_id4,
14760                                &cq_set->u.request, eq->queue_id);
14761                         break;
14762                 case 5:
14763                         bf_set(lpfc_mbx_cq_create_set_eq_id5,
14764                                &cq_set->u.request, eq->queue_id);
14765                         break;
14766                 case 6:
14767                         bf_set(lpfc_mbx_cq_create_set_eq_id6,
14768                                &cq_set->u.request, eq->queue_id);
14769                         break;
14770                 case 7:
14771                         bf_set(lpfc_mbx_cq_create_set_eq_id7,
14772                                &cq_set->u.request, eq->queue_id);
14773                         break;
14774                 case 8:
14775                         bf_set(lpfc_mbx_cq_create_set_eq_id8,
14776                                &cq_set->u.request, eq->queue_id);
14777                         break;
14778                 case 9:
14779                         bf_set(lpfc_mbx_cq_create_set_eq_id9,
14780                                &cq_set->u.request, eq->queue_id);
14781                         break;
14782                 case 10:
14783                         bf_set(lpfc_mbx_cq_create_set_eq_id10,
14784                                &cq_set->u.request, eq->queue_id);
14785                         break;
14786                 case 11:
14787                         bf_set(lpfc_mbx_cq_create_set_eq_id11,
14788                                &cq_set->u.request, eq->queue_id);
14789                         break;
14790                 case 12:
14791                         bf_set(lpfc_mbx_cq_create_set_eq_id12,
14792                                &cq_set->u.request, eq->queue_id);
14793                         break;
14794                 case 13:
14795                         bf_set(lpfc_mbx_cq_create_set_eq_id13,
14796                                &cq_set->u.request, eq->queue_id);
14797                         break;
14798                 case 14:
14799                         bf_set(lpfc_mbx_cq_create_set_eq_id14,
14800                                &cq_set->u.request, eq->queue_id);
14801                         break;
14802                 case 15:
14803                         bf_set(lpfc_mbx_cq_create_set_eq_id15,
14804                                &cq_set->u.request, eq->queue_id);
14805                         break;
14806                 }
14807
14808                 /* link the cq onto the parent eq child list */
14809                 list_add_tail(&cq->list, &eq->child_list);
14810                 /* Set up completion queue's type and subtype */
14811                 cq->type = type;
14812                 cq->subtype = subtype;
14813                 cq->assoc_qid = eq->queue_id;
14814                 cq->host_index = 0;
14815                 cq->hba_index = 0;
14816                 cq->entry_repost = LPFC_CQ_REPOST;
14817                 cq->chann = idx;
14818
14819                 rc = 0;
14820                 list_for_each_entry(dmabuf, &cq->page_list, list) {
14821                         memset(dmabuf->virt, 0, hw_page_size);
14822                         cnt = page_idx + dmabuf->buffer_tag;
14823                         cq_set->u.request.page[cnt].addr_lo =
14824                                         putPaddrLow(dmabuf->phys);
14825                         cq_set->u.request.page[cnt].addr_hi =
14826                                         putPaddrHigh(dmabuf->phys);
14827                         rc++;
14828                 }
14829                 page_idx += rc;
14830         }
14831
14832         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14833
14834         /* The IOCTL status is embedded in the mailbox subheader. */
14835         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14836         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14837         if (shdr_status || shdr_add_status || rc) {
14838                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14839                                 "3119 CQ_CREATE_SET mailbox failed with "
14840                                 "status x%x add_status x%x, mbx status x%x\n",
14841                                 shdr_status, shdr_add_status, rc);
14842                 status = -ENXIO;
14843                 goto out;
14844         }
14845         rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
14846         if (rc == 0xFFFF) {
14847                 status = -ENXIO;
14848                 goto out;
14849         }
14850
14851         for (idx = 0; idx < numcq; idx++) {
14852                 cq = cqp[idx];
14853                 cq->queue_id = rc + idx;
14854         }
14855
14856 out:
14857         lpfc_sli4_mbox_cmd_free(phba, mbox);
14858         return status;
14859 }
14860
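/*
 * Illustrative note (not driver code): CQ_CREATE_SET returns a single
 * base queue id and the created CQs are numbered contiguously from it,
 * which is why the loop above simply assigns cq->queue_id = base + idx.
 * A caller creating one CQ per EQ for NVMET might look like this
 * (names taken from elsewhere in the driver, error handling elided):
 *
 *	rc = lpfc_cq_create_set(phba, phba->sli4_hba.nvmet_cqset,
 *				phba->sli4_hba.hba_eq, LPFC_WCQ, LPFC_NVMET);
 */
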
14861 /**
14862  * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
14863  * @phba: HBA structure that indicates port to create a queue on.
14864  * @mq: The queue structure to use to create the mailbox queue.
14865  * @mbox: An allocated pointer to type LPFC_MBOXQ_t
14866  * @cq: The completion queue to associate with this mq.
14867  *
14868  * This function provides failback (fb) functionality when the
14869  * mq_create_ext fails on older FW generations.  Its purpose is otherwise
14870  * identical to mq_create_ext.
14871  *
14872  * This routine cannot fail as all attributes were previously accessed and
14873  * initialized in mq_create_ext.
14874  **/
14875 static void
14876 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
14877                        LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
14878 {
14879         struct lpfc_mbx_mq_create *mq_create;
14880         struct lpfc_dmabuf *dmabuf;
14881         int length;
14882
14883         length = (sizeof(struct lpfc_mbx_mq_create) -
14884                   sizeof(struct lpfc_sli4_cfg_mhdr));
14885         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14886                          LPFC_MBOX_OPCODE_MQ_CREATE,
14887                          length, LPFC_SLI4_MBX_EMBED);
14888         mq_create = &mbox->u.mqe.un.mq_create;
14889         bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
14890                mq->page_count);
14891         bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
14892                cq->queue_id);
14893         bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
14894         switch (mq->entry_count) {
14895         case 16:
14896                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14897                        LPFC_MQ_RING_SIZE_16);
14898                 break;
14899         case 32:
14900                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14901                        LPFC_MQ_RING_SIZE_32);
14902                 break;
14903         case 64:
14904                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14905                        LPFC_MQ_RING_SIZE_64);
14906                 break;
14907         case 128:
14908                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14909                        LPFC_MQ_RING_SIZE_128);
14910                 break;
14911         }
14912         list_for_each_entry(dmabuf, &mq->page_list, list) {
14913                 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14914                         putPaddrLow(dmabuf->phys);
14915                 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14916                         putPaddrHigh(dmabuf->phys);
14917         }
14918 }
14919
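/*
 * Sketch only: the four MQ ring sizes accepted above are consecutive
 * powers of two, so the switch could be collapsed with ilog2() if (and
 * only if) the LPFC_MQ_RING_SIZE_* encodings are themselves consecutive
 * values -- an assumption, not something this file guarantees:
 *
 *	bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
 *	       LPFC_MQ_RING_SIZE_16 + ilog2(mq->entry_count / 16));
 */
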
14920 /**
14921  * lpfc_mq_create - Create a Mailbox Queue on the HBA
14922  * @phba: HBA structure that indicates port to create a queue on.
14923  * @mq: The queue structure to use to create the mailbox queue.
14924  * @cq: The completion queue to associate with this mq.
14925  * @subtype: The queue's subtype.
14926  *
14927  * This function creates a mailbox queue, as detailed in @mq, on a port,
14928  * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
14929  *
14930  * The @phba struct is used to send the mailbox command to the HBA. The @mq
14931  * struct is used to get the entry count and entry size that are necessary to
14932  * determine the number of pages to allocate and use for this queue. This
14933  * function will send the MQ_CREATE mailbox command to the HBA to set up the
14934  * mailbox queue. This function is synchronous; it polls for the mailbox
14935  * command to complete before returning.
14936  *
14937  * On success this function will return zero. If it is unable to allocate
14938  * enough memory it will return -ENOMEM. If the queue create mailbox command
14939  * fails it will return -ENXIO.
14940  **/
14941 int32_t
14942 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
14943                struct lpfc_queue *cq, uint32_t subtype)
14944 {
14945         struct lpfc_mbx_mq_create *mq_create;
14946         struct lpfc_mbx_mq_create_ext *mq_create_ext;
14947         struct lpfc_dmabuf *dmabuf;
14948         LPFC_MBOXQ_t *mbox;
14949         int rc, length, status = 0;
14950         uint32_t shdr_status, shdr_add_status;
14951         union lpfc_sli4_cfg_shdr *shdr;
14952         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14953
14954         /* sanity check on queue memory */
14955         if (!mq || !cq)
14956                 return -ENODEV;
14957         if (!phba->sli4_hba.pc_sli4_params.supported)
14958                 hw_page_size = SLI4_PAGE_SIZE;
14959
14960         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14961         if (!mbox)
14962                 return -ENOMEM;
14963         length = (sizeof(struct lpfc_mbx_mq_create_ext) -
14964                   sizeof(struct lpfc_sli4_cfg_mhdr));
14965         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14966                          LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
14967                          length, LPFC_SLI4_MBX_EMBED);
14968
14969         mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
14970         shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
14971         bf_set(lpfc_mbx_mq_create_ext_num_pages,
14972                &mq_create_ext->u.request, mq->page_count);
14973         bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
14974                &mq_create_ext->u.request, 1);
14975         bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
14976                &mq_create_ext->u.request, 1);
14977         bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
14978                &mq_create_ext->u.request, 1);
14979         bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
14980                &mq_create_ext->u.request, 1);
14981         bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
14982                &mq_create_ext->u.request, 1);
14983         bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
14984         bf_set(lpfc_mbox_hdr_version, &shdr->request,
14985                phba->sli4_hba.pc_sli4_params.mqv);
14986         if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
14987                 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
14988                        cq->queue_id);
14989         else
14990                 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
14991                        cq->queue_id);
14992         switch (mq->entry_count) {
14993         default:
14994                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14995                                 "0362 Unsupported MQ count. (%d)\n",
14996                                 mq->entry_count);
14997                 if (mq->entry_count < 16) {
14998                         status = -EINVAL;
14999                         goto out;
15000                 }
15001                 /* otherwise default to smallest count (drop through) */
15002         case 16:
15003                 bf_set(lpfc_mq_context_ring_size,
15004                        &mq_create_ext->u.request.context,
15005                        LPFC_MQ_RING_SIZE_16);
15006                 break;
15007         case 32:
15008                 bf_set(lpfc_mq_context_ring_size,
15009                        &mq_create_ext->u.request.context,
15010                        LPFC_MQ_RING_SIZE_32);
15011                 break;
15012         case 64:
15013                 bf_set(lpfc_mq_context_ring_size,
15014                        &mq_create_ext->u.request.context,
15015                        LPFC_MQ_RING_SIZE_64);
15016                 break;
15017         case 128:
15018                 bf_set(lpfc_mq_context_ring_size,
15019                        &mq_create_ext->u.request.context,
15020                        LPFC_MQ_RING_SIZE_128);
15021                 break;
15022         }
15023         list_for_each_entry(dmabuf, &mq->page_list, list) {
15024                 memset(dmabuf->virt, 0, hw_page_size);
15025                 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
15026                                         putPaddrLow(dmabuf->phys);
15027                 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
15028                                         putPaddrHigh(dmabuf->phys);
15029         }
15030         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15031         mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15032                               &mq_create_ext->u.response);
15033         if (rc != MBX_SUCCESS) {
15034                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15035                                 "2795 MQ_CREATE_EXT failed with "
15036                                 "status x%x. Failback to MQ_CREATE.\n",
15037                                 rc);
15038                 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
15039                 mq_create = &mbox->u.mqe.un.mq_create;
15040                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15041                 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
15042                 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15043                                       &mq_create->u.response);
15044         }
15045
15046         /* The IOCTL status is embedded in the mailbox subheader. */
15047         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15048         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15049         if (shdr_status || shdr_add_status || rc) {
15050                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15051                                 "2502 MQ_CREATE mailbox failed with "
15052                                 "status x%x add_status x%x, mbx status x%x\n",
15053                                 shdr_status, shdr_add_status, rc);
15054                 status = -ENXIO;
15055                 goto out;
15056         }
15057         if (mq->queue_id == 0xFFFF) {
15058                 status = -ENXIO;
15059                 goto out;
15060         }
15061         mq->type = LPFC_MQ;
15062         mq->assoc_qid = cq->queue_id;
15063         mq->subtype = subtype;
15064         mq->host_index = 0;
15065         mq->hba_index = 0;
15066         mq->entry_repost = LPFC_MQ_REPOST;
15067
15068         /* link the mq onto the parent cq child list */
15069         list_add_tail(&mq->list, &cq->child_list);
15070 out:
15071         mempool_free(mbox, phba->mbox_mem_pool);
15072         return status;
15073 }
15074
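/*
 * Usage sketch (illustrative, error handling elided): the queue memory
 * is sized with lpfc_sli4_queue_alloc() first, then the MQ is bound to
 * the mailbox CQ. Field and constant names mirror lpfc_init.c but
 * should be treated as assumptions here:
 *
 *	struct lpfc_queue *mq;
 *
 *	mq = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 *				   phba->sli4_hba.mq_esize,
 *				   phba->sli4_hba.mq_ecount);
 *	if (!mq || lpfc_mq_create(phba, mq, phba->sli4_hba.mbx_cq,
 *				  LPFC_MBOX))
 *		return -ENOMEM;
 */
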
15075 /**
15076  * lpfc_wq_create - Create a Work Queue on the HBA
15077  * @phba: HBA structure that indicates port to create a queue on.
15078  * @wq: The queue structure to use to create the work queue.
15079  * @cq: The completion queue to bind this work queue to.
15080  * @subtype: The subtype of the work queue indicating its functionality.
15081  *
15082  * This function creates a work queue, as detailed in @wq, on a port, described
15083  * by @phba by sending a WQ_CREATE mailbox command to the HBA.
15084  *
15085  * The @phba struct is used to send the mailbox command to the HBA. The @wq
15086  * struct is used to get the entry count and entry size that are necessary to
15087  * determine the number of pages to allocate and use for this queue. The @cq
15088  * is used to indicate which completion queue to bind this work queue to. This
15089  * function will send the WQ_CREATE mailbox command to the HBA to set up the
15090  * work queue. This function is synchronous; it polls for the mailbox
15091  * command to complete before returning.
15092  *
15093  * On success this function will return zero. If it is unable to allocate
15094  * enough memory it will return -ENOMEM. If the queue create mailbox command
15095  * fails it will return -ENXIO.
15096  **/
15097 int
15098 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
15099                struct lpfc_queue *cq, uint32_t subtype)
15100 {
15101         struct lpfc_mbx_wq_create *wq_create;
15102         struct lpfc_dmabuf *dmabuf;
15103         LPFC_MBOXQ_t *mbox;
15104         int rc, length, status = 0;
15105         uint32_t shdr_status, shdr_add_status;
15106         union lpfc_sli4_cfg_shdr *shdr;
15107         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15108         struct dma_address *page;
15109         void __iomem *bar_memmap_p;
15110         uint32_t db_offset;
15111         uint16_t pci_barset;
15112         uint8_t dpp_barset;
15113         uint32_t dpp_offset;
15114         unsigned long pg_addr;
15115         uint8_t wq_create_version;
15116
15117         /* sanity check on queue memory */
15118         if (!wq || !cq)
15119                 return -ENODEV;
15120         if (!phba->sli4_hba.pc_sli4_params.supported)
15121                 hw_page_size = wq->page_size;
15122
15123         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15124         if (!mbox)
15125                 return -ENOMEM;
15126         length = (sizeof(struct lpfc_mbx_wq_create) -
15127                   sizeof(struct lpfc_sli4_cfg_mhdr));
15128         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15129                          LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
15130                          length, LPFC_SLI4_MBX_EMBED);
15131         wq_create = &mbox->u.mqe.un.wq_create;
15132         shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
15133         bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
15134                     wq->page_count);
15135         bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
15136                     cq->queue_id);
15137
15138         /* wqv is the earliest version supported, NOT the latest */
15139         bf_set(lpfc_mbox_hdr_version, &shdr->request,
15140                phba->sli4_hba.pc_sli4_params.wqv);
15141
15142         if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
15143             (wq->page_size > SLI4_PAGE_SIZE))
15144                 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15145         else
15146                 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15153
15154         switch (wq_create_version) {
15155         case LPFC_Q_CREATE_VERSION_1:
15156                 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
15157                        wq->entry_count);
15158                 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15159                        LPFC_Q_CREATE_VERSION_1);
15160
15161                 switch (wq->entry_size) {
15162                 default:
15163                 case 64:
15164                         bf_set(lpfc_mbx_wq_create_wqe_size,
15165                                &wq_create->u.request_1,
15166                                LPFC_WQ_WQE_SIZE_64);
15167                         break;
15168                 case 128:
15169                         bf_set(lpfc_mbx_wq_create_wqe_size,
15170                                &wq_create->u.request_1,
15171                                LPFC_WQ_WQE_SIZE_128);
15172                         break;
15173                 }
15174                 /* Request DPP by default */
15175                 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
15176                 bf_set(lpfc_mbx_wq_create_page_size,
15177                        &wq_create->u.request_1,
15178                        (wq->page_size / SLI4_PAGE_SIZE));
15179                 page = wq_create->u.request_1.page;
15180                 break;
15181         default:
15182                 page = wq_create->u.request.page;
15183                 break;
15184         }
15185
15186         list_for_each_entry(dmabuf, &wq->page_list, list) {
15187                 memset(dmabuf->virt, 0, hw_page_size);
15188                 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
15189                 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
15190         }
15191
15192         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15193                 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
15194
15195         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15196         /* The IOCTL status is embedded in the mailbox subheader. */
15197         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15198         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15199         if (shdr_status || shdr_add_status || rc) {
15200                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15201                                 "2503 WQ_CREATE mailbox failed with "
15202                                 "status x%x add_status x%x, mbx status x%x\n",
15203                                 shdr_status, shdr_add_status, rc);
15204                 status = -ENXIO;
15205                 goto out;
15206         }
15207
15208         if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
15209                 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
15210                                         &wq_create->u.response);
15211         else
15212                 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
15213                                         &wq_create->u.response_1);
15214
15215         if (wq->queue_id == 0xFFFF) {
15216                 status = -ENXIO;
15217                 goto out;
15218         }
15219
15220         wq->db_format = LPFC_DB_LIST_FORMAT;
15221         if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
15222                 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15223                         wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
15224                                                &wq_create->u.response);
15225                         if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
15226                             (wq->db_format != LPFC_DB_RING_FORMAT)) {
15227                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15228                                                 "3265 WQ[%d] doorbell format "
15229                                                 "not supported: x%x\n",
15230                                                 wq->queue_id, wq->db_format);
15231                                 status = -EINVAL;
15232                                 goto out;
15233                         }
15234                         pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
15235                                             &wq_create->u.response);
15236                         bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15237                                                                    pci_barset);
15238                         if (!bar_memmap_p) {
15239                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15240                                                 "3263 WQ[%d] failed to memmap "
15241                                                 "pci barset:x%x\n",
15242                                                 wq->queue_id, pci_barset);
15243                                 status = -ENOMEM;
15244                                 goto out;
15245                         }
15246                         db_offset = wq_create->u.response.doorbell_offset;
15247                         if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
15248                             (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
15249                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15250                                                 "3252 WQ[%d] doorbell offset "
15251                                                 "not supported: x%x\n",
15252                                                 wq->queue_id, db_offset);
15253                                 status = -EINVAL;
15254                                 goto out;
15255                         }
15256                         wq->db_regaddr = bar_memmap_p + db_offset;
15257                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15258                                         "3264 WQ[%d]: barset:x%x, offset:x%x, "
15259                                         "format:x%x\n", wq->queue_id,
15260                                         pci_barset, db_offset, wq->db_format);
15261                 } else
15262                         wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15263         } else {
15264                 /* Check if DPP was honored by the firmware */
15265                 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
15266                                     &wq_create->u.response_1);
15267                 if (wq->dpp_enable) {
15268                         pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
15269                                             &wq_create->u.response_1);
15270                         bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15271                                                                    pci_barset);
15272                         if (!bar_memmap_p) {
15273                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15274                                                 "3267 WQ[%d] failed to memmap "
15275                                                 "pci barset:x%x\n",
15276                                                 wq->queue_id, pci_barset);
15277                                 status = -ENOMEM;
15278                                 goto out;
15279                         }
15280                         db_offset = wq_create->u.response_1.doorbell_offset;
15281                         wq->db_regaddr = bar_memmap_p + db_offset;
15282                         wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
15283                                             &wq_create->u.response_1);
15284                         dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
15285                                             &wq_create->u.response_1);
15286                         bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15287                                                                    dpp_barset);
15288                         if (!bar_memmap_p) {
15289                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15290                                                 "3268 WQ[%d] failed to memmap "
15291                                                 "pci barset:x%x\n",
15292                                                 wq->queue_id, dpp_barset);
15293                                 status = -ENOMEM;
15294                                 goto out;
15295                         }
15296                         dpp_offset = wq_create->u.response_1.dpp_offset;
15297                         wq->dpp_regaddr = bar_memmap_p + dpp_offset;
15298                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15299                                         "3271 WQ[%d]: barset:x%x, offset:x%x, "
15300                                         "dpp_id:x%x dpp_barset:x%x "
15301                                         "dpp_offset:x%x\n",
15302                                         wq->queue_id, pci_barset, db_offset,
15303                                         wq->dpp_id, dpp_barset, dpp_offset);
15304
15305                         /* Enable combined writes for DPP aperture */
15306                         pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
15307 #ifdef CONFIG_X86
15308                         rc = set_memory_wc(pg_addr, 1);
15309                         if (rc) {
15310                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15311                                         "3272 Cannot setup Combined "
15312                                         "Write on WQ[%d] - disable DPP\n",
15313                                         wq->queue_id);
15314                                 phba->cfg_enable_dpp = 0;
15315                         }
15316 #else
15317                         phba->cfg_enable_dpp = 0;
15318 #endif
15319                 } else
15320                         wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15321         }
15322         wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
15323         if (wq->pring == NULL) {
15324                 status = -ENOMEM;
15325                 goto out;
15326         }
15327         wq->type = LPFC_WQ;
15328         wq->assoc_qid = cq->queue_id;
15329         wq->subtype = subtype;
15330         wq->host_index = 0;
15331         wq->hba_index = 0;
15332         wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
15333
15334         /* link the wq onto the parent cq child list */
15335         list_add_tail(&wq->list, &cq->child_list);
15336 out:
15337         mempool_free(mbox, phba->mbox_mem_pool);
15338         return status;
15339 }
15340
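/*
 * Usage sketch (illustrative, error handling elided): a WQ is always
 * bound to an already-created CQ and is sized before creation. Field
 * names mirror lpfc_init.c but should be treated as assumptions here:
 *
 *	struct lpfc_queue *wq;
 *
 *	wq = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 *				   phba->sli4_hba.wq_esize,
 *				   phba->sli4_hba.wq_ecount);
 *	if (!wq || lpfc_wq_create(phba, wq, cq, LPFC_ELS))
 *		return -ENOMEM;
 */
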
15341 /**
15342  * lpfc_rq_create - Create a Receive Queue on the HBA
15343  * @phba: HBA structure that indicates port to create a queue on.
15344  * @hrq: The queue structure to use to create the header receive queue.
15345  * @drq: The queue structure to use to create the data receive queue.
15346  * @cq: The completion queue to bind these receive queues to.
 * @subtype: The queue's subtype.
15347  *
15348  * This function creates a receive buffer queue pair, as detailed in @hrq and
15349  * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
15350  * to the HBA.
15351  *
15352  * The @phba struct is used to send the mailbox command to the HBA. The @drq
15353  * and @hrq structs are used to get the entry count that is necessary to
15354  * determine the number of pages to use for this queue. The @cq is used to
15355  * indicate which completion queue the buffers posted to these queues are
15356  * bound to. This function will send the RQ_CREATE mailbox command to the HBA
15357  * to set up the receive queue pair. This function is synchronous; it polls
15358  * for the mailbox command to complete before returning.
15359  *
15360  * On success this function will return zero. If it is unable to allocate
15361  * enough memory it will return -ENOMEM. If the queue create mailbox command
15362  * fails it will return -ENXIO.
15363  **/
15364 int
15365 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15366                struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
15367 {
15368         struct lpfc_mbx_rq_create *rq_create;
15369         struct lpfc_dmabuf *dmabuf;
15370         LPFC_MBOXQ_t *mbox;
15371         int rc, length, status = 0;
15372         uint32_t shdr_status, shdr_add_status;
15373         union lpfc_sli4_cfg_shdr *shdr;
15374         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15375         void __iomem *bar_memmap_p;
15376         uint32_t db_offset;
15377         uint16_t pci_barset;
15378
15379         /* sanity check on queue memory */
15380         if (!hrq || !drq || !cq)
15381                 return -ENODEV;
15382         if (!phba->sli4_hba.pc_sli4_params.supported)
15383                 hw_page_size = SLI4_PAGE_SIZE;
15384
15385         if (hrq->entry_count != drq->entry_count)
15386                 return -EINVAL;
15387         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15388         if (!mbox)
15389                 return -ENOMEM;
15390         length = (sizeof(struct lpfc_mbx_rq_create) -
15391                   sizeof(struct lpfc_sli4_cfg_mhdr));
15392         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15393                          LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15394                          length, LPFC_SLI4_MBX_EMBED);
15395         rq_create = &mbox->u.mqe.un.rq_create;
15396         shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15397         bf_set(lpfc_mbox_hdr_version, &shdr->request,
15398                phba->sli4_hba.pc_sli4_params.rqv);
15399         if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15400                 bf_set(lpfc_rq_context_rqe_count_1,
15401                        &rq_create->u.request.context,
15402                        hrq->entry_count);
15403                 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
15404                 bf_set(lpfc_rq_context_rqe_size,
15405                        &rq_create->u.request.context,
15406                        LPFC_RQE_SIZE_8);
15407                 bf_set(lpfc_rq_context_page_size,
15408                        &rq_create->u.request.context,
15409                        LPFC_RQ_PAGE_SIZE_4096);
15410         } else {
15411                 switch (hrq->entry_count) {
15412                 default:
15413                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15414                                         "2535 Unsupported RQ count. (%d)\n",
15415                                         hrq->entry_count);
15416                         if (hrq->entry_count < 512) {
15417                                 status = -EINVAL;
15418                                 goto out;
15419                         }
15420                         /* otherwise default to smallest count (drop through) */
15421                 case 512:
15422                         bf_set(lpfc_rq_context_rqe_count,
15423                                &rq_create->u.request.context,
15424                                LPFC_RQ_RING_SIZE_512);
15425                         break;
15426                 case 1024:
15427                         bf_set(lpfc_rq_context_rqe_count,
15428                                &rq_create->u.request.context,
15429                                LPFC_RQ_RING_SIZE_1024);
15430                         break;
15431                 case 2048:
15432                         bf_set(lpfc_rq_context_rqe_count,
15433                                &rq_create->u.request.context,
15434                                LPFC_RQ_RING_SIZE_2048);
15435                         break;
15436                 case 4096:
15437                         bf_set(lpfc_rq_context_rqe_count,
15438                                &rq_create->u.request.context,
15439                                LPFC_RQ_RING_SIZE_4096);
15440                         break;
15441                 }
15442                 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
15443                        LPFC_HDR_BUF_SIZE);
15444         }
15445         bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15446                cq->queue_id);
15447         bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15448                hrq->page_count);
15449         list_for_each_entry(dmabuf, &hrq->page_list, list) {
15450                 memset(dmabuf->virt, 0, hw_page_size);
15451                 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15452                                         putPaddrLow(dmabuf->phys);
15453                 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15454                                         putPaddrHigh(dmabuf->phys);
15455         }
15456         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15457                 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15458
15459         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15460         /* The IOCTL status is embedded in the mailbox subheader. */
15461         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15462         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15463         if (shdr_status || shdr_add_status || rc) {
15464                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15465                                 "2504 RQ_CREATE mailbox failed with "
15466                                 "status x%x add_status x%x, mbx status x%x\n",
15467                                 shdr_status, shdr_add_status, rc);
15468                 status = -ENXIO;
15469                 goto out;
15470         }
15471         hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15472         if (hrq->queue_id == 0xFFFF) {
15473                 status = -ENXIO;
15474                 goto out;
15475         }
15476
15477         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15478                 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
15479                                         &rq_create->u.response);
15480                 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
15481                     (hrq->db_format != LPFC_DB_RING_FORMAT)) {
15482                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15483                                         "3262 RQ [%d] doorbell format not "
15484                                         "supported: x%x\n", hrq->queue_id,
15485                                         hrq->db_format);
15486                         status = -EINVAL;
15487                         goto out;
15488                 }
15489
15490                 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
15491                                     &rq_create->u.response);
15492                 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
15493                 if (!bar_memmap_p) {
15494                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15495                                         "3269 RQ[%d] failed to memmap pci "
15496                                         "barset:x%x\n", hrq->queue_id,
15497                                         pci_barset);
15498                         status = -ENOMEM;
15499                         goto out;
15500                 }
15501
15502                 db_offset = rq_create->u.response.doorbell_offset;
15503                 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
15504                     (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
15505                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15506                                         "3270 RQ[%d] doorbell offset not "
15507                                         "supported: x%x\n", hrq->queue_id,
15508                                         db_offset);
15509                         status = -EINVAL;
15510                         goto out;
15511                 }
15512                 hrq->db_regaddr = bar_memmap_p + db_offset;
15513                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15514                                 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
15515                                 "format:x%x\n", hrq->queue_id, pci_barset,
15516                                 db_offset, hrq->db_format);
15517         } else {
15518                 hrq->db_format = LPFC_DB_RING_FORMAT;
15519                 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15520         }
15521         hrq->type = LPFC_HRQ;
15522         hrq->assoc_qid = cq->queue_id;
15523         hrq->subtype = subtype;
15524         hrq->host_index = 0;
15525         hrq->hba_index = 0;
15526         hrq->entry_repost = LPFC_RQ_REPOST;
15527
15528         /* now create the data queue */
15529         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15530                          LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15531                          length, LPFC_SLI4_MBX_EMBED);
15532         bf_set(lpfc_mbox_hdr_version, &shdr->request,
15533                phba->sli4_hba.pc_sli4_params.rqv);
15534         if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15535                 bf_set(lpfc_rq_context_rqe_count_1,
15536                        &rq_create->u.request.context, hrq->entry_count);
15537                 if (subtype == LPFC_NVMET)
15538                         rq_create->u.request.context.buffer_size =
15539                                 LPFC_NVMET_DATA_BUF_SIZE;
15540                 else
15541                         rq_create->u.request.context.buffer_size =
15542                                 LPFC_DATA_BUF_SIZE;
15543                 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
15544                        LPFC_RQE_SIZE_8);
15545                 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
15546                        (PAGE_SIZE/SLI4_PAGE_SIZE));
15547         } else {
15548                 switch (drq->entry_count) {
15549                 default:
15550                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15551                                         "2536 Unsupported RQ count. (%d)\n",
15552                                         drq->entry_count);
15553                         if (drq->entry_count < 512) {
15554                                 status = -EINVAL;
15555                                 goto out;
15556                         }
15557                         /* otherwise default to smallest count (drop through) */
15558                 case 512:
15559                         bf_set(lpfc_rq_context_rqe_count,
15560                                &rq_create->u.request.context,
15561                                LPFC_RQ_RING_SIZE_512);
15562                         break;
15563                 case 1024:
15564                         bf_set(lpfc_rq_context_rqe_count,
15565                                &rq_create->u.request.context,
15566                                LPFC_RQ_RING_SIZE_1024);
15567                         break;
15568                 case 2048:
15569                         bf_set(lpfc_rq_context_rqe_count,
15570                                &rq_create->u.request.context,
15571                                LPFC_RQ_RING_SIZE_2048);
15572                         break;
15573                 case 4096:
15574                         bf_set(lpfc_rq_context_rqe_count,
15575                                &rq_create->u.request.context,
15576                                LPFC_RQ_RING_SIZE_4096);
15577                         break;
15578                 }
15579                 if (subtype == LPFC_NVMET)
15580                         bf_set(lpfc_rq_context_buf_size,
15581                                &rq_create->u.request.context,
15582                                LPFC_NVMET_DATA_BUF_SIZE);
15583                 else
15584                         bf_set(lpfc_rq_context_buf_size,
15585                                &rq_create->u.request.context,
15586                                LPFC_DATA_BUF_SIZE);
15587         }
15588         bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15589                cq->queue_id);
15590         bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15591                drq->page_count);
15592         list_for_each_entry(dmabuf, &drq->page_list, list) {
15593                 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15594                                         putPaddrLow(dmabuf->phys);
15595                 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15596                                         putPaddrHigh(dmabuf->phys);
15597         }
15598         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15599                 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15600         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15601         /* The IOCTL status is embedded in the mailbox subheader. */
15602         shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15603         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15604         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15605         if (shdr_status || shdr_add_status || rc) {
15606                 status = -ENXIO;
15607                 goto out;
15608         }
15609         drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15610         if (drq->queue_id == 0xFFFF) {
15611                 status = -ENXIO;
15612                 goto out;
15613         }
15614         drq->type = LPFC_DRQ;
15615         drq->assoc_qid = cq->queue_id;
15616         drq->subtype = subtype;
15617         drq->host_index = 0;
15618         drq->hba_index = 0;
15619         drq->entry_repost = LPFC_RQ_REPOST;
15620
15621         /* link the header and data RQs onto the parent cq child list */
15622         list_add_tail(&hrq->list, &cq->child_list);
15623         list_add_tail(&drq->list, &cq->child_list);
15624
15625 out:
15626         mempool_free(mbox, phba->mbox_mem_pool);
15627         return status;
15628 }
15629
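/*
 * Usage sketch (illustrative): the header and data RQs are always
 * created as a pair against a single CQ; the driver's unsolicited
 * receive pair is set up along these lines:
 *
 *	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq,
 *			    phba->sli4_hba.dat_rq,
 *			    phba->sli4_hba.els_cq, LPFC_USOL);
 */
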
15630 /**
15631  * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
15632  * @phba: HBA structure that indicates port to create a queue on.
15633  * @hrqp: The queue structure array to use to create the header receive queues.
15634  * @drqp: The queue structure array to use to create the data receive queues.
15635  * @cqp: The completion queue array to bind these receive queues to.
 * @subtype: The queue's subtype.
15636  *
15637  * This function creates receive buffer queue pairs, as detailed in @hrqp and
15638  * @drqp, on a port, described by @phba by sending a single non-embedded
15639  * RQ_CREATE mailbox command to the HBA.
15640  *
15641  * The @phba struct is used to send the mailbox command to the HBA. The @drqp
15642  * and @hrqp entries are used to get the entry counts that are necessary to
15643  * determine the number of pages to use for each queue. The @cqp entries
15644  * indicate which completion queue the buffers posted to each queue pair are
15645  * bound to. This function will send the RQ_CREATE mailbox command to the HBA
15646  * to set up the receive queue pairs. This function is synchronous; it polls
15647  * for the mailbox command to complete before returning.
15648  *
15649  * On success this function will return zero. If it is unable to allocate
15650  * enough memory it will return -ENOMEM. If the queue create mailbox command
15651  * fails it will return -ENXIO.
15652  **/
15653 int
15654 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
15655                 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
15656                 uint32_t subtype)
15657 {
15658         struct lpfc_queue *hrq, *drq, *cq;
15659         struct lpfc_mbx_rq_create_v2 *rq_create;
15660         struct lpfc_dmabuf *dmabuf;
15661         LPFC_MBOXQ_t *mbox;
15662         int rc, length, alloclen, status = 0;
15663         int cnt, idx, numrq, page_idx = 0;
15664         uint32_t shdr_status, shdr_add_status;
15665         union lpfc_sli4_cfg_shdr *shdr;
15666         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15667
15668         numrq = phba->cfg_nvmet_mrq;
15669         /* sanity check on array memory */
15670         if (!hrqp || !drqp || !cqp || !numrq)
15671                 return -ENODEV;
15672         if (!phba->sli4_hba.pc_sli4_params.supported)
15673                 hw_page_size = SLI4_PAGE_SIZE;
15674
15675         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15676         if (!mbox)
15677                 return -ENOMEM;
15678
15679         length = sizeof(struct lpfc_mbx_rq_create_v2);
15680         length += ((2 * numrq * hrqp[0]->page_count) *
15681                    sizeof(struct dma_address));
15682
15683         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15684                                     LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
15685                                     LPFC_SLI4_MBX_NEMBED);
15686         if (alloclen < length) {
15687                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15688                                 "3099 Allocated DMA memory size (%d) is "
15689                                 "less than the requested DMA memory size "
15690                                 "(%d)\n", alloclen, length);
15691                 status = -ENOMEM;
15692                 goto out;
15693         }
15694
15697         rq_create = mbox->sge_array->addr[0];
15698         shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
15699
15700         bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
15701         cnt = 0;
15702
15703         for (idx = 0; idx < numrq; idx++) {
15704                 hrq = hrqp[idx];
15705                 drq = drqp[idx];
15706                 cq  = cqp[idx];
15707
15708                 /* sanity check on queue memory */
15709                 if (!hrq || !drq || !cq) {
15710                         status = -ENODEV;
15711                         goto out;
15712                 }
15713
15714                 if (hrq->entry_count != drq->entry_count) {
15715                         status = -EINVAL;
15716                         goto out;
15717                 }
15718
15719                 if (idx == 0) {
15720                         bf_set(lpfc_mbx_rq_create_num_pages,
15721                                &rq_create->u.request,
15722                                hrq->page_count);
15723                         bf_set(lpfc_mbx_rq_create_rq_cnt,
15724                                &rq_create->u.request, (numrq * 2));
15725                         bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
15726                                1);
15727                         bf_set(lpfc_rq_context_base_cq,
15728                                &rq_create->u.request.context,
15729                                cq->queue_id);
15730                         bf_set(lpfc_rq_context_data_size,
15731                                &rq_create->u.request.context,
15732                                LPFC_NVMET_DATA_BUF_SIZE);
15733                         bf_set(lpfc_rq_context_hdr_size,
15734                                &rq_create->u.request.context,
15735                                LPFC_HDR_BUF_SIZE);
15736                         bf_set(lpfc_rq_context_rqe_count_1,
15737                                &rq_create->u.request.context,
15738                                hrq->entry_count);
15739                         bf_set(lpfc_rq_context_rqe_size,
15740                                &rq_create->u.request.context,
15741                                LPFC_RQE_SIZE_8);
15742                         bf_set(lpfc_rq_context_page_size,
15743                                &rq_create->u.request.context,
15744                                (PAGE_SIZE/SLI4_PAGE_SIZE));
15745                 }
15746                 rc = 0;
15747                 list_for_each_entry(dmabuf, &hrq->page_list, list) {
15748                         memset(dmabuf->virt, 0, hw_page_size);
15749                         cnt = page_idx + dmabuf->buffer_tag;
15750                         rq_create->u.request.page[cnt].addr_lo =
15751                                         putPaddrLow(dmabuf->phys);
15752                         rq_create->u.request.page[cnt].addr_hi =
15753                                         putPaddrHigh(dmabuf->phys);
15754                         rc++;
15755                 }
15756                 page_idx += rc;
15757
15758                 rc = 0;
15759                 list_for_each_entry(dmabuf, &drq->page_list, list) {
15760                         memset(dmabuf->virt, 0, hw_page_size);
15761                         cnt = page_idx + dmabuf->buffer_tag;
15762                         rq_create->u.request.page[cnt].addr_lo =
15763                                         putPaddrLow(dmabuf->phys);
15764                         rq_create->u.request.page[cnt].addr_hi =
15765                                         putPaddrHigh(dmabuf->phys);
15766                         rc++;
15767                 }
15768                 page_idx += rc;
15769
15770                 hrq->db_format = LPFC_DB_RING_FORMAT;
15771                 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15772                 hrq->type = LPFC_HRQ;
15773                 hrq->assoc_qid = cq->queue_id;
15774                 hrq->subtype = subtype;
15775                 hrq->host_index = 0;
15776                 hrq->hba_index = 0;
15777                 hrq->entry_repost = LPFC_RQ_REPOST;
15778
15779                 drq->db_format = LPFC_DB_RING_FORMAT;
15780                 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15781                 drq->type = LPFC_DRQ;
15782                 drq->assoc_qid = cq->queue_id;
15783                 drq->subtype = subtype;
15784                 drq->host_index = 0;
15785                 drq->hba_index = 0;
15786                 drq->entry_repost = LPFC_RQ_REPOST;
15787
15788                 list_add_tail(&hrq->list, &cq->child_list);
15789                 list_add_tail(&drq->list, &cq->child_list);
15790         }
15791
15792         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15793         /* The IOCTL status is embedded in the mailbox subheader. */
15794         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15795         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15796         if (shdr_status || shdr_add_status || rc) {
15797                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15798                                 "3120 RQ_CREATE mailbox failed with "
15799                                 "status x%x add_status x%x, mbx status x%x\n",
15800                                 shdr_status, shdr_add_status, rc);
15801                 status = -ENXIO;
15802                 goto out;
15803         }
15804         rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15805         if (rc == 0xFFFF) {
15806                 status = -ENXIO;
15807                 goto out;
15808         }
15809
15810         /* Initialize all RQs with associated queue id */
15811         for (idx = 0; idx < numrq; idx++) {
15812                 hrq = hrqp[idx];
15813                 hrq->queue_id = rc + (2 * idx);
15814                 drq = drqp[idx];
15815                 drq->queue_id = rc + (2 * idx) + 1;
15816         }
15817
15818 out:
15819         lpfc_sli4_mbox_cmd_free(phba, mbox);
15820         return status;
15821 }
15822
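/*
 * Illustrative note: the firmware returns one base queue id for the
 * whole set and the header/data queue ids interleave from it, hence
 * the assignments above. For pair index i:
 *
 *	hrq_id = base + (2 * i);
 *	drq_id = base + (2 * i) + 1;
 */
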
15823 /**
15824  * lpfc_eq_destroy - Destroy an Event Queue on the HBA
 * @phba: HBA structure that indicates port the queue is on.
15825  * @eq: The queue structure associated with the queue to destroy.
15826  *
15827  * This function destroys a queue, as detailed in @eq, by sending a mailbox
15828  * command, specific to the type of queue, to the HBA.
15829  *
15830  * The @eq struct is used to get the queue ID of the queue to destroy.
15831  *
15832  * On success this function will return a zero. If the queue destroy mailbox
15833  * command fails this function will return -ENXIO.
15834  **/
15835 int
15836 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
15837 {
15838         LPFC_MBOXQ_t *mbox;
15839         int rc, length, status = 0;
15840         uint32_t shdr_status, shdr_add_status;
15841         union lpfc_sli4_cfg_shdr *shdr;
15842
15843         /* sanity check on queue memory */
15844         if (!eq)
15845                 return -ENODEV;
15846         mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
15847         if (!mbox)
15848                 return -ENOMEM;
15849         length = (sizeof(struct lpfc_mbx_eq_destroy) -
15850                   sizeof(struct lpfc_sli4_cfg_mhdr));
15851         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15852                          LPFC_MBOX_OPCODE_EQ_DESTROY,
15853                          length, LPFC_SLI4_MBX_EMBED);
15854         bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
15855                eq->queue_id);
15856         mbox->vport = eq->phba->pport;
15857         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15858
15859         rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
15860         /* The IOCTL status is embedded in the mailbox subheader. */
15861         shdr = (union lpfc_sli4_cfg_shdr *)
15862                 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
15863         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15864         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15865         if (shdr_status || shdr_add_status || rc) {
15866                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15867                                 "2505 EQ_DESTROY mailbox failed with "
15868                                 "status x%x add_status x%x, mbx status x%x\n",
15869                                 shdr_status, shdr_add_status, rc);
15870                 status = -ENXIO;
15871         }
15872
15873         /* Remove eq from any list */
15874         list_del_init(&eq->list);
15875         mempool_free(mbox, eq->phba->mbox_mem_pool);
15876         return status;
15877 }
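
/*
 * Usage sketch (illustrative only, not driver code): each of the
 * lpfc_*_destroy routines below follows the same shape -- allocate a
 * mailbox from the mempool, build an embedded SLI4 config command
 * carrying the queue ID, issue it in polled mode, and check both the
 * mailbox return code and the status fields in the config subheader.
 * A hypothetical teardown loop could look like:
 *
 *	for (idx = 0; idx < io_channel_cnt; idx++)
 *		if (lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[idx]))
 *			break;
 *
 * where io_channel_cnt and hba_eq[] stand in for the driver's per-HBA
 * channel count and EQ array; both names are assumptions of this sketch.
 */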
15878
/**
 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
 * @phba: pointer to lpfc hba data structure.
 * @cq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @cq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @cq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
15891 int
15892 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
15893 {
15894         LPFC_MBOXQ_t *mbox;
15895         int rc, length, status = 0;
15896         uint32_t shdr_status, shdr_add_status;
15897         union lpfc_sli4_cfg_shdr *shdr;
15898
15899         /* sanity check on queue memory */
15900         if (!cq)
15901                 return -ENODEV;
15902         mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
15903         if (!mbox)
15904                 return -ENOMEM;
15905         length = (sizeof(struct lpfc_mbx_cq_destroy) -
15906                   sizeof(struct lpfc_sli4_cfg_mhdr));
15907         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15908                          LPFC_MBOX_OPCODE_CQ_DESTROY,
15909                          length, LPFC_SLI4_MBX_EMBED);
15910         bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
15911                cq->queue_id);
15912         mbox->vport = cq->phba->pport;
15913         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15914         rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
15915         /* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
15918         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15919         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15920         if (shdr_status || shdr_add_status || rc) {
15921                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15922                                 "2506 CQ_DESTROY mailbox failed with "
15923                                 "status x%x add_status x%x, mbx status x%x\n",
15924                                 shdr_status, shdr_add_status, rc);
15925                 status = -ENXIO;
15926         }
15927         /* Remove cq from any list */
15928         list_del_init(&cq->list);
15929         mempool_free(mbox, cq->phba->mbox_mem_pool);
15930         return status;
15931 }
15932
/**
 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 * @phba: pointer to lpfc hba data structure.
 * @mq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @mq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @mq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
15945 int
15946 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
15947 {
15948         LPFC_MBOXQ_t *mbox;
15949         int rc, length, status = 0;
15950         uint32_t shdr_status, shdr_add_status;
15951         union lpfc_sli4_cfg_shdr *shdr;
15952
15953         /* sanity check on queue memory */
15954         if (!mq)
15955                 return -ENODEV;
15956         mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
15957         if (!mbox)
15958                 return -ENOMEM;
15959         length = (sizeof(struct lpfc_mbx_mq_destroy) -
15960                   sizeof(struct lpfc_sli4_cfg_mhdr));
15961         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15962                          LPFC_MBOX_OPCODE_MQ_DESTROY,
15963                          length, LPFC_SLI4_MBX_EMBED);
15964         bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
15965                mq->queue_id);
15966         mbox->vport = mq->phba->pport;
15967         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15968         rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
15969         /* The IOCTL status is embedded in the mailbox subheader. */
15970         shdr = (union lpfc_sli4_cfg_shdr *)
15971                 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
15972         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15973         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15974         if (shdr_status || shdr_add_status || rc) {
15975                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15976                                 "2507 MQ_DESTROY mailbox failed with "
15977                                 "status x%x add_status x%x, mbx status x%x\n",
15978                                 shdr_status, shdr_add_status, rc);
15979                 status = -ENXIO;
15980         }
15981         /* Remove mq from any list */
15982         list_del_init(&mq->list);
15983         mempool_free(mbox, mq->phba->mbox_mem_pool);
15984         return status;
15985 }
15986
/**
 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
 * @phba: pointer to lpfc hba data structure.
 * @wq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @wq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @wq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
15999 int
16000 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16001 {
16002         LPFC_MBOXQ_t *mbox;
16003         int rc, length, status = 0;
16004         uint32_t shdr_status, shdr_add_status;
16005         union lpfc_sli4_cfg_shdr *shdr;
16006
16007         /* sanity check on queue memory */
16008         if (!wq)
16009                 return -ENODEV;
16010         mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16011         if (!mbox)
16012                 return -ENOMEM;
16013         length = (sizeof(struct lpfc_mbx_wq_destroy) -
16014                   sizeof(struct lpfc_sli4_cfg_mhdr));
16015         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16016                          LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16017                          length, LPFC_SLI4_MBX_EMBED);
16018         bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16019                wq->queue_id);
16020         mbox->vport = wq->phba->pport;
16021         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16022         rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16023         shdr = (union lpfc_sli4_cfg_shdr *)
16024                 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16025         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16026         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16027         if (shdr_status || shdr_add_status || rc) {
16028                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16029                                 "2508 WQ_DESTROY mailbox failed with "
16030                                 "status x%x add_status x%x, mbx status x%x\n",
16031                                 shdr_status, shdr_add_status, rc);
16032                 status = -ENXIO;
16033         }
16034         /* Remove wq from any list */
16035         list_del_init(&wq->list);
16036         kfree(wq->pring);
16037         wq->pring = NULL;
16038         mempool_free(mbox, wq->phba->mbox_mem_pool);
16039         return status;
16040 }
16041
/**
 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
 * @phba: pointer to lpfc hba data structure.
 * @hrq: The queue structure associated with the header receive queue.
 * @drq: The queue structure associated with the data receive queue.
 *
 * This function destroys the header and data receive queue pair, as
 * detailed in @hrq and @drq, by sending a mailbox command, specific to
 * the type of queue, to the HBA.
 *
 * The @hrq and @drq structs are used to get the queue IDs of the queues
 * to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
16054 int
16055 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16056                 struct lpfc_queue *drq)
16057 {
16058         LPFC_MBOXQ_t *mbox;
16059         int rc, length, status = 0;
16060         uint32_t shdr_status, shdr_add_status;
16061         union lpfc_sli4_cfg_shdr *shdr;
16062
16063         /* sanity check on queue memory */
16064         if (!hrq || !drq)
16065                 return -ENODEV;
16066         mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
16067         if (!mbox)
16068                 return -ENOMEM;
16069         length = (sizeof(struct lpfc_mbx_rq_destroy) -
16070                   sizeof(struct lpfc_sli4_cfg_mhdr));
16071         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16072                          LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
16073                          length, LPFC_SLI4_MBX_EMBED);
16074         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16075                hrq->queue_id);
16076         mbox->vport = hrq->phba->pport;
16077         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16078         rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
16079         /* The IOCTL status is embedded in the mailbox subheader. */
16080         shdr = (union lpfc_sli4_cfg_shdr *)
16081                 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16082         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16083         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16084         if (shdr_status || shdr_add_status || rc) {
16085                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16086                                 "2509 RQ_DESTROY mailbox failed with "
16087                                 "status x%x add_status x%x, mbx status x%x\n",
16088                                 shdr_status, shdr_add_status, rc);
16089                 if (rc != MBX_TIMEOUT)
16090                         mempool_free(mbox, hrq->phba->mbox_mem_pool);
16091                 return -ENXIO;
16092         }
16093         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16094                drq->queue_id);
16095         rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
16096         shdr = (union lpfc_sli4_cfg_shdr *)
16097                 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16098         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16099         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16100         if (shdr_status || shdr_add_status || rc) {
16101                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16102                                 "2510 RQ_DESTROY mailbox failed with "
16103                                 "status x%x add_status x%x, mbx status x%x\n",
16104                                 shdr_status, shdr_add_status, rc);
16105                 status = -ENXIO;
16106         }
16107         list_del_init(&hrq->list);
16108         list_del_init(&drq->list);
16109         mempool_free(mbox, hrq->phba->mbox_mem_pool);
16110         return status;
16111 }
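
/*
 * Usage sketch (illustrative): header and data RQs are created and
 * destroyed as a pair, so teardown passes both queues in one call and
 * the same mailbox is reused for the two RQ_DESTROY commands:
 *
 *	rc = lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
 *			     phba->sli4_hba.dat_rq);
 *
 * Note the asymmetric cleanup above: if the header-RQ command fails,
 * the routine returns -ENXIO at once and only frees the mailbox when
 * the failure was not MBX_TIMEOUT, since a timed-out mailbox may still
 * be owned by the port. hdr_rq/dat_rq are named here for illustration.
 */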
16112
/**
 * lpfc_sli4_post_sgl - Post a scatter gather list for an XRI to the HBA
 * @phba: pointer to lpfc hba data structure.
 * @pdma_phys_addr0: Physical address of the 1st SGL page.
 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
 * @xritag: the xritag that ties this io to the SGL pages.
 *
 * This routine will post the sgl pages for the IO that has the xritag
 * that is in the iocbq structure. The xritag is assigned during iocbq
 * creation and persists for as long as the driver is loaded.
 * If the caller has fewer than 256 scatter gather segments to map then
 * pdma_phys_addr1 should be 0.
 * If the caller needs to map more than 256 scatter gather segments then
 * pdma_phys_addr1 should be a valid physical address.
 * The physical address for SGLs must be 64 byte aligned.
 * If two SGL pages are mapped, the first one must have 256 entries;
 * the second sgl can have between 1 and 256 entries.
 *
 * Return codes:
 *      0 - Success
 *      -ENXIO, -ENOMEM - Failure
 **/
16135 int
16136 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16137                 dma_addr_t pdma_phys_addr0,
16138                 dma_addr_t pdma_phys_addr1,
16139                 uint16_t xritag)
16140 {
16141         struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16142         LPFC_MBOXQ_t *mbox;
16143         int rc;
16144         uint32_t shdr_status, shdr_add_status;
16145         uint32_t mbox_tmo;
16146         union lpfc_sli4_cfg_shdr *shdr;
16147
16148         if (xritag == NO_XRI) {
16149                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16150                                 "0364 Invalid param:\n");
16151                 return -EINVAL;
16152         }
16153
16154         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16155         if (!mbox)
16156                 return -ENOMEM;
16157
16158         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16159                         LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16160                         sizeof(struct lpfc_mbx_post_sgl_pages) -
16161                         sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16162
16163         post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16164                                 &mbox->u.mqe.un.post_sgl_pages;
16165         bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16166         bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16167
16168         post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16169                                 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16170         post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16171                                 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16172
16173         post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16174                                 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16175         post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16176                                 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16177         if (!phba->sli4_hba.intr_enable)
16178                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16179         else {
16180                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16181                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16182         }
16183         /* The IOCTL status is embedded in the mailbox subheader. */
16184         shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16185         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16186         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16187         if (rc != MBX_TIMEOUT)
16188                 mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2511 POST_SGL mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
16196 }
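
/*
 * Usage sketch (illustrative): a caller posting the SGL pages for one
 * xri passes the physical addresses of up to two SGL pages; the second
 * address is 0 when a single page (up to 256 entries) is enough:
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
 *
 * where sglq is assumed to be an lpfc_sglq entry whose DMA buffer holds
 * the SGEs for that exchange.
 */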
16197
/**
 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the next available logical xri
 * from the driver's xri bitmask. An xri is required to tie an IO to
 * the SGL pages posted for it.
 *
 * Returns
 *      The allocated xri if one was available.
 *      NO_XRI if no xris are available.
 **/
16211 static uint16_t
16212 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16213 {
16214         unsigned long xri;
16215
16216         /*
16217          * Fetch the next logical xri.  Because this index is logical,
16218          * the driver starts at 0 each time.
16219          */
16220         spin_lock_irq(&phba->hbalock);
16221         xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16222                                  phba->sli4_hba.max_cfg_param.max_xri, 0);
16223         if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16224                 spin_unlock_irq(&phba->hbalock);
16225                 return NO_XRI;
16226         } else {
16227                 set_bit(xri, phba->sli4_hba.xri_bmask);
16228                 phba->sli4_hba.max_cfg_param.xri_used++;
16229         }
16230         spin_unlock_irq(&phba->hbalock);
16231         return xri;
16232 }
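
/*
 * Implementation note (sketch): xri allocation is a first-fit scan of
 * a bitmask under hbalock, logically equivalent to:
 *
 *	xri = find_next_zero_bit(bmask, max_xri, 0);
 *	if (xri < max_xri)
 *		set_bit(xri, bmask);
 *
 * The logical index returned here is later translated through
 * phba->sli4_hba.xri_ids[] into the physical XRI value handed to the
 * port (see lpfc_sli4_xri_inrange() below).
 */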
16233
/**
 * __lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver. The caller is
 * expected to hold the hbalock.
 **/
16241 static void
16242 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16243 {
16244         if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
16245                 phba->sli4_hba.max_cfg_param.xri_used--;
16246         }
16247 }
16248
/**
 * lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver.
 **/
16256 void
16257 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16258 {
16259         spin_lock_irq(&phba->hbalock);
16260         __lpfc_sli4_free_xri(phba, xri);
16261         spin_unlock_irq(&phba->hbalock);
16262 }
16263
/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function gets an xritag for the iocb. If there is no unused xritag
 * it will return NO_XRI (0xffff), which is not a valid xritag.
 * The caller is not required to hold any lock.
 **/
16274 uint16_t
16275 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
16276 {
16277         uint16_t xri_index;
16278
16279         xri_index = lpfc_sli4_alloc_xri(phba);
16280         if (xri_index == NO_XRI)
16281                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2004 Failed to allocate XRI. Last XRITAG is %d"
16283                                 " Max XRI is %d, Used XRI is %d\n",
16284                                 xri_index,
16285                                 phba->sli4_hba.max_cfg_param.max_xri,
16286                                 phba->sli4_hba.max_cfg_param.xri_used);
16287         return xri_index;
16288 }
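
/*
 * Usage sketch (illustrative): NO_XRI (0xffff) is the only failure
 * indication from lpfc_sli4_next_xritag(), so callers check it before
 * building an iocb:
 *
 *	xritag = lpfc_sli4_next_xritag(phba);
 *	if (xritag == NO_XRI)
 *		return -ENOMEM;
 *
 * The -ENOMEM mapping is an assumption of this sketch; callers may
 * retry instead, since xris are returned to the pool as IOs complete.
 */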
16289
/**
 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
 * @phba: pointer to lpfc hba data structure.
 * @post_sgl_list: pointer to els sgl entry list.
 * @post_cnt: number of els sgl entries on the list.
 *
 * This routine is invoked to post a block of the driver's sgl pages to the
 * HBA using a non-embedded mailbox command. No lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
 **/
16301 static int
16302 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
16303                             struct list_head *post_sgl_list,
16304                             int post_cnt)
16305 {
16306         struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
16307         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16308         struct sgl_page_pairs *sgl_pg_pairs;
16309         void *viraddr;
16310         LPFC_MBOXQ_t *mbox;
16311         uint32_t reqlen, alloclen, pg_pairs;
16312         uint32_t mbox_tmo;
16313         uint16_t xritag_start = 0;
16314         int rc = 0;
16315         uint32_t shdr_status, shdr_add_status;
16316         union lpfc_sli4_cfg_shdr *shdr;
16317
16318         reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
16319                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16320         if (reqlen > SLI4_PAGE_SIZE) {
16321                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16322                                 "2559 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
16324                 return -ENOMEM;
16325         }
16326
16327         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16328         if (!mbox)
16329                 return -ENOMEM;
16330
16331         /* Allocate DMA memory and set up the non-embedded mailbox command */
16332         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16333                          LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
16334                          LPFC_SLI4_MBX_NEMBED);
16335
16336         if (alloclen < reqlen) {
16337                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16338                                 "0285 Allocated DMA memory size (%d) is "
16339                                 "less than the requested DMA memory "
16340                                 "size (%d)\n", alloclen, reqlen);
16341                 lpfc_sli4_mbox_cmd_free(phba, mbox);
16342                 return -ENOMEM;
16343         }
16344         /* Set up the SGL pages in the non-embedded DMA pages */
16345         viraddr = mbox->sge_array->addr[0];
16346         sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16347         sgl_pg_pairs = &sgl->sgl_pg_pairs;
16348
16349         pg_pairs = 0;
16350         list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
16351                 /* Set up the sge entry */
16352                 sgl_pg_pairs->sgl_pg0_addr_lo =
16353                                 cpu_to_le32(putPaddrLow(sglq_entry->phys));
16354                 sgl_pg_pairs->sgl_pg0_addr_hi =
16355                                 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
16356                 sgl_pg_pairs->sgl_pg1_addr_lo =
16357                                 cpu_to_le32(putPaddrLow(0));
16358                 sgl_pg_pairs->sgl_pg1_addr_hi =
16359                                 cpu_to_le32(putPaddrHigh(0));
16360
16361                 /* Keep the first xritag on the list */
16362                 if (pg_pairs == 0)
16363                         xritag_start = sglq_entry->sli4_xritag;
16364                 sgl_pg_pairs++;
16365                 pg_pairs++;
16366         }
16367
16368         /* Complete initialization and perform endian conversion. */
16369         bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16370         bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
16371         sgl->word0 = cpu_to_le32(sgl->word0);
16372
16373         if (!phba->sli4_hba.intr_enable)
16374                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16375         else {
16376                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16377                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16378         }
16379         shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16380         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16381         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16382         if (rc != MBX_TIMEOUT)
16383                 lpfc_sli4_mbox_cmd_free(phba, mbox);
16384         if (shdr_status || shdr_add_status || rc) {
16385                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16386                                 "2513 POST_SGL_BLOCK mailbox command failed "
16387                                 "status x%x add_status x%x mbx status x%x\n",
16388                                 shdr_status, shdr_add_status, rc);
16389                 rc = -ENXIO;
16390         }
16391         return rc;
16392 }
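
/*
 * Sizing note (sketch): the non-embedded command must fit in a single
 * SLI4_PAGE_SIZE DMA page, which bounds how many sgl page pairs one
 * mailbox can post, per the reqlen check above:
 *
 *	max_pairs = (SLI4_PAGE_SIZE - sizeof(union lpfc_sli4_cfg_shdr) -
 *		     sizeof(uint32_t)) / sizeof(struct sgl_page_pairs);
 *
 * Callers therefore split long sgl lists into blocks no larger than
 * this before invoking the routine.
 */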
16393
16394 /**
16395  * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
16396  * @phba: pointer to lpfc hba data structure.
16397  * @sblist: pointer to scsi buffer list.
16398  * @count: number of scsi buffers on the list.
16399  *
 * This routine is invoked to post a block of @count scsi sgl pages from a
 * SCSI buffer list @sblist to the HBA using a non-embedded mailbox command.
 * No lock is held.
16403  *
16404  **/
16405 int
16406 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
16407                               struct list_head *sblist,
16408                               int count)
16409 {
16410         struct lpfc_scsi_buf *psb;
16411         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16412         struct sgl_page_pairs *sgl_pg_pairs;
16413         void *viraddr;
16414         LPFC_MBOXQ_t *mbox;
16415         uint32_t reqlen, alloclen, pg_pairs;
16416         uint32_t mbox_tmo;
16417         uint16_t xritag_start = 0;
16418         int rc = 0;
16419         uint32_t shdr_status, shdr_add_status;
16420         dma_addr_t pdma_phys_bpl1;
16421         union lpfc_sli4_cfg_shdr *shdr;
16422
16423         /* Calculate the requested length of the dma memory */
16424         reqlen = count * sizeof(struct sgl_page_pairs) +
16425                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16426         if (reqlen > SLI4_PAGE_SIZE) {
16427                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
16428                                 "0217 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
16430                 return -ENOMEM;
16431         }
16432         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16433         if (!mbox) {
16434                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16435                                 "0283 Failed to allocate mbox cmd memory\n");
16436                 return -ENOMEM;
16437         }
16438
16439         /* Allocate DMA memory and set up the non-embedded mailbox command */
16440         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16441                                 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
16442                                 LPFC_SLI4_MBX_NEMBED);
16443
16444         if (alloclen < reqlen) {
16445                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16446                                 "2561 Allocated DMA memory size (%d) is "
16447                                 "less than the requested DMA memory "
16448                                 "size (%d)\n", alloclen, reqlen);
16449                 lpfc_sli4_mbox_cmd_free(phba, mbox);
16450                 return -ENOMEM;
16451         }
16452
16453         /* Get the first SGE entry from the non-embedded DMA memory */
16454         viraddr = mbox->sge_array->addr[0];
16455
16456         /* Set up the SGL pages in the non-embedded DMA pages */
16457         sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16458         sgl_pg_pairs = &sgl->sgl_pg_pairs;
16459
16460         pg_pairs = 0;
16461         list_for_each_entry(psb, sblist, list) {
16462                 /* Set up the sge entry */
16463                 sgl_pg_pairs->sgl_pg0_addr_lo =
16464                         cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
16465                 sgl_pg_pairs->sgl_pg0_addr_hi =
16466                         cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
16467                 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
16468                         pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
16469                 else
16470                         pdma_phys_bpl1 = 0;
16471                 sgl_pg_pairs->sgl_pg1_addr_lo =
16472                         cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
16473                 sgl_pg_pairs->sgl_pg1_addr_hi =
16474                         cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
16475                 /* Keep the first xritag on the list */
16476                 if (pg_pairs == 0)
16477                         xritag_start = psb->cur_iocbq.sli4_xritag;
16478                 sgl_pg_pairs++;
16479                 pg_pairs++;
16480         }
16481         bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16482         bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
16483         /* Perform endian conversion if necessary */
16484         sgl->word0 = cpu_to_le32(sgl->word0);
16485
16486         if (!phba->sli4_hba.intr_enable)
16487                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16488         else {
16489                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16490                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16491         }
16492         shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16493         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16494         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16495         if (rc != MBX_TIMEOUT)
16496                 lpfc_sli4_mbox_cmd_free(phba, mbox);
16497         if (shdr_status || shdr_add_status || rc) {
16498                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16499                                 "2564 POST_SGL_BLOCK mailbox command failed "
16500                                 "status x%x add_status x%x mbx status x%x\n",
16501                                 shdr_status, shdr_add_status, rc);
16502                 rc = -ENXIO;
16503         }
16504         return rc;
16505 }
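
/*
 * Usage sketch (illustrative): because of the one-page limit checked
 * above, a caller with a long SCSI buffer list posts it in chunks,
 * splicing a bounded number of buffers onto a local list per call:
 *
 *	rc = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sgl_list, block_cnt);
 *
 * blck_sgl_list and block_cnt are placeholders for this sketch; the
 * driver's own repost paths bound block_cnt by a per-mailbox limit.
 */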
16506
16507 /**
16508  * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
16509  * @phba: pointer to lpfc_hba struct that the frame was received on
16510  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16511  *
16512  * This function checks the fields in the @fc_hdr to see if the FC frame is a
16513  * valid type of frame that the LPFC driver will handle. This function will
16514  * return a zero if the frame is a valid frame or a non zero value when the
16515  * frame does not pass the check.
16516  **/
16517 static int
16518 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
16519 {
16521         struct fc_vft_header *fc_vft_hdr;
16522         uint32_t *header = (uint32_t *) fc_hdr;
16523
16524 #define FC_RCTL_MDS_DIAGS       0xF4
16525
16526         switch (fc_hdr->fh_r_ctl) {
16527         case FC_RCTL_DD_UNCAT:          /* uncategorized information */
16528         case FC_RCTL_DD_SOL_DATA:       /* solicited data */
16529         case FC_RCTL_DD_UNSOL_CTL:      /* unsolicited control */
16530         case FC_RCTL_DD_SOL_CTL:        /* solicited control or reply */
16531         case FC_RCTL_DD_UNSOL_DATA:     /* unsolicited data */
16532         case FC_RCTL_DD_DATA_DESC:      /* data descriptor */
16533         case FC_RCTL_DD_UNSOL_CMD:      /* unsolicited command */
16534         case FC_RCTL_DD_CMD_STATUS:     /* command status */
16535         case FC_RCTL_ELS_REQ:   /* extended link services request */
16536         case FC_RCTL_ELS_REP:   /* extended link services reply */
16537         case FC_RCTL_ELS4_REQ:  /* FC-4 ELS request */
16538         case FC_RCTL_ELS4_REP:  /* FC-4 ELS reply */
16539         case FC_RCTL_BA_NOP:    /* basic link service NOP */
16540         case FC_RCTL_BA_ABTS:   /* basic link service abort */
16541         case FC_RCTL_BA_RMC:    /* remove connection */
16542         case FC_RCTL_BA_ACC:    /* basic accept */
16543         case FC_RCTL_BA_RJT:    /* basic reject */
16544         case FC_RCTL_BA_PRMT:
16545         case FC_RCTL_ACK_1:     /* acknowledge_1 */
16546         case FC_RCTL_ACK_0:     /* acknowledge_0 */
16547         case FC_RCTL_P_RJT:     /* port reject */
16548         case FC_RCTL_F_RJT:     /* fabric reject */
16549         case FC_RCTL_P_BSY:     /* port busy */
16550         case FC_RCTL_F_BSY:     /* fabric busy to data frame */
16551         case FC_RCTL_F_BSYL:    /* fabric busy to link control frame */
16552         case FC_RCTL_LCR:       /* link credit reset */
16553         case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
16554         case FC_RCTL_END:       /* end */
16555                 break;
16556         case FC_RCTL_VFTH:      /* Virtual Fabric tagging Header */
16557                 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
16558                 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
16559                 return lpfc_fc_frame_check(phba, fc_hdr);
16560         default:
16561                 goto drop;
16562         }
16563
16564 #define FC_TYPE_VENDOR_UNIQUE   0xFF
16565
16566         switch (fc_hdr->fh_type) {
16567         case FC_TYPE_BLS:
16568         case FC_TYPE_ELS:
16569         case FC_TYPE_FCP:
16570         case FC_TYPE_CT:
16571         case FC_TYPE_NVME:
16572         case FC_TYPE_VENDOR_UNIQUE:
16573                 break;
16574         case FC_TYPE_IP:
16575         case FC_TYPE_ILS:
16576         default:
16577                 goto drop;
16578         }
16579
16580         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
16581                         "2538 Received frame rctl:x%x, type:x%x, "
16582                         "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
16583                         fc_hdr->fh_r_ctl, fc_hdr->fh_type,
16584                         be32_to_cpu(header[0]), be32_to_cpu(header[1]),
16585                         be32_to_cpu(header[2]), be32_to_cpu(header[3]),
16586                         be32_to_cpu(header[4]), be32_to_cpu(header[5]),
16587                         be32_to_cpu(header[6]));
16588         return 0;
16589 drop:
16590         lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
16591                         "2539 Dropped frame rctl:x%x type:x%x\n",
16592                         fc_hdr->fh_r_ctl, fc_hdr->fh_type);
16593         return 1;
16594 }
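
/*
 * Usage sketch (illustrative): unsolicited receive handling runs this
 * check on the header buffer of each frame before any routing, and
 * drops the buffer on a nonzero return:
 *
 *	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
 *	if (lpfc_fc_frame_check(phba, fc_hdr))
 *		goto drop;
 *
 * Note that the FC_RCTL_VFTH case above strips the VFT tagging header
 * and re-checks the encapsulated frame recursively.
 */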
16595
16596 /**
16597  * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
16598  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16599  *
 * This function processes the FC header to retrieve the VFI from the VF
 * header, if one exists. This function will return the VFI if one exists
 * or 0 if no VFT header exists.
16603  **/
16604 static uint32_t
16605 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
16606 {
16607         struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
16608
16609         if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
16610                 return 0;
16611         return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
16612 }
16613
16614 /**
16615  * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
16616  * @phba: Pointer to the HBA structure to search for the vport on
16617  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16618  * @fcfi: The FC Fabric ID that the frame came from
16619  *
16620  * This function searches the @phba for a vport that matches the content of the
16621  * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
16622  * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
16623  * returns the matching vport pointer or NULL if unable to match frame to a
16624  * vport.
16625  **/
16626 static struct lpfc_vport *
16627 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
16628                        uint16_t fcfi, uint32_t did)
16629 {
16630         struct lpfc_vport **vports;
16631         struct lpfc_vport *vport = NULL;
16632         int i;
16633
16634         if (did == Fabric_DID)
16635                 return phba->pport;
16636         if ((phba->pport->fc_flag & FC_PT2PT) &&
16637                 !(phba->link_state == LPFC_HBA_READY))
16638                 return phba->pport;
16639
16640         vports = lpfc_create_vport_work_array(phba);
16641         if (vports != NULL) {
16642                 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
16643                         if (phba->fcf.fcfi == fcfi &&
16644                             vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
16645                             vports[i]->fc_myDID == did) {
16646                                 vport = vports[i];
16647                                 break;
16648                         }
16649                 }
16650         }
16651         lpfc_destroy_vport_work_array(phba, vports);
16652         return vport;
16653 }
16654
16655 /**
16656  * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
16657  * @vport: The vport to work on.
16658  *
 * This function updates the receive sequence time stamp for this vport. The
 * receive sequence time stamp indicates the time that the last frame of the
 * sequence that has been idle for the longest amount of time was received.
 * The driver uses this time stamp to determine if any received sequences
 * have timed out.
16664  **/
16665 static void
16666 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
16667 {
16668         struct lpfc_dmabuf *h_buf;
16669         struct hbq_dmabuf *dmabuf = NULL;
16670
16671         /* get the oldest sequence on the rcv list */
16672         h_buf = list_get_first(&vport->rcv_buffer_list,
16673                                struct lpfc_dmabuf, list);
16674         if (!h_buf)
16675                 return;
16676         dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16677         vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
16678 }
16679
16680 /**
16681  * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
16682  * @vport: The vport that the received sequences were sent to.
16683  *
16684  * This function cleans up all outstanding received sequences. This is called
16685  * by the driver when a link event or user action invalidates all the received
16686  * sequences.
16687  **/
16688 void
16689 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
16690 {
16691         struct lpfc_dmabuf *h_buf, *hnext;
16692         struct lpfc_dmabuf *d_buf, *dnext;
16693         struct hbq_dmabuf *dmabuf = NULL;
16694
16695         /* start with the oldest sequence on the rcv list */
16696         list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
16697                 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16698                 list_del_init(&dmabuf->hbuf.list);
16699                 list_for_each_entry_safe(d_buf, dnext,
16700                                          &dmabuf->dbuf.list, list) {
16701                         list_del_init(&d_buf->list);
16702                         lpfc_in_buf_free(vport->phba, d_buf);
16703                 }
16704                 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
16705         }
16706 }
16707
16708 /**
16709  * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
16710  * @vport: The vport that the received sequences were sent to.
16711  *
16712  * This function determines whether any received sequences have timed out by
16713  * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
16714  * indicates that there is at least one timed out sequence this routine will
16715  * go through the received sequences one at a time from most inactive to most
16716  * active to determine which ones need to be cleaned up. Once it has determined
16717  * that a sequence needs to be cleaned up it will simply free up the resources
16718  * without sending an abort.
16719  **/
16720 void
16721 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
16722 {
16723         struct lpfc_dmabuf *h_buf, *hnext;
16724         struct lpfc_dmabuf *d_buf, *dnext;
16725         struct hbq_dmabuf *dmabuf = NULL;
16726         unsigned long timeout;
16727         int abort_count = 0;
16728
16729         timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
16730                    vport->rcv_buffer_time_stamp);
16731         if (list_empty(&vport->rcv_buffer_list) ||
16732             time_before(jiffies, timeout))
16733                 return;
16734         /* start with the oldest sequence on the rcv list */
16735         list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
16736                 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16737                 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
16738                            dmabuf->time_stamp);
16739                 if (time_before(jiffies, timeout))
16740                         break;
16741                 abort_count++;
16742                 list_del_init(&dmabuf->hbuf.list);
16743                 list_for_each_entry_safe(d_buf, dnext,
16744                                          &dmabuf->dbuf.list, list) {
16745                         list_del_init(&d_buf->list);
16746                         lpfc_in_buf_free(vport->phba, d_buf);
16747                 }
16748                 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
16749         }
16750         if (abort_count)
16751                 lpfc_update_rcv_time_stamp(vport);
16752 }
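
/*
 * Timing note (sketch): a sequence is treated as timed out once
 * E_D_TOV milliseconds have passed since its newest frame, i.e.
 *
 *	expired = time_after(jiffies, dmabuf->time_stamp +
 *			     msecs_to_jiffies(phba->fc_edtov));
 *
 * Because the rcv_buffer_list is kept ordered oldest-first, the scan
 * above can stop at the first sequence that has not yet expired.
 */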
16753
/**
 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: The vport the frame was received on.
 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
 *
 * This function searches through the existing incomplete sequences that have
 * been sent to this @vport. If the frame matches one of the incomplete
 * sequences then the dbuf in the @dmabuf is added to the list of frames that
 * make up that sequence. If no sequence is found that matches this frame then
 * the function will add the hbuf in the @dmabuf to the @vport's
 * rcv_buffer_list. This function returns a pointer to the first dmabuf in
 * the sequence list that the frame was linked to.
 **/
16766 static struct hbq_dmabuf *
16767 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
16768 {
16769         struct fc_frame_header *new_hdr;
16770         struct fc_frame_header *temp_hdr;
16771         struct lpfc_dmabuf *d_buf;
16772         struct lpfc_dmabuf *h_buf;
16773         struct hbq_dmabuf *seq_dmabuf = NULL;
16774         struct hbq_dmabuf *temp_dmabuf = NULL;
16775         uint8_t found = 0;
16776
16777         INIT_LIST_HEAD(&dmabuf->dbuf.list);
16778         dmabuf->time_stamp = jiffies;
16779         new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
16780
16781         /* Use the hdr_buf to find the sequence that this frame belongs to */
16782         list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
16783                 temp_hdr = (struct fc_frame_header *)h_buf->virt;
16784                 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
16785                     (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
16786                     (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
16787                         continue;
16788                 /* found a pending sequence that matches this frame */
16789                 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16790                 break;
16791         }
16792         if (!seq_dmabuf) {
16793                 /*
16794                  * This indicates first frame received for this sequence.
16795                  * Queue the buffer on the vport's rcv_buffer_list.
16796                  */
16797                 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
16798                 lpfc_update_rcv_time_stamp(vport);
16799                 return dmabuf;
16800         }
16801         temp_hdr = seq_dmabuf->hbuf.virt;
16802         if (be16_to_cpu(new_hdr->fh_seq_cnt) <
16803                 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
16804                 list_del_init(&seq_dmabuf->hbuf.list);
16805                 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
16806                 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
16807                 lpfc_update_rcv_time_stamp(vport);
16808                 return dmabuf;
16809         }
16810         /* move this sequence to the tail to indicate a young sequence */
16811         list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
16812         seq_dmabuf->time_stamp = jiffies;
16813         lpfc_update_rcv_time_stamp(vport);
16814         if (list_empty(&seq_dmabuf->dbuf.list)) {
16815                 temp_hdr = dmabuf->hbuf.virt;
16816                 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
16817                 return seq_dmabuf;
16818         }
16819         /* find the correct place in the sequence to insert this frame */
16820         d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
16821         while (!found) {
16822                 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
16823                 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
16824                 /*
16825                  * If the frame's sequence count is greater than the frame on
16826                  * the list then insert the frame right after this frame
16827                  */
16828                 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
16829                         be16_to_cpu(temp_hdr->fh_seq_cnt)) {
16830                         list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
16831                         found = 1;
16832                         break;
16833                 }
16834
16835                 if (&d_buf->list == &seq_dmabuf->dbuf.list)
16836                         break;
16837                 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
16838         }
16839
16840         if (found)
16841                 return seq_dmabuf;
16842         return NULL;
16843 }
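
/*
 * Worked example (illustrative): if the frames of a sequence arrive
 * with fh_seq_cnt 0, 2, 1, the count-0 frame anchors the sequence on
 * rcv_buffer_list, count 2 is appended to the anchor's dbuf list, and
 * count 1 is then placed ahead of it by the tail-to-head scan above,
 * leaving the payload frames ordered 1, 2 behind the count-0 anchor.
 * Only a frame whose count matches the anchor's falls out of the loop
 * with found == 0, making the function return NULL.
 */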
16844
/**
 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the partially assembled sequence described
 * by the information from the basic abort @dmabuf. It checks whether such
 * a partially assembled sequence is held by the driver. If so, it frees up
 * all the frames from the partially assembled sequence.
 *
 * Return
 * true  -- if there is a matching partially assembled sequence present and
 *          all the frames are freed with the sequence;
 * false -- if there is no matching partially assembled sequence present so
 *          nothing got aborted in the lower layer driver
 **/
16861 static bool
16862 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
16863                             struct hbq_dmabuf *dmabuf)
16864 {
16865         struct fc_frame_header *new_hdr;
16866         struct fc_frame_header *temp_hdr;
16867         struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
16868         struct hbq_dmabuf *seq_dmabuf = NULL;
16869
16870         /* Use the hdr_buf to find the sequence that matches this frame */
16871         INIT_LIST_HEAD(&dmabuf->dbuf.list);
16872         INIT_LIST_HEAD(&dmabuf->hbuf.list);
16873         new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
16874         list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
16875                 temp_hdr = (struct fc_frame_header *)h_buf->virt;
16876                 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
16877                     (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
16878                     (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
16879                         continue;
16880                 /* found a pending sequence that matches this frame */
16881                 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16882                 break;
16883         }
16884
16885         /* Free up all the frames from the partially assembled sequence */
16886         if (seq_dmabuf) {
16887                 list_for_each_entry_safe(d_buf, n_buf,
16888                                          &seq_dmabuf->dbuf.list, list) {
16889                         list_del_init(&d_buf->list);
16890                         lpfc_in_buf_free(vport->phba, d_buf);
16891                 }
16892                 return true;
16893         }
16894         return false;
16895 }
16896
/**
 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the sequence assembled at the upper level
 * protocol, described by the information from the basic abort @dmabuf. It
 * checks whether such a pending context exists at the upper level protocol.
 * If so, it shall clean up the pending context.
 *
 * Return
 * true  -- if there is a matching pending context of the sequence cleaned
 *          at ulp;
 * false -- if there is no matching pending context of the sequence present
 *          at ulp.
 **/
16913 static bool
16914 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
16915 {
16916         struct lpfc_hba *phba = vport->phba;
16917         int handled;
16918
16919         /* Accepting abort at ulp with SLI4 only */
16920         if (phba->sli_rev < LPFC_SLI_REV4)
16921                 return false;
16922
	/* Allow interested upper level protocols to handle the abort */
16924         handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
16925         if (handled)
16926                 return true;
16927
16928         return false;
16929 }
16930
16931 /**
16932  * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
16933  * @phba: Pointer to HBA context object.
16934  * @cmd_iocbq: pointer to the command iocbq structure.
16935  * @rsp_iocbq: pointer to the response iocbq structure.
16936  *
16937  * This function handles the sequence abort response iocb command complete
16938  * event. It properly releases the memory allocated to the sequence abort
16939  * accept iocb.
16940  **/
16941 static void
16942 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
16943                              struct lpfc_iocbq *cmd_iocbq,
16944                              struct lpfc_iocbq *rsp_iocbq)
16945 {
16946         struct lpfc_nodelist *ndlp;
16947
16948         if (cmd_iocbq) {
16949                 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
16950                 lpfc_nlp_put(ndlp);
16951                 lpfc_nlp_not_used(ndlp);
16952                 lpfc_sli_release_iocbq(phba, cmd_iocbq);
16953         }
16954
16955         /* Failure means BLS ABORT RSP did not get delivered to remote node*/
16956         if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
16957                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16958                         "3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
16959                         rsp_iocbq->iocb.ulpStatus,
16960                         rsp_iocbq->iocb.un.ulpWord[4]);
16961 }
16962
16963 /**
16964  * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
16965  * @phba: Pointer to HBA context object.
16966  * @xri: xri id in transaction.
16967  *
 * This function validates that the xri maps to the known range of XRIs
 * allocated and used by the driver.
16970  **/
16971 uint16_t
16972 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
16973                       uint16_t xri)
16974 {
16975         uint16_t i;
16976
16977         for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
16978                 if (xri == phba->sli4_hba.xri_ids[i])
16979                         return i;
16980         }
16981         return NO_XRI;
16982 }
16983
/**
 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
 * @vport: Pointer to the vport on which the sequence was received.
 * @fc_hdr: pointer to a FC frame header.
 * @aborted: whether the partially assembled sequence was aborted.
 *
 * This function sends a basic response to a previous unsol sequence abort
 * event after aborting the sequence handling.
 **/
16992 void
16993 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
16994                         struct fc_frame_header *fc_hdr, bool aborted)
16995 {
16996         struct lpfc_hba *phba = vport->phba;
16997         struct lpfc_iocbq *ctiocb = NULL;
16998         struct lpfc_nodelist *ndlp;
16999         uint16_t oxid, rxid, xri, lxri;
17000         uint32_t sid, fctl;
17001         IOCB_t *icmd;
17002         int rc;
17003
17004         if (!lpfc_is_link_up(phba))
17005                 return;
17006
17007         sid = sli4_sid_from_fc_hdr(fc_hdr);
17008         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
17009         rxid = be16_to_cpu(fc_hdr->fh_rx_id);
17010
17011         ndlp = lpfc_findnode_did(vport, sid);
17012         if (!ndlp) {
17013                 ndlp = lpfc_nlp_init(vport, sid);
17014                 if (!ndlp) {
17015                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17016                                          "1268 Failed to allocate ndlp for "
17017                                          "oxid:x%x SID:x%x\n", oxid, sid);
17018                         return;
17019                 }
17020                 /* Put ndlp onto pport node list */
17021                 lpfc_enqueue_node(vport, ndlp);
17022         } else if (!NLP_CHK_NODE_ACT(ndlp)) {
17023                 /* re-setup ndlp without removing from node list */
17024                 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
17025                 if (!ndlp) {
17026                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
					 "3275 Failed to activate ndlp found "
17028                                          "for oxid:x%x SID:x%x\n", oxid, sid);
17029                         return;
17030                 }
17031         }
17032
17033         /* Allocate buffer for rsp iocb */
17034         ctiocb = lpfc_sli_get_iocbq(phba);
17035         if (!ctiocb)
17036                 return;
17037
17038         /* Extract the F_CTL field from FC_HDR */
17039         fctl = sli4_fctl_from_fc_hdr(fc_hdr);
17040
17041         icmd = &ctiocb->iocb;
17042         icmd->un.xseq64.bdl.bdeSize = 0;
17043         icmd->un.xseq64.bdl.ulpIoTag32 = 0;
17044         icmd->un.xseq64.w5.hcsw.Dfctl = 0;
17045         icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
17046         icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
17047
17048         /* Fill in the rest of iocb fields */
17049         icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
17050         icmd->ulpBdeCount = 0;
17051         icmd->ulpLe = 1;
17052         icmd->ulpClass = CLASS3;
17053         icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
17054         ctiocb->context1 = lpfc_nlp_get(ndlp);
17055
17056         ctiocb->iocb_cmpl = NULL;
17057         ctiocb->vport = phba->pport;
17058         ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
17059         ctiocb->sli4_lxritag = NO_XRI;
17060         ctiocb->sli4_xritag = NO_XRI;
17061
17062         if (fctl & FC_FC_EX_CTX)
17063                 /* Exchange responder sent the abort so we
17064                  * own the oxid.
17065                  */
17066                 xri = oxid;
17067         else
17068                 xri = rxid;
17069         lxri = lpfc_sli4_xri_inrange(phba, xri);
17070         if (lxri != NO_XRI)
17071                 lpfc_set_rrq_active(phba, ndlp, lxri,
17072                         (xri == oxid) ? rxid : oxid, 0);
17073         /* For BA_ABTS from exchange responder, if the logical xri with
17074          * the oxid maps to the FCP XRI range, the port no longer has
17075          * that exchange context, send a BLS_RJT. Override the IOCB for
17076          * a BA_RJT.
17077          */
17078         if ((fctl & FC_FC_EX_CTX) &&
17079             (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
17080                 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17081                 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17082                 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17083                 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17084         }
17085
17086         /* If BA_ABTS failed to abort a partially assembled receive sequence,
17087          * the driver no longer has that exchange, send a BLS_RJT. Override
17088          * the IOCB for a BA_RJT.
17089          */
17090         if (!aborted) {
17091                 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17092                 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17093                 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17094                 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17095         }
17096
17097         if (fctl & FC_FC_EX_CTX) {
17098                 /* ABTS sent by responder to CT exchange, construction
17099                  * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
17100                  * field and RX_ID from ABTS for RX_ID field.
17101                  */
17102                 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
17103         } else {
17104                 /* ABTS sent by initiator to CT exchange, construction
17105                  * of BA_ACC will need to allocate a new XRI as for the
17106                  * XRI_TAG field.
17107                  */
17108                 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
17109         }
17110         bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
17111         bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
17112
17113         /* Xmit CT abts response on exchange <xid> */
17114         lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
17115                          "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
17116                          icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
17117
17118         rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
17119         if (rc == IOCB_ERROR) {
17120                 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
17121                                  "2925 Failed to issue CT ABTS RSP x%x on "
17122                                  "xri x%x, Data x%x\n",
17123                                  icmd->un.xseq64.w5.hcsw.Rctl, oxid,
17124                                  phba->link_state);
17125                 lpfc_nlp_put(ndlp);
17126                 ctiocb->context1 = NULL;
17127                 lpfc_sli_release_iocbq(phba, ctiocb);
17128         }
17129 }
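
/*
 * Editorial summary (derived from the code above, not authoritative) of how
 * the BLS response type is chosen:
 *
 *   condition                                             response
 *   ---------------------------------------------------   --------
 *   responder ABTS (FC_FC_EX_CTX set) and the logical
 *   xri falls beyond the driver's IOCB xri count          BA_RJT
 *   aborted == false (sequence could not be aborted)      BA_RJT
 *   all other cases                                       BA_ACC
 *
 * In every case the OX_ID and RX_ID from the ABTS are echoed back in the
 * BLS response payload.
 */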
17130
17131 /**
17132  * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
17133  * @vport: Pointer to the vport on which this sequence was received
17134  * @dmabuf: pointer to a dmabuf that describes the FC sequence
17135  *
17136  * This function handles an SLI-4 unsolicited abort event. If the unsolicited
17137  * receive sequence is only partially assembled by the driver, it shall abort
17138  * the partially assembled frames for the sequence. Otherwise, if the
17139  * unsolicited receive sequence has been completely assembled and passed to
17140  * the Upper Layer Protocol (ULP), it then marks the per-oxid status of the
17141  * unsolicited sequence as aborted. After that, it will issue a basic
17142  * accept to accept the abort.
17143  **/
17144 static void
17145 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
17146                              struct hbq_dmabuf *dmabuf)
17147 {
17148         struct lpfc_hba *phba = vport->phba;
17149         struct fc_frame_header fc_hdr;
17150         uint32_t fctl;
17151         bool aborted;
17152
17153         /* Make a copy of fc_hdr before the dmabuf being released */
17154         memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
17155         fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
17156
17157         if (fctl & FC_FC_EX_CTX) {
17158                 /* ABTS by responder to exchange, no cleanup needed */
17159                 aborted = true;
17160         } else {
17161                 /* ABTS by initiator to exchange, need to do cleanup */
17162                 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
17163                 if (!aborted)
17164                         aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
17165         }
17166         lpfc_in_buf_free(phba, &dmabuf->dbuf);
17167
17168         if (phba->nvmet_support) {
17169                 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
17170                 return;
17171         }
17172
17173         /* Respond with BA_ACC or BA_RJT accordingly */
17174         lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
17175 }
17176
17177 /**
17178  * lpfc_seq_complete - Indicates if a sequence is complete
17179  * @dmabuf: pointer to a dmabuf that describes the FC sequence
17180  *
17181  * This function checks the sequence, starting with the frame described by
17182  * @dmabuf, to see if all the frames associated with this sequence are present.
17183  * The frames associated with this sequence are linked to the @dmabuf using the
17184  * dbuf list. This function looks for three major things. 1) That the first
17185  * frame has a sequence count of zero. 2) That there is a frame with the last
17186  * frame of sequence bit set. 3) That there are no holes in the sequence count.
17187  * The function will return 1 when the sequence is complete, otherwise 0.
17188  **/
17189 static int
17190 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
17191 {
17192         struct fc_frame_header *hdr;
17193         struct lpfc_dmabuf *d_buf;
17194         struct hbq_dmabuf *seq_dmabuf;
17195         uint32_t fctl;
17196         int seq_count = 0;
17197
17198         hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17199         /* make sure first frame of sequence has a sequence count of zero */
17200         if (hdr->fh_seq_cnt != seq_count)
17201                 return 0;
17202         fctl = (hdr->fh_f_ctl[0] << 16 |
17203                 hdr->fh_f_ctl[1] << 8 |
17204                 hdr->fh_f_ctl[2]);
17205         /* If last frame of sequence we can return success. */
17206         if (fctl & FC_FC_END_SEQ)
17207                 return 1;
17208         list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
17209                 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17210                 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17211                 /* If there is a hole in the sequence count then fail. */
17212                 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
17213                         return 0;
17214                 fctl = (hdr->fh_f_ctl[0] << 16 |
17215                         hdr->fh_f_ctl[1] << 8 |
17216                         hdr->fh_f_ctl[2]);
17217                 /* If last frame of sequence we can return success. */
17218                 if (fctl & FC_FC_END_SEQ)
17219                         return 1;
17220         }
17221         return 0;
17222 }
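
/*
 * Worked example (editorial): a three-frame sequence with SEQ_CNT 0, 1, 2
 * whose last frame carries FC_FC_END_SEQ is reported complete (returns 1).
 * Frames 0 and 2 alone are not: the walk stops at the 0 -> 2 hole and
 * returns 0, as does a sequence that never presents an END_SEQ frame.
 */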
17223
17224 /**
17225  * lpfc_prep_seq - Prep sequence for ULP processing
17226  * @vport: Pointer to the vport on which this sequence was received
17227  * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
17228  *
17229  * This function takes a sequence, described by a list of frames, and creates
17230  * a list of iocbq structures to describe the sequence. This iocbq list will be
17231  * used to issue to the generic unsolicited sequence handler. This routine
17232  * returns a pointer to the first iocbq in the list. If the function is unable
17233  * to allocate an iocbq then it throws out the received frames that could not
17234  * be described and returns a pointer to the first iocbq. If unable to
17235  * allocate any iocbqs (including the first) this function will return NULL.
17236  **/
17237 static struct lpfc_iocbq *
17238 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
17239 {
17240         struct hbq_dmabuf *hbq_buf;
17241         struct lpfc_dmabuf *d_buf, *n_buf;
17242         struct lpfc_iocbq *first_iocbq, *iocbq;
17243         struct fc_frame_header *fc_hdr;
17244         uint32_t sid;
17245         uint32_t len, tot_len;
17246         struct ulp_bde64 *pbde;
17247
17248         fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17249         /* remove from receive buffer list */
17250         list_del_init(&seq_dmabuf->hbuf.list);
17251         lpfc_update_rcv_time_stamp(vport);
17252         /* get the Remote Port's SID */
17253         sid = sli4_sid_from_fc_hdr(fc_hdr);
17254         tot_len = 0;
17255         /* Get an iocbq struct to fill in. */
17256         first_iocbq = lpfc_sli_get_iocbq(vport->phba);
17257         if (first_iocbq) {
17258                 /* Initialize the first IOCB. */
17259                 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
17260                 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
17261                 first_iocbq->vport = vport;
17262
17263                 /* Check FC Header to see what TYPE of frame we are rcv'ing */
17264                 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
17265                         first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
17266                         first_iocbq->iocb.un.rcvels.parmRo =
17267                                 sli4_did_from_fc_hdr(fc_hdr);
17268                         first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
17269                 } else
17270                         first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
17271                 first_iocbq->iocb.ulpContext = NO_XRI;
17272                 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
17273                         be16_to_cpu(fc_hdr->fh_ox_id);
17274                 /* iocbq is prepped for internal consumption.  Physical vpi. */
17275                 first_iocbq->iocb.unsli3.rcvsli3.vpi =
17276                         vport->phba->vpi_ids[vport->vpi];
17277                 /* put the first buffer into the first IOCBq */
17278                 tot_len = bf_get(lpfc_rcqe_length,
17279                                        &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
17280
17281                 first_iocbq->context2 = &seq_dmabuf->dbuf;
17282                 first_iocbq->context3 = NULL;
17283                 first_iocbq->iocb.ulpBdeCount = 1;
17284                 if (tot_len > LPFC_DATA_BUF_SIZE)
17285                         first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17286                                                         LPFC_DATA_BUF_SIZE;
17287                 else
17288                         first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
17289
17290                 first_iocbq->iocb.un.rcvels.remoteID = sid;
17291
17292                 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17293         }
17294         iocbq = first_iocbq;
17295         /*
17296          * Each IOCBq can have two Buffers assigned, so go through the list
17297          * of buffers for this sequence and save two buffers in each IOCBq
17298          */
17299         list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
17300                 if (!iocbq) {
17301                         lpfc_in_buf_free(vport->phba, d_buf);
17302                         continue;
17303                 }
17304                 if (!iocbq->context3) {
17305                         iocbq->context3 = d_buf;
17306                         iocbq->iocb.ulpBdeCount++;
17307                         /* We need to get the size out of the right CQE */
17308                         hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17309                         len = bf_get(lpfc_rcqe_length,
17310                                        &hbq_buf->cq_event.cqe.rcqe_cmpl);
17311                         pbde = (struct ulp_bde64 *)
17312                                         &iocbq->iocb.unsli3.sli3Words[4];
17313                         if (len > LPFC_DATA_BUF_SIZE)
17314                                 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
17315                         else
17316                                 pbde->tus.f.bdeSize = len;
17317
17318                         iocbq->iocb.unsli3.rcvsli3.acc_len += len;
17319                         tot_len += len;
17320                 } else {
17321                         iocbq = lpfc_sli_get_iocbq(vport->phba);
17322                         if (!iocbq) {
17323                                 if (first_iocbq) {
17324                                         first_iocbq->iocb.ulpStatus =
17325                                                         IOSTAT_FCP_RSP_ERROR;
17326                                         first_iocbq->iocb.un.ulpWord[4] =
17327                                                         IOERR_NO_RESOURCES;
17328                                 }
17329                                 lpfc_in_buf_free(vport->phba, d_buf);
17330                                 continue;
17331                         }
17332                         /* We need to get the size out of the right CQE */
17333                         hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17334                         len = bf_get(lpfc_rcqe_length,
17335                                        &hbq_buf->cq_event.cqe.rcqe_cmpl);
17336                         iocbq->context2 = d_buf;
17337                         iocbq->context3 = NULL;
17338                         iocbq->iocb.ulpBdeCount = 1;
17339                         if (len > LPFC_DATA_BUF_SIZE)
17340                                 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17341                                                         LPFC_DATA_BUF_SIZE;
17342                         else
17343                                 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
17344
17345                         tot_len += len;
17346                         iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17347
17348                         iocbq->iocb.un.rcvels.remoteID = sid;
17349                         list_add_tail(&iocbq->list, &first_iocbq->list);
17350                 }
17351         }
17352         return first_iocbq;
17353 }
17354
17355 static void
17356 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
17357                           struct hbq_dmabuf *seq_dmabuf)
17358 {
17359         struct fc_frame_header *fc_hdr;
17360         struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
17361         struct lpfc_hba *phba = vport->phba;
17362
17363         fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17364         iocbq = lpfc_prep_seq(vport, seq_dmabuf);
17365         if (!iocbq) {
17366                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17367                                 "2707 Ring %d handler: Failed to allocate "
17368                                 "iocb Rctl x%x Type x%x received\n",
17369                                 LPFC_ELS_RING,
17370                                 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17371                 return;
17372         }
17373         if (!lpfc_complete_unsol_iocb(phba,
17374                                       phba->sli4_hba.els_wq->pring,
17375                                       iocbq, fc_hdr->fh_r_ctl,
17376                                       fc_hdr->fh_type))
17377                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17378                                 "2540 Ring %d handler: unexpected Rctl "
17379                                 "x%x Type x%x received\n",
17380                                 LPFC_ELS_RING,
17381                                 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17382
17383         /* Free iocb created in lpfc_prep_seq */
17384         list_for_each_entry_safe(curr_iocb, next_iocb,
17385                 &iocbq->list, list) {
17386                 list_del_init(&curr_iocb->list);
17387                 lpfc_sli_release_iocbq(phba, curr_iocb);
17388         }
17389         lpfc_sli_release_iocbq(phba, iocbq);
17390 }
17391
17392 static void
17393 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17394                             struct lpfc_iocbq *rspiocb)
17395 {
17396         struct lpfc_dmabuf *pcmd = cmdiocb->context2;
17397
17398         if (pcmd && pcmd->virt)
17399                 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17400         kfree(pcmd);
17401         lpfc_sli_release_iocbq(phba, cmdiocb);
17402 }
17403
17404 static void
17405 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
17406                               struct hbq_dmabuf *dmabuf)
17407 {
17408         struct fc_frame_header *fc_hdr;
17409         struct lpfc_hba *phba = vport->phba;
17410         struct lpfc_iocbq *iocbq = NULL;
17411         union  lpfc_wqe *wqe;
17412         struct lpfc_dmabuf *pcmd = NULL;
17413         uint32_t frame_len;
17414         int rc;
17415
17416         fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17417         frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
17418
17419         /* Send the received frame back */
17420         iocbq = lpfc_sli_get_iocbq(phba);
17421         if (!iocbq)
17422                 goto exit;
17423
17424         /* Allocate buffer for command payload */
17425         pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
17426         if (pcmd)
17427                 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
17428                                             &pcmd->phys);
17429         if (!pcmd || !pcmd->virt)
17430                 goto exit;
17431
17432         INIT_LIST_HEAD(&pcmd->list);
17433
17434         /* copyin the payload */
17435         memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
17436
17437         /* fill in BDE's for command */
17438         iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
17439         iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
17440         iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
17441         iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
17442
17443         iocbq->context2 = pcmd;
17444         iocbq->vport = vport;
17445         iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
17446         iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
17447
17448         /*
17449          * Setup rest of the iocb as though it were a WQE
17450          * Build the SEND_FRAME WQE
17451          */
17452         wqe = (union lpfc_wqe *)&iocbq->iocb;
17453
17454         wqe->send_frame.frame_len = frame_len;
17455         wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
17456         wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
17457         wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
17458         wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
17459         wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
17460         wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
17461
17462         iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
17463         iocbq->iocb.ulpLe = 1;
17464         iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
17465         rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
17466         if (rc == IOCB_ERROR)
17467                 goto exit;
17468
17469         lpfc_in_buf_free(phba, &dmabuf->dbuf);
17470         return;
17471
17472 exit:
17473         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17474                         "2023 Unable to process MDS loopback frame\n");
17475         if (pcmd && pcmd->virt)
17476                 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17477         kfree(pcmd);
17478         if (iocbq)
17479                 lpfc_sli_release_iocbq(phba, iocbq);
17480         lpfc_in_buf_free(phba, &dmabuf->dbuf);
17481 }
17482
17483 /**
17484  * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
17485  * @phba: Pointer to HBA context object.
17486  * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
17487  * This function is called with no lock held. This function processes all
17488  * the received buffers and gives them to upper layers when a received buffer
17489  * indicates that it is the final frame in the sequence. The interrupt
17490  * service routine processes received buffers in interrupt context.
17491  * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
17492  * appropriate receive function when the final frame in a sequence is received.
17493  **/
17494 void
17495 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
17496                                  struct hbq_dmabuf *dmabuf)
17497 {
17498         struct hbq_dmabuf *seq_dmabuf;
17499         struct fc_frame_header *fc_hdr;
17500         struct lpfc_vport *vport;
17501         uint32_t fcfi;
17502         uint32_t did;
17503
17504         /* Process each received buffer */
17505         fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17506
17507         /* check to see if this a valid type of frame */
17508         if (lpfc_fc_frame_check(phba, fc_hdr)) {
17509                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17510                 return;
17511         }
17512
17513         if (bf_get(lpfc_cqe_code,
17514                    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)
17515                 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
17516                               &dmabuf->cq_event.cqe.rcqe_cmpl);
17517         else
17518                 fcfi = bf_get(lpfc_rcqe_fcf_id,
17519                               &dmabuf->cq_event.cqe.rcqe_cmpl);
17520
17521         if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
17522                 vport = phba->pport;
17523                 /* Handle MDS Loopback frames */
17524                 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
17525                 return;
17526         }
17527
17528         /* d_id this frame is directed to */
17529         did = sli4_did_from_fc_hdr(fc_hdr);
17530
17531         vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
17532         if (!vport) {
17533                 /* throw out the frame */
17534                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17535                 return;
17536         }
17537
17538         /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
17539         if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
17540                 (did != Fabric_DID)) {
17541                 /*
17542                  * Throw out the frame if we are not pt2pt.
17543                  * The pt2pt protocol allows for discovery frames
17544                  * to be received without a registered VPI.
17545                  */
17546                 if (!(vport->fc_flag & FC_PT2PT) ||
17547                         (phba->link_state == LPFC_HBA_READY)) {
17548                         lpfc_in_buf_free(phba, &dmabuf->dbuf);
17549                         return;
17550                 }
17551         }
17552
17553         /* Handle the basic abort sequence (BA_ABTS) event */
17554         if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
17555                 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
17556                 return;
17557         }
17558
17559         /* Link this frame */
17560         seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
17561         if (!seq_dmabuf) {
17562                 /* unable to add frame to vport - throw it out */
17563                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17564                 return;
17565         }
17566         /* If not last frame in sequence continue processing frames. */
17567         if (!lpfc_seq_complete(seq_dmabuf))
17568                 return;
17569
17570         /* Send the complete sequence to the upper layer protocol */
17571         lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
17572 }
17573
17574 /**
17575  * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
17576  * @phba: pointer to lpfc hba data structure.
17577  *
17578  * This routine is invoked to post rpi header templates to the
17579  * HBA consistent with the SLI-4 interface spec.  This routine
17580  * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
17581  * SLI4_PAGE_SIZE modulo 64 rpi context headers.
17582  *
17583  * This routine does not require any locks.  It's usage is expected
17584  * to be driver load or reset recovery when the driver is
17585  * sequential.
17586  *
17587  * Return codes
17588  *      0 - successful
17589  *      -EIO - The mailbox failed to complete successfully.
17590  *      When this error occurs, the driver is not guaranteed
17591  *      to have any rpi regions posted to the device and
17592  *      must either attempt to repost the regions or take a
17593  *      fatal error.
17594  **/
17595 int
17596 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
17597 {
17598         struct lpfc_rpi_hdr *rpi_page;
17599         uint32_t rc = 0;
17600         uint16_t lrpi = 0;
17601
17602         /* SLI4 ports that support extents do not require RPI headers. */
17603         if (!phba->sli4_hba.rpi_hdrs_in_use)
17604                 goto exit;
17605         if (phba->sli4_hba.extents_in_use)
17606                 return -EIO;
17607
17608         list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
17609                 /*
17610                  * Assign the rpi headers a physical rpi only if the driver
17611                  * has not initialized those resources.  A port reset only
17612                  * needs the headers posted.
17613                  */
17614                 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
17615                     LPFC_RPI_RSRC_RDY)
17616                         rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
17617
17618                 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
17619                 if (rc != MBX_SUCCESS) {
17620                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17621                                         "2008 Error %d posting all rpi "
17622                                         "headers\n", rc);
17623                         rc = -EIO;
17624                         break;
17625                 }
17626         }
17627
17628  exit:
17629         bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
17630                LPFC_RPI_RSRC_RDY);
17631         return rc;
17632 }
17633
17634 /**
17635  * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
17636  * @phba: pointer to lpfc hba data structure.
17637  * @rpi_page:  pointer to the rpi memory region.
17638  *
17639  * This routine is invoked to post a single rpi header to the
17640  * HBA consistent with the SLI-4 interface spec.  This memory region
17641  * maps up to 64 rpi context regions.
17642  *
17643  * Return codes
17644  *      0 - successful
17645  *      -ENOMEM - No available memory
17646  *      -EIO - The mailbox failed to complete successfully.
17647  **/
17648 int
17649 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
17650 {
17651         LPFC_MBOXQ_t *mboxq;
17652         struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
17653         uint32_t rc = 0;
17654         uint32_t shdr_status, shdr_add_status;
17655         union lpfc_sli4_cfg_shdr *shdr;
17656
17657         /* SLI4 ports that support extents do not require RPI headers. */
17658         if (!phba->sli4_hba.rpi_hdrs_in_use)
17659                 return rc;
17660         if (phba->sli4_hba.extents_in_use)
17661                 return -EIO;
17662
17663         /* The port is notified of the header region via a mailbox command. */
17664         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17665         if (!mboxq) {
17666                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17667                                 "2001 Unable to allocate memory for issuing "
17668                                 "SLI_CONFIG_SPECIAL mailbox command\n");
17669                 return -ENOMEM;
17670         }
17671
17672         /* Post all rpi memory regions to the port. */
17673         hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
17674         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
17675                          LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
17676                          sizeof(struct lpfc_mbx_post_hdr_tmpl) -
17677                          sizeof(struct lpfc_sli4_cfg_mhdr),
17678                          LPFC_SLI4_MBX_EMBED);
17679
17680
17681         /* Post the physical rpi to the port for this rpi header. */
17682         bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
17683                rpi_page->start_rpi);
17684         bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
17685                hdr_tmpl, rpi_page->page_count);
17686
17687         hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
17688         hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
17689         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
17690         shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
17691         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17692         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17693         if (rc != MBX_TIMEOUT)
17694                 mempool_free(mboxq, phba->mbox_mem_pool);
17695         if (shdr_status || shdr_add_status || rc) {
17696                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17697                                 "2514 POST_RPI_HDR mailbox failed with "
17698                                 "status x%x add_status x%x, mbx status x%x\n",
17699                                 shdr_status, shdr_add_status, rc);
17700                 rc = -ENXIO;
17701         } else {
17702                 /*
17703          * The next_rpi stores the next logical modulo-64 rpi value used
17704                  * to post physical rpis in subsequent rpi postings.
17705                  */
17706                 spin_lock_irq(&phba->hbalock);
17707                 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
17708                 spin_unlock_irq(&phba->hbalock);
17709         }
17710         return rc;
17711 }
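
/*
 * Illustrative sketch (hypothetical helper, not driver code) of the polled
 * mailbox pattern used above: the real status lives in the SLI4 config
 * subheader, and on MBX_TIMEOUT the mailbox memory must not be freed
 * because the completion may still arrive later.
 */
#if 0
static int example_poll_cfg_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
                                 union lpfc_sli4_cfg_shdr *shdr)
{
        uint32_t shdr_status, shdr_add_status;
        int rc;

        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
        if (rc != MBX_TIMEOUT)
                mempool_free(mboxq, phba->mbox_mem_pool);
        /* Any nonzero subheader or mailbox status is treated as failure. */
        return (shdr_status || shdr_add_status || rc) ? -ENXIO : 0;
}
#endif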
17712
17713 /**
17714  * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
17715  * @phba: pointer to lpfc hba data structure.
17716  *
17717  * This routine is invoked to allocate the next available rpi from the
17718  * driver's managed rpi range.  If the allocation leaves fewer than
17719  * LPFC_RPI_LOW_WATER_MARK rpis available, it also posts another rpi
17720  * header region to the port.
17721  *
17722  * Returns
17723  *      A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
17724  *      LPFC_RPI_ALLOC_ERROR if no rpis are available.
17725  **/
17726 int
17727 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
17728 {
17729         unsigned long rpi;
17730         uint16_t max_rpi, rpi_limit;
17731         uint16_t rpi_remaining, lrpi = 0;
17732         struct lpfc_rpi_hdr *rpi_hdr;
17733         unsigned long iflag;
17734
17735         /*
17736          * Fetch the next logical rpi.  Because this index is logical,
17737          * the  driver starts at 0 each time.
17738          */
17739         spin_lock_irqsave(&phba->hbalock, iflag);
17740         max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
17741         rpi_limit = phba->sli4_hba.next_rpi;
17742
17743         rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
17744         if (rpi >= rpi_limit)
17745                 rpi = LPFC_RPI_ALLOC_ERROR;
17746         else {
17747                 set_bit(rpi, phba->sli4_hba.rpi_bmask);
17748                 phba->sli4_hba.max_cfg_param.rpi_used++;
17749                 phba->sli4_hba.rpi_count++;
17750         }
17751         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
17752                         "0001 rpi:%x max:%x lim:%x\n",
17753                         (int) rpi, max_rpi, rpi_limit);
17754
17755         /*
17756          * Don't try to allocate more rpi header regions if the device limit
17757          * has been exhausted.
17758          */
17759         if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
17760             (phba->sli4_hba.rpi_count >= max_rpi)) {
17761                 spin_unlock_irqrestore(&phba->hbalock, iflag);
17762                 return rpi;
17763         }
17764
17765         /*
17766          * RPI header postings are not required for SLI4 ports capable of
17767          * extents.
17768          */
17769         if (!phba->sli4_hba.rpi_hdrs_in_use) {
17770                 spin_unlock_irqrestore(&phba->hbalock, iflag);
17771                 return rpi;
17772         }
17773
17774         /*
17775          * If the driver is running low on rpi resources, allocate another
17776          * page now.  Note that the next_rpi value is used because
17777          * it represents how many are actually in use whereas max_rpi notes
17778          * how many are supported max by the device.
17779          */
17780         rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
17781         spin_unlock_irqrestore(&phba->hbalock, iflag);
17782         if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
17783                 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
17784                 if (!rpi_hdr) {
17785                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17786                                         "2002 Error Could not grow rpi "
17787                                         "count\n");
17788                 } else {
17789                         lrpi = rpi_hdr->start_rpi;
17790                         rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
17791                         lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
17792                 }
17793         }
17794
17795         return rpi;
17796 }
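
/*
 * Illustrative sketch (hypothetical, not driver code): the expected pairing
 * of lpfc_sli4_alloc_rpi() with lpfc_sli4_free_rpi().  An rpi that is not
 * released on an error path stays set in rpi_bmask and is leaked until the
 * next driver reset.
 */
#if 0
static int example_with_rpi(struct lpfc_hba *phba)
{
        int rpi = lpfc_sli4_alloc_rpi(phba);

        if (rpi == LPFC_RPI_ALLOC_ERROR)
                return -ENOSPC;
        /* ... use the rpi, e.g. in a REG_RPI mailbox command ... */
        lpfc_sli4_free_rpi(phba, rpi);
        return 0;
}
#endif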
17797
17798 /**
17799  * __lpfc_sli4_free_rpi - Release an rpi for reuse.
17800  * @phba: pointer to lpfc hba data structure.
17801  * @rpi: rpi to release.
17802  *
17803  * Releases an rpi to the pool of available rpis; called with hbalock held.
17804  **/
17805 static void
17806 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
17807 {
17808         if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
17809                 phba->sli4_hba.rpi_count--;
17810                 phba->sli4_hba.max_cfg_param.rpi_used--;
17811         }
17812 }
17813
17814 /**
17815  * lpfc_sli4_free_rpi - Release an rpi for reuse.
17816  * @phba: pointer to lpfc hba data structure.
17817  * @rpi: rpi to release.
17818  *
17819  * This routine releases an rpi to the pool of available rpis.
17820  **/
17821 void
17822 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
17823 {
17824         spin_lock_irq(&phba->hbalock);
17825         __lpfc_sli4_free_rpi(phba, rpi);
17826         spin_unlock_irq(&phba->hbalock);
17827 }
17828
17829 /**
17830  * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
17831  * @phba: pointer to lpfc hba data structure.
17832  *
17833  * This routine is invoked to remove the memory region that
17834  * provided rpi via a bitmask.
17835  **/
17836 void
17837 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
17838 {
17839         kfree(phba->sli4_hba.rpi_bmask);
17840         kfree(phba->sli4_hba.rpi_ids);
17841         bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
17842 }
17843
17844 /**
17845  * lpfc_sli4_resume_rpi - Resume an rpi for a remote node
17846  * @ndlp: pointer to the node whose rpi is to be resumed.
17847  * @cmpl: optional mailbox completion handler.
17848  * @arg: argument passed to the completion handler.
17849  * This routine issues a RESUME_RPI mailbox command for the node's rpi.
17850  **/
17851 int
17852 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
17853         void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
17854 {
17855         LPFC_MBOXQ_t *mboxq;
17856         struct lpfc_hba *phba = ndlp->phba;
17857         int rc;
17858
17859         /* The port is notified of the header region via a mailbox command. */
17860         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17861         if (!mboxq)
17862                 return -ENOMEM;
17863
17864         /* Post all rpi memory regions to the port. */
17865         lpfc_resume_rpi(mboxq, ndlp);
17866         if (cmpl) {
17867                 mboxq->mbox_cmpl = cmpl;
17868                 mboxq->context1 = arg;
17869                 mboxq->context2 = ndlp;
17870         } else
17871                 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17872         mboxq->vport = ndlp->vport;
17873         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
17874         if (rc == MBX_NOT_FINISHED) {
17875                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17876                                 "2010 Resume RPI Mailbox failed "
17877                                 "status %d, mbxStatus x%x\n", rc,
17878                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
17879                 mempool_free(mboxq, phba->mbox_mem_pool);
17880                 return -EIO;
17881         }
17882         return 0;
17883 }
17884
17885 /**
17886  * lpfc_sli4_init_vpi - Initialize a vpi with the port
17887  * @vport: Pointer to the vport for which the vpi is being initialized
17888  *
17889  * This routine is invoked to activate a vpi with the port.
17890  *
17891  * Returns:
17892  *    0 success
17893  *    -Evalue otherwise
17894  **/
17895 int
17896 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
17897 {
17898         LPFC_MBOXQ_t *mboxq;
17899         int rc = 0;
17900         int retval = MBX_SUCCESS;
17901         uint32_t mbox_tmo;
17902         struct lpfc_hba *phba = vport->phba;
17903         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17904         if (!mboxq)
17905                 return -ENOMEM;
17906         lpfc_init_vpi(phba, mboxq, vport->vpi);
17907         mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
17908         rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
17909         if (rc != MBX_SUCCESS) {
17910                 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
17911                                 "2022 INIT VPI Mailbox failed "
17912                                 "status %d, mbxStatus x%x\n", rc,
17913                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
17914                 retval = -EIO;
17915         }
17916         if (rc != MBX_TIMEOUT)
17917                 mempool_free(mboxq, vport->phba->mbox_mem_pool);
17918
17919         return retval;
17920 }
17921
17922 /**
17923  * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
17924  * @phba: pointer to lpfc hba data structure.
17925  * @mboxq: Pointer to mailbox object.
17926  *
17927  * This routine is the completion handler for the ADD_FCF_RECORD mailbox
17928  * command.  It checks the mailbox subheader status and frees the
17929  * nonembedded mailbox resources.
17930  **/
17931 static void
17932 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
17933 {
17934         void *virt_addr;
17935         union lpfc_sli4_cfg_shdr *shdr;
17936         uint32_t shdr_status, shdr_add_status;
17937
17938         virt_addr = mboxq->sge_array->addr[0];
17939         /* The IOCTL status is embedded in the mailbox subheader. */
17940         shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
17941         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17942         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17943
17944         if ((shdr_status || shdr_add_status) &&
17945                 (shdr_status != STATUS_FCF_IN_USE))
17946                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17947                         "2558 ADD_FCF_RECORD mailbox failed with "
17948                         "status x%x add_status x%x\n",
17949                         shdr_status, shdr_add_status);
17950
17951         lpfc_sli4_mbox_cmd_free(phba, mboxq);
17952 }
17953
17954 /**
17955  * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
17956  * @phba: pointer to lpfc hba data structure.
17957  * @fcf_record:  pointer to the initialized fcf record to add.
17958  *
17959  * This routine is invoked to manually add a single FCF record. The caller
17960  * must pass a completely initialized FCF_Record.  This routine takes
17961  * care of the nonembedded mailbox operations.
17962  **/
17963 int
17964 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
17965 {
17966         int rc = 0;
17967         LPFC_MBOXQ_t *mboxq;
17968         uint8_t *bytep;
17969         void *virt_addr;
17970         struct lpfc_mbx_sge sge;
17971         uint32_t alloc_len, req_len;
17972         uint32_t fcfindex;
17973
17974         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17975         if (!mboxq) {
17976                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17977                         "2009 Failed to allocate mbox for ADD_FCF cmd\n");
17978                 return -ENOMEM;
17979         }
17980
17981         req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
17982                   sizeof(uint32_t);
17983
17984         /* Allocate DMA memory and set up the non-embedded mailbox command */
17985         alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
17986                                      LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
17987                                      req_len, LPFC_SLI4_MBX_NEMBED);
17988         if (alloc_len < req_len) {
17989                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17990                         "2523 Allocated DMA memory size (x%x) is "
17991                         "less than the requested DMA memory "
17992                         "size (x%x)\n", alloc_len, req_len);
17993                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
17994                 return -ENOMEM;
17995         }
17996
17997         /*
17998          * Get the first SGE entry from the non-embedded DMA memory.  This
17999          * routine only uses a single SGE.
18000          */
18001         lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
18002         virt_addr = mboxq->sge_array->addr[0];
18003         /*
18004          * Configure the FCF record for FCFI 0.  This is the driver's
18005          * hardcoded default and gets used in nonFIP mode.
18006          * hardcoded default and gets used in non-FIP mode.
18007         fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
18008         bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
18009         lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
18010
18011         /*
18012          * Copy the fcf_index and the FCF Record Data. The data starts after
18013          * the FCoE header plus word10. The data copy needs to be endian
18014          * correct.
18015          */
18016         bytep += sizeof(uint32_t);
18017         lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
18018         mboxq->vport = phba->pport;
18019         mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
18020         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18021         if (rc == MBX_NOT_FINISHED) {
18022                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18023                         "2515 ADD_FCF_RECORD mailbox failed with "
18024                         "status 0x%x\n", rc);
18025                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18026                 rc = -EIO;
18027         } else
18028                 rc = 0;
18029
18030         return rc;
18031 }
18032
18033 /**
18034  * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
18035  * @phba: pointer to lpfc hba data structure.
18036  * @fcf_record:  pointer to the fcf record to write the default data.
18037  * @fcf_index: FCF table entry index.
18038  *
18039  * This routine is invoked to build the driver's default FCF record.  The
18040  * values used are hardcoded.  This routine handles memory initialization.
18041  *
18042  **/
18043 void
18044 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
18045                                 struct fcf_record *fcf_record,
18046                                 uint16_t fcf_index)
18047 {
18048         memset(fcf_record, 0, sizeof(struct fcf_record));
18049         fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
18050         fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
18051         fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
18052         bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
18053         bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
18054         bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
18055         bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
18056         bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
18057         bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
18058         bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
18059         bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
18060         bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
18061         bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
18062         bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
18063         bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
18064         bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
18065                 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
18066         /* Set the VLAN bit map */
18067         if (phba->valid_vlan) {
18068                 fcf_record->vlan_bitmap[phba->vlan_id / 8]
18069                         = 1 << (phba->vlan_id % 8);
18070         }
18071 }
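
/*
 * Illustrative sketch (hypothetical, not driver code): how the builder above
 * pairs with lpfc_sli4_add_fcf_record() for non-FIP operation, using fcf
 * index 0 as the driver's hardcoded default.
 */
#if 0
static int example_add_dflt_fcf(struct lpfc_hba *phba)
{
        struct fcf_record fcf_record;

        lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, 0);
        return lpfc_sli4_add_fcf_record(phba, &fcf_record);
}
#endif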
18072
18073 /**
18074  * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
18075  * @phba: pointer to lpfc hba data structure.
18076  * @fcf_index: FCF table entry offset.
18077  *
18078  * This routine is invoked to scan the entire FCF table by reading FCF
18079  * record and processing it one at a time starting from the @fcf_index
18080  * for initial FCF discovery or fast FCF failover rediscovery.
18081  *
18082  * Return 0 if the mailbox command is submitted successfully, non-zero
18083  * otherwise.
18084  **/
18085 int
18086 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18087 {
18088         int rc = 0, error;
18089         LPFC_MBOXQ_t *mboxq;
18090
18091         phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
18092         phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
18093         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18094         if (!mboxq) {
18095                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18096                                 "2000 Failed to allocate mbox for "
18097                                 "READ_FCF cmd\n");
18098                 error = -ENOMEM;
18099                 goto fail_fcf_scan;
18100         }
18101         /* Construct the read FCF record mailbox command */
18102         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18103         if (rc) {
18104                 error = -EINVAL;
18105                 goto fail_fcf_scan;
18106         }
18107         /* Issue the mailbox command asynchronously */
18108         mboxq->vport = phba->pport;
18109         mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
18110
18111         spin_lock_irq(&phba->hbalock);
18112         phba->hba_flag |= FCF_TS_INPROG;
18113         spin_unlock_irq(&phba->hbalock);
18114
18115         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18116         if (rc == MBX_NOT_FINISHED)
18117                 error = -EIO;
18118         else {
18119                 /* Reset eligible FCF count for new scan */
18120                 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
18121                         phba->fcf.eligible_fcf_cnt = 0;
18122                 error = 0;
18123         }
18124 fail_fcf_scan:
18125         if (error) {
18126                 if (mboxq)
18127                         lpfc_sli4_mbox_cmd_free(phba, mboxq);
18128                 /* FCF scan failed, clear FCF_TS_INPROG flag */
18129                 spin_lock_irq(&phba->hbalock);
18130                 phba->hba_flag &= ~FCF_TS_INPROG;
18131                 spin_unlock_irq(&phba->hbalock);
18132         }
18133         return error;
18134 }
18135
18136 /**
18137  * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
18138  * @phba: pointer to lpfc hba data structure.
18139  * @fcf_index: FCF table entry offset.
18140  *
18141  * This routine is invoked to read an FCF record indicated by @fcf_index
18142  * and to use it for FLOGI roundrobin FCF failover.
18143  *
18144  * Return 0 if the mailbox command is submitted successfully, non-zero
18145  * otherwise.
18146  **/
18147 int
18148 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18149 {
18150         int rc = 0, error;
18151         LPFC_MBOXQ_t *mboxq;
18152
18153         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18154         if (!mboxq) {
18155                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18156                                 "2763 Failed to allocate mbox for "
18157                                 "READ_FCF cmd\n");
18158                 error = -ENOMEM;
18159                 goto fail_fcf_read;
18160         }
18161         /* Construct the read FCF record mailbox command */
18162         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18163         if (rc) {
18164                 error = -EINVAL;
18165                 goto fail_fcf_read;
18166         }
18167         /* Issue the mailbox command asynchronously */
18168         mboxq->vport = phba->pport;
18169         mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
18170         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18171         if (rc == MBX_NOT_FINISHED)
18172                 error = -EIO;
18173         else
18174                 error = 0;
18175
18176 fail_fcf_read:
18177         if (error && mboxq)
18178                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18179         return error;
18180 }
18181
18182 /**
18183  * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
18184  * @phba: pointer to lpfc hba data structure.
18185  * @fcf_index: FCF table entry offset.
18186  *
18187  * This routine is invoked to read an FCF record indicated by @fcf_index to
18188  * determine whether it's eligible for FLOGI roundrobin failover list.
18189  *
18190  * Return 0 if the mailbox command is submitted successfully, non-zero
18191  * otherwise.
18192  **/
18193 int
18194 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18195 {
18196         int rc = 0, error;
18197         LPFC_MBOXQ_t *mboxq;
18198
18199         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18200         if (!mboxq) {
18201                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18202                                 "2758 Failed to allocate mbox for "
18203                                 "READ_FCF cmd\n");
18204                 error = -ENOMEM;
18205                 goto fail_fcf_read;
18206         }
18207         /* Construct the read FCF record mailbox command */
18208         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18209         if (rc) {
18210                 error = -EINVAL;
18211                 goto fail_fcf_read;
18212         }
18213         /* Issue the mailbox command asynchronously */
18214         mboxq->vport = phba->pport;
18215         mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
18216         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18217         if (rc == MBX_NOT_FINISHED)
18218                 error = -EIO;
18219         else
18220                 error = 0;
18221
18222 fail_fcf_read:
18223         if (error && mboxq)
18224                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18225         return error;
18226 }
18227
18228 /**
18229  * lpfc_check_next_fcf_pri_level - Populate rr_bmask with next priority level
18230  * @phba: pointer to the lpfc_hba struct for this port.
18231  *
18232  * This routine is called from the lpfc_sli4_fcf_rr_next_index_get routine
18233  * when the rr_bmask is empty. The FCF indices are put into the rr_bmask
18234  * based on their priority level, starting from the highest priority down
18235  * to the lowest; the most likely FCF candidate will be in the highest
18236  * priority group. When called, this routine searches the fcf_pri list for
18237  * the next lowest priority group and repopulates the rr_bmask with only
18238  * those fcf indexes.
18239  * Returns: 1=success 0=failure
18240  **/
18241 static int
18242 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
18243 {
18244         uint16_t next_fcf_pri;
18245         uint16_t last_index;
18246         struct lpfc_fcf_pri *fcf_pri;
18247         int rc;
18248         int ret = 0;
18249
18250         last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
18251                         LPFC_SLI4_FCF_TBL_INDX_MAX);
18252         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18253                         "3060 Last IDX %d\n", last_index);
18254
18255         /* Verify the priority list has 2 or more entries */
18256         spin_lock_irq(&phba->hbalock);
18257         if (list_empty(&phba->fcf.fcf_pri_list) ||
18258             list_is_singular(&phba->fcf.fcf_pri_list)) {
18259                 spin_unlock_irq(&phba->hbalock);
18260                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18261                         "3061 Last IDX %d\n", last_index);
18262                 return 0; /* Empty rr list */
18263         }
18264         spin_unlock_irq(&phba->hbalock);
18265
18266         next_fcf_pri = 0;
18267         /*
18268          * Clear the rr_bmask and set all of the bits that are at this
18269          * priority.
18270          */
18271         memset(phba->fcf.fcf_rr_bmask, 0,
18272                         sizeof(*phba->fcf.fcf_rr_bmask));
18273         spin_lock_irq(&phba->hbalock);
18274         list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18275                 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
18276                         continue;
18277                 /*
18278                  * the 1st priority that has not FLOGI failed
18279                  * will be the highest.
18280                  */
18281                 if (!next_fcf_pri)
18282                         next_fcf_pri = fcf_pri->fcf_rec.priority;
18283                 spin_unlock_irq(&phba->hbalock);
18284                 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18285                         rc = lpfc_sli4_fcf_rr_index_set(phba,
18286                                                 fcf_pri->fcf_rec.fcf_index);
18287                         if (rc)
18288                                 return 0;
18289                 }
18290                 spin_lock_irq(&phba->hbalock);
18291         }
18292         /*
18293          * If next_fcf_pri was not set above and the list is not empty, then
18294          * FLOGI has failed on every entry. Reset the FLOGI-failed flags
18295          * and start over from the beginning.
18296          */
18297         if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
18298                 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18299                         fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
18300                         /*
18301                          * The first priority whose FLOGI has not failed
18302                          * is the highest.
18303                          */
18304                         if (!next_fcf_pri)
18305                                 next_fcf_pri = fcf_pri->fcf_rec.priority;
18306                         spin_unlock_irq(&phba->hbalock);
18307                         if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18308                                 rc = lpfc_sli4_fcf_rr_index_set(phba,
18309                                                 fcf_pri->fcf_rec.fcf_index);
18310                                 if (rc)
18311                                         return 0;
18312                         }
18313                         spin_lock_irq(&phba->hbalock);
18314                 }
18315         } else
18316                 ret = 1;
18317         spin_unlock_irq(&phba->hbalock);
18318
18319         return ret;
18320 }
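
/*
 * Example (illustrative sketch, not driver code): the repopulation above
 * is ordinary bitmap bookkeeping. Given a hypothetical priority table
 * "tbl" with "cnt" entries, a target priority "next_pri" and a bitmap
 * "bmask", the same idea reduces to:
 *
 *        bitmap_zero(bmask, LPFC_SLI4_FCF_TBL_INDX_MAX);
 *        for (i = 0; i < cnt; i++)
 *                if (tbl[i].priority == next_pri &&
 *                    !(tbl[i].flag & LPFC_FCF_FLOGI_FAILED))
 *                        set_bit(tbl[i].fcf_index, bmask);
 */
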
18321 /**
18322  * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
18323  * @phba: pointer to lpfc hba data structure.
18324  *
18325  * This routine gets the next eligible FCF record index in a round
18326  * robin fashion. If the next eligible FCF record index equals the
18327  * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
18328  * is returned; otherwise, the next eligible FCF record's index is
18329  * returned.
18330  **/
18331 uint16_t
18332 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
18333 {
18334         uint16_t next_fcf_index;
18335
18336 initial_priority:
18337         /* Search start from next bit of currently registered FCF index */
18338         next_fcf_index = phba->fcf.current_rec.fcf_indx;
18339
18340 next_priority:
18341         /* Determine the next fcf index to check */
18342         next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
18343         next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18344                                        LPFC_SLI4_FCF_TBL_INDX_MAX,
18345                                        next_fcf_index);
18346
18347         /* Wrap around condition on phba->fcf.fcf_rr_bmask */
18348         if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18349                 /*
18350                  * If we have wrapped then we need to clear the bits that
18351                  * have been tested so that we can detect when we should
18352                  * change the priority level.
18353                  */
18354                 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18355                                                LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
18356         }
18357
18359         /* Check roundrobin failover list empty condition */
18360         if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
18361                 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
18362                 /*
18363                  * If the next fcf index is not found, check if there are
18364                  * lower priority level fcf's in the fcf_priority list.
18365                  * Set up the rr_bmask with all of the available fcf bits
18366                  * at that level and continue the selection process.
18367                  */
18368                 if (lpfc_check_next_fcf_pri_level(phba))
18369                         goto initial_priority;
18370                 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
18371                                 "2844 No roundrobin failover FCF available\n");
18372                 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
18373                         return LPFC_FCOE_FCF_NEXT_NONE;
18374                 else {
18375                         lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
18376                                 "3063 Only FCF available idx %d, flag %x\n",
18377                                 next_fcf_index,
18378                         phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
18379                         return next_fcf_index;
18380                 }
18381         }
18382
18383         if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
18384                 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
18385                 LPFC_FCF_FLOGI_FAILED) {
18386                 if (list_is_singular(&phba->fcf.fcf_pri_list))
18387                         return LPFC_FCOE_FCF_NEXT_NONE;
18388
18389                 goto next_priority;
18390         }
18391
18392         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18393                         "2845 Get next roundrobin failover FCF (x%x)\n",
18394                         next_fcf_index);
18395
18396         return next_fcf_index;
18397 }
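
/*
 * Example (illustrative sketch, assuming "phba" is an initialized SLI4
 * port): a failover path is expected to consume the roundrobin list
 * until the sentinel is returned:
 *
 *        uint16_t fcf_index;
 *
 *        fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
 *        if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE)
 *                return;
 *
 * A return of LPFC_FCOE_FCF_NEXT_NONE means no eligible FCF is left at
 * any priority level; any other value is the index of the FCF record to
 * read next.
 */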
18398
18399 /**
18400  * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
18401  * @phba: pointer to lpfc hba data structure.
18402  * @fcf_index: index into the FCF record table.
18403  * This routine sets the FCF record index in to the eligible bmask for
18404  * roundrobin failover search. It checks to make sure that the index
18405  * does not go beyond the range of the driver allocated bmask dimension
18406  * before setting the bit.
18407  *
18408  * Returns 0 if the index bit is successfully set, otherwise it returns
18409  * -EINVAL.
18410  **/
18411 int
18412 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
18413 {
18414         if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18415                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18416                                 "2610 FCF (x%x) reached driver's book "
18417                                 "keeping dimension:x%x\n",
18418                                 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18419                 return -EINVAL;
18420         }
18421         /* Set the eligible FCF record index bmask */
18422         set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18423
18424         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18425                         "2790 Set FCF (x%x) to roundrobin FCF failover "
18426                         "bmask\n", fcf_index);
18427
18428         return 0;
18429 }
18430
18431 /**
18432  * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
18433  * @phba: pointer to lpfc hba data structure.
18434  * @fcf_index: index into the FCF record table.
18435  * This routine clears the FCF record index from the eligible bmask for
18436  * roundrobin failover search. It checks to make sure that the index
18437  * does not go beyond the range of the driver allocated bmask dimension
18438  * before clearing the bit.
18439  **/
18440 void
18441 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
18442 {
18443         struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
18444         if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18445                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18446                                 "2762 FCF (x%x) reached driver's book "
18447                                 "keeping dimension:x%x\n",
18448                                 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18449                 return;
18450         }
18451         /* Clear the eligible FCF record index bmask */
18452         spin_lock_irq(&phba->hbalock);
18453         list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
18454                                  list) {
18455                 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
18456                         list_del_init(&fcf_pri->list);
18457                         break;
18458                 }
18459         }
18460         spin_unlock_irq(&phba->hbalock);
18461         clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18462
18463         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18464                         "2791 Clear FCF (x%x) from roundrobin failover "
18465                         "bmask\n", fcf_index);
18466 }
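
/*
 * Example (illustrative sketch): the set/clear pair above keeps the
 * roundrobin bmask in step with discovery results. A typical sequence,
 * with "phba" and "fcf_index" assumed valid:
 *
 *        if (lpfc_sli4_fcf_rr_index_set(phba, fcf_index))
 *                return;
 *        ...
 *        lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
 *
 * The set call fails only for an out-of-range index; the clear call is
 * made once the FCF should be dropped from the rotation, for example
 * after a failed FLOGI through it.
 */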
18467
18468 /**
18469  * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
18470  * @phba: pointer to lpfc hba data structure.
18471  * @mbox: pointer to the driver internal queue element for mailbox command.
18472  * This routine is the completion routine for the rediscover FCF table mailbox
18473  * command. On failure it falls back to the appropriate FCF failover handling;
18474  * on success it starts the FCF rediscovery wait timer.
18475  **/
18476 static void
18477 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
18478 {
18479         struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
18480         uint32_t shdr_status, shdr_add_status;
18481
18482         redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
18483
18484         shdr_status = bf_get(lpfc_mbox_hdr_status,
18485                              &redisc_fcf->header.cfg_shdr.response);
18486         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
18487                              &redisc_fcf->header.cfg_shdr.response);
18488         if (shdr_status || shdr_add_status) {
18489                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18490                                 "2746 Requesting for FCF rediscovery failed "
18491                                 "status x%x add_status x%x\n",
18492                                 shdr_status, shdr_add_status);
18493                 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
18494                         spin_lock_irq(&phba->hbalock);
18495                         phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
18496                         spin_unlock_irq(&phba->hbalock);
18497                         /*
18498                          * CVL event triggered FCF rediscover request failed,
18499                          * last resort to re-try current registered FCF entry.
18500                          */
18501                         lpfc_retry_pport_discovery(phba);
18502                 } else {
18503                         spin_lock_irq(&phba->hbalock);
18504                         phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
18505                         spin_unlock_irq(&phba->hbalock);
18506                         /*
18507                          * DEAD FCF event triggered FCF rediscover request
18508                          * failed, last resort to fail over as a link down
18509                          * to FCF registration.
18510                          */
18511                         lpfc_sli4_fcf_dead_failthrough(phba);
18512                 }
18513         } else {
18514                 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18515                                 "2775 Start FCF rediscover quiescent timer\n");
18516                 /*
18517                  * Start the FCF rediscovery wait timer for the pending
18518                  * FCF before rescanning the FCF record table.
18519                  */
18520                 lpfc_fcf_redisc_wait_start_timer(phba);
18521         }
18522
18523         mempool_free(mbox, phba->mbox_mem_pool);
18524 }
18525
18526 /**
18527  * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
18528  * @phba: pointer to lpfc hba data structure.
18529  *
18530  * This routine is invoked to request rediscovery of the entire FCF table
18531  * by the port.
18532  **/
18533 int
18534 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
18535 {
18536         LPFC_MBOXQ_t *mbox;
18537         struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
18538         int rc, length;
18539
18540         /* Cancel retry delay timers to all vports before FCF rediscover */
18541         lpfc_cancel_all_vport_retry_delay_timer(phba);
18542
18543         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18544         if (!mbox) {
18545                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18546                                 "2745 Failed to allocate mbox for "
18547                                 "requesting FCF rediscover.\n");
18548                 return -ENOMEM;
18549         }
18550
18551         length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
18552                   sizeof(struct lpfc_sli4_cfg_mhdr));
18553         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18554                          LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
18555                          length, LPFC_SLI4_MBX_EMBED);
18556
18557         redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
18558         /* Set count to 0 for invalidating the entire FCF database */
18559         bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
18560
18561         /* Issue the mailbox command asynchronously */
18562         mbox->vport = phba->pport;
18563         mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
18564         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
18565
18566         if (rc == MBX_NOT_FINISHED) {
18567                 mempool_free(mbox, phba->mbox_mem_pool);
18568                 return -EIO;
18569         }
18570         return 0;
18571 }
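
/*
 * Example (illustrative sketch): the routine above follows the driver's
 * usual asynchronous mailbox pattern. With the command setup elided and
 * "my_cmpl_handler" a hypothetical completion routine, the shape is:
 *
 *        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *        if (!mbox)
 *                return -ENOMEM;
 *        ...
 *        mbox->vport = phba->pport;
 *        mbox->mbox_cmpl = my_cmpl_handler;
 *        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 *        if (rc == MBX_NOT_FINISHED) {
 *                mempool_free(mbox, phba->mbox_mem_pool);
 *                return -EIO;
 *        }
 *        return 0;
 *
 * On success the completion handler owns the mailbox and is responsible
 * for freeing it, as lpfc_mbx_cmpl_redisc_fcf_table does above.
 */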
18572
18573 /**
18574  * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
18575  * @phba: pointer to lpfc hba data structure.
18576  *
18577  * This function is the failover routine as a last resort to the FCF DEAD
18578  * event when the driver fails to perform a fast FCF failover.
18579  **/
18580 void
18581 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
18582 {
18583         uint32_t link_state;
18584
18585         /*
18586          * Last resort as FCF DEAD event failover will treat this as
18587          * a link down, but save the link state because we don't want
18588          * it to be changed to Link Down unless it is already down.
18589          */
18590         link_state = phba->link_state;
18591         lpfc_linkdown(phba);
18592         phba->link_state = link_state;
18593
18594         /* Unregister FCF if no devices connected to it */
18595         lpfc_unregister_unused_fcf(phba);
18596 }
18597
18598 /**
18599  * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
18600  * @phba: pointer to lpfc hba data structure.
18601  * @rgn23_data: pointer to configuration region 23 data.
18602  *
18603  * This function gets SLI3 port configuration region 23 data through the
18604  * memory dump mailbox command. When it successfully retrieves data, the
18605  * size of the data is returned; otherwise, 0 is returned.
18606  **/
18607 static uint32_t
18608 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
18609 {
18610         LPFC_MBOXQ_t *pmb = NULL;
18611         MAILBOX_t *mb;
18612         uint32_t offset = 0;
18613         int rc;
18614
18615         if (!rgn23_data)
18616                 return 0;
18617
18618         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18619         if (!pmb) {
18620                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18621                                 "2600 failed to allocate mailbox memory\n");
18622                 return 0;
18623         }
18624         mb = &pmb->u.mb;
18625
18626         do {
18627                 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
18628                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
18629
18630                 if (rc != MBX_SUCCESS) {
18631                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
18632                                         "2601 failed to read config "
18633                                         "region 23, rc 0x%x Status 0x%x\n",
18634                                         rc, mb->mbxStatus);
18635                         mb->un.varDmp.word_cnt = 0;
18636                 }
18637                 /*
18638                  * The dump may return zero when finished or when a
18639                  * mailbox error occurred; either way we are done.
18640                  */
18641                 if (mb->un.varDmp.word_cnt == 0)
18642                         break;
18643                 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
18644                         mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
18645
18646                 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
18647                                        rgn23_data + offset,
18648                                        mb->un.varDmp.word_cnt);
18649                 offset += mb->un.varDmp.word_cnt;
18650         } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
18651
18652         mempool_free(pmb, phba->mbox_mem_pool);
18653         return offset;
18654 }
18655
18656 /**
18657  * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
18658  * @phba: pointer to lpfc hba data structure.
18659  * @rgn23_data: pointer to configuration region 23 data.
18660  *
18661  * This function gets SLI4 port configuration region 23 data through the
18662  * memory dump mailbox command. When it successfully retrieves data, the
18663  * size of the data is returned; otherwise, 0 is returned.
18664  **/
18665 static uint32_t
18666 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
18667 {
18668         LPFC_MBOXQ_t *mboxq = NULL;
18669         struct lpfc_dmabuf *mp = NULL;
18670         struct lpfc_mqe *mqe;
18671         uint32_t data_length = 0;
18672         int rc;
18673
18674         if (!rgn23_data)
18675                 return 0;
18676
18677         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18678         if (!mboxq) {
18679                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18680                                 "3105 failed to allocate mailbox memory\n");
18681                 return 0;
18682         }
18683
18684         if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
18685                 goto out;
18686         mqe = &mboxq->u.mqe;
18687         mp = (struct lpfc_dmabuf *) mboxq->context1;
18688         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18689         if (rc)
18690                 goto out;
18691         data_length = mqe->un.mb_words[5];
18692         if (data_length == 0)
18693                 goto out;
18694         if (data_length > DMP_RGN23_SIZE) {
18695                 data_length = 0;
18696                 goto out;
18697         }
18698         lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
18699 out:
18700         mempool_free(mboxq, phba->mbox_mem_pool);
18701         if (mp) {
18702                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
18703                 kfree(mp);
18704         }
18705         return data_length;
18706 }
18707
18708 /**
18709  * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
18710  * @phba: pointer to lpfc hba data structure.
18711  *
18712  * This function reads region 23 and parses the TLV for port status to
18713  * decide if the user disabled the port. If the TLV indicates the
18714  * port is disabled, the hba_flag is set accordingly.
18715  **/
18716 void
18717 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
18718 {
18719         uint8_t *rgn23_data = NULL;
18720         uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
18721         uint32_t offset = 0;
18722
18723         /* Get adapter Region 23 data */
18724         rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
18725         if (!rgn23_data)
18726                 goto out;
18727
18728         if (phba->sli_rev < LPFC_SLI_REV4)
18729                 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
18730         else {
18731                 if_type = bf_get(lpfc_sli_intf_if_type,
18732                                  &phba->sli4_hba.sli_intf);
18733                 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
18734                         goto out;
18735                 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
18736         }
18737
18738         if (!data_size)
18739                 goto out;
18740
18741         /* Check the region signature first */
18742         if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
18743                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18744                         "2619 Config region 23 has bad signature\n");
18745                 goto out;
18746         }
18747         offset += 4;
18748
18749         /* Check the data structure version */
18750         if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
18751                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18752                         "2620 Config region 23 has bad version\n");
18753                 goto out;
18754         }
18755         offset += 4;
18756
18757         /* Parse TLV entries in the region */
18758         while (offset < data_size) {
18759                 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
18760                         break;
18761                 /*
18762                  * If the TLV is not driver specific TLV or driver id is
18763                  * not linux driver id, skip the record.
18764                  */
18765                 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
18766                     (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
18767                     (rgn23_data[offset + 3] != 0)) {
18768                         offset += rgn23_data[offset + 1] * 4 + 4;
18769                         continue;
18770                 }
18771
18772                 /* Driver found a driver specific TLV in the config region */
18773                 sub_tlv_len = rgn23_data[offset + 1] * 4;
18774                 offset += 4;
18775                 tlv_offset = 0;
18776
18777                 /*
18778                  * Search for configured port state sub-TLV.
18779                  */
18780                 while ((offset < data_size) &&
18781                         (tlv_offset < sub_tlv_len)) {
18782                         if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
18783                                 offset += 4;
18784                                 tlv_offset += 4;
18785                                 break;
18786                         }
18787                         if (rgn23_data[offset] != PORT_STE_TYPE) {
18788                                 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
18789                                 offset += rgn23_data[offset + 1] * 4 + 4;
18790                                 continue;
18791                         }
18792
18793                         /* This HBA contains PORT_STE configured */
18794                         if (!rgn23_data[offset + 2])
18795                                 phba->hba_flag |= LINK_DISABLED;
18796
18797                         goto out;
18798                 }
18799         }
18800
18801 out:
18802         kfree(rgn23_data);
18803         return;
18804 }
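
/*
 * Example (illustrative sketch): region 23 is a simple TLV stream and
 * the parser above is the usual type/length walk. With a buffer "data"
 * of "size" bytes and a hypothetical "wanted_type", the core loop is:
 *
 *        while (offset < size) {
 *                uint8_t type = data[offset];
 *                uint32_t len = data[offset + 1] * 4;
 *
 *                if (type == LPFC_REGION23_LAST_REC)
 *                        break;
 *                if (type != wanted_type) {
 *                        offset += len + 4;
 *                        continue;
 *                }
 *                ...
 *        }
 *
 * Lengths are stored in words, so each record occupies a 4-byte header
 * plus len bytes of value.
 */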
18805
18806 /**
18807  * lpfc_wr_object - write an object to the firmware
18808  * @phba: HBA structure that indicates port to create a queue on.
18809  * @dmabuf_list: list of dmabufs to write to the port.
18810  * @size: the total number of bytes to write to the port.
18811  * @offset: the current offset to be used to start the transfer.
18812  *
18813  * This routine will create a wr_object mailbox command to send to the port.
18814  * The mailbox command will be constructed using the dma buffers described in
18815  * @dmabuf_list to create a list of BDEs. This routine will fill in as many
18816  * BDEs as the embedded mailbox can support. The @offset variable will be
18817  * used to indicate the starting offset of the transfer and will also return
18818  * the offset after the write object mailbox has completed. @size is used to
18819  * determine the end of the object and whether the eof bit should be set.
18820  *
18821  * Returns 0 if successful and offset will contain the new offset to use
18822  * for the next write.
18823  * Returns a negative value for error cases.
18824  **/
18825 int
18826 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
18827                uint32_t size, uint32_t *offset)
18828 {
18829         struct lpfc_mbx_wr_object *wr_object;
18830         LPFC_MBOXQ_t *mbox;
18831         int rc = 0, i = 0;
18832         uint32_t shdr_status, shdr_add_status;
18833         uint32_t mbox_tmo;
18834         union lpfc_sli4_cfg_shdr *shdr;
18835         struct lpfc_dmabuf *dmabuf;
18836         uint32_t written = 0;
18837
18838         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18839         if (!mbox)
18840                 return -ENOMEM;
18841
18842         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
18843                         LPFC_MBOX_OPCODE_WRITE_OBJECT,
18844                         sizeof(struct lpfc_mbx_wr_object) -
18845                         sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
18846
18847         wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
18848         wr_object->u.request.write_offset = *offset;
18849         sprintf((uint8_t *)wr_object->u.request.object_name, "/");
18850         wr_object->u.request.object_name[0] =
18851                 cpu_to_le32(wr_object->u.request.object_name[0]);
18852         bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
18853         list_for_each_entry(dmabuf, dmabuf_list, list) {
18854                 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
18855                         break;
18856                 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
18857                 wr_object->u.request.bde[i].addrHigh =
18858                         putPaddrHigh(dmabuf->phys);
18859                 if (written + SLI4_PAGE_SIZE >= size) {
18860                         wr_object->u.request.bde[i].tus.f.bdeSize =
18861                                 (size - written);
18862                         written += (size - written);
18863                         bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
18864                 } else {
18865                         wr_object->u.request.bde[i].tus.f.bdeSize =
18866                                 SLI4_PAGE_SIZE;
18867                         written += SLI4_PAGE_SIZE;
18868                 }
18869                 i++;
18870         }
18871         wr_object->u.request.bde_count = i;
18872         bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
18873         if (!phba->sli4_hba.intr_enable)
18874                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
18875         else {
18876                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
18877                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
18878         }
18879         /* The IOCTL status is embedded in the mailbox subheader. */
18880         shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
18881         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18882         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18883         if (rc != MBX_TIMEOUT)
18884                 mempool_free(mbox, phba->mbox_mem_pool);
18885         if (shdr_status || shdr_add_status || rc) {
18886                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18887                                 "3025 Write Object mailbox failed with "
18888                                 "status x%x add_status x%x, mbx status x%x\n",
18889                                 shdr_status, shdr_add_status, rc);
18890                 rc = -ENXIO;
18891                 *offset = shdr_add_status;
18892         } else
18893                 *offset += wr_object->u.response.actual_write_length;
18894         return rc;
18895 }
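
/*
 * Example (illustrative sketch): a firmware download would drive the
 * routine above in a loop, each pass writing as many BDEs as the
 * embedded mailbox can carry. Assuming "dma_buffers" is a populated
 * dmabuf list for the current chunk and "fw_size" is the image size:
 *
 *        uint32_t offset = 0;
 *        int rc;
 *
 *        while (offset < fw_size) {
 *                ... refill dma_buffers with the data at offset ...
 *                rc = lpfc_wr_object(phba, &dma_buffers, fw_size, &offset);
 *                if (rc)
 *                        return rc;
 *        }
 *
 * On failure *offset is overwritten with the additional status from the
 * mailbox subheader, as done above.
 */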
18896
18897 /**
18898  * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
18899  * @vport: pointer to vport data structure.
18900  *
18901  * This function iterates through the mailboxq and cleans up all REG_LOGIN
18902  * and REG_VPI mailbox commands associated with the vport. This function
18903  * is called when the driver wants to restart discovery of the vport due to
18904  * a Clear Virtual Link event.
18905  **/
18906 void
18907 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
18908 {
18909         struct lpfc_hba *phba = vport->phba;
18910         LPFC_MBOXQ_t *mb, *nextmb;
18911         struct lpfc_dmabuf *mp;
18912         struct lpfc_nodelist *ndlp;
18913         struct lpfc_nodelist *act_mbx_ndlp = NULL;
18914         struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
18915         LIST_HEAD(mbox_cmd_list);
18916         uint8_t restart_loop;
18917
18918         /* Clean up internally queued mailbox commands with the vport */
18919         spin_lock_irq(&phba->hbalock);
18920         list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
18921                 if (mb->vport != vport)
18922                         continue;
18923
18924                 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
18925                         (mb->u.mb.mbxCommand != MBX_REG_VPI))
18926                         continue;
18927
18928                 list_del(&mb->list);
18929                 list_add_tail(&mb->list, &mbox_cmd_list);
18930         }
18931         /* Clean up active mailbox command with the vport */
18932         mb = phba->sli.mbox_active;
18933         if (mb && (mb->vport == vport)) {
18934                 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
18935                         (mb->u.mb.mbxCommand == MBX_REG_VPI))
18936                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18937                 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
18938                         act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
18939                         /* Put reference count for delayed processing */
18940                         act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
18941                         /* Unregister the RPI when mailbox complete */
18942                         mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
18943                 }
18944         }
18945         /* Cleanup any mailbox completions which are not yet processed */
18946         do {
18947                 restart_loop = 0;
18948                 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
18949                         /*
18950                          * If this mailbox is already processed or it is
18951                          * for another vport, ignore it.
18952                          */
18953                         if ((mb->vport != vport) ||
18954                                 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
18955                                 continue;
18956
18957                         if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
18958                                 (mb->u.mb.mbxCommand != MBX_REG_VPI))
18959                                 continue;
18960
18961                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18962                         if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
18963                                 ndlp = (struct lpfc_nodelist *)mb->context2;
18964                                 /* Unregister the RPI when mailbox complete */
18965                                 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
18966                                 restart_loop = 1;
18967                                 spin_unlock_irq(&phba->hbalock);
18968                                 spin_lock(shost->host_lock);
18969                                 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
18970                                 spin_unlock(shost->host_lock);
18971                                 spin_lock_irq(&phba->hbalock);
18972                                 break;
18973                         }
18974                 }
18975         } while (restart_loop);
18976
18977         spin_unlock_irq(&phba->hbalock);
18978
18979         /* Release the cleaned-up mailbox commands */
18980         while (!list_empty(&mbox_cmd_list)) {
18981                 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
18982                 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
18983                         mp = (struct lpfc_dmabuf *) (mb->context1);
18984                         if (mp) {
18985                                 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
18986                                 kfree(mp);
18987                         }
18988                         ndlp = (struct lpfc_nodelist *) mb->context2;
18989                         mb->context2 = NULL;
18990                         if (ndlp) {
18991                                 spin_lock(shost->host_lock);
18992                                 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
18993                                 spin_unlock(shost->host_lock);
18994                                 lpfc_nlp_put(ndlp);
18995                         }
18996                 }
18997                 mempool_free(mb, phba->mbox_mem_pool);
18998         }
18999
19000         /* Release the ndlp with the cleaned-up active mailbox command */
19001         if (act_mbx_ndlp) {
19002                 spin_lock(shost->host_lock);
19003                 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19004                 spin_unlock(shost->host_lock);
19005                 lpfc_nlp_put(act_mbx_ndlp);
19006         }
19007 }
19008
19009 /**
19010  * lpfc_drain_txq - Drain the txq
19011  * @phba: Pointer to HBA context object.
19012  *
19013  * This function attempts to submit IOCBs on the txq
19014  * to the adapter.  For SLI4 adapters, the txq contains
19015  * ELS IOCBs that have been deferred because there
19016  * are no SGLs.  This congestion can occur with large
19017  * vport counts during node discovery.
19018  **/
19019
19020 uint32_t
19021 lpfc_drain_txq(struct lpfc_hba *phba)
19022 {
19023         LIST_HEAD(completions);
19024         struct lpfc_sli_ring *pring;
19025         struct lpfc_iocbq *piocbq = NULL;
19026         unsigned long iflags = 0;
19027         char *fail_msg = NULL;
19028         struct lpfc_sglq *sglq;
19029         union lpfc_wqe128 wqe128;
19030         union lpfc_wqe *wqe = (union lpfc_wqe *) &wqe128;
19031         uint32_t txq_cnt = 0;
19032
19033         pring = lpfc_phba_elsring(phba);
19034         if (unlikely(!pring))
19035                 return 0;
19036
19037         spin_lock_irqsave(&pring->ring_lock, iflags);
19038         list_for_each_entry(piocbq, &pring->txq, list) {
19039                 txq_cnt++;
19040         }
19041
19042         if (txq_cnt > pring->txq_max)
19043                 pring->txq_max = txq_cnt;
19044
19045         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19046
19047         while (!list_empty(&pring->txq)) {
19048                 spin_lock_irqsave(&pring->ring_lock, iflags);
19049
19050                 piocbq = lpfc_sli_ringtx_get(phba, pring);
19051                 if (!piocbq) {
19052                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19053                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19054                                 "2823 txq empty and txq_cnt is %d\n",
19055                                 txq_cnt);
19056                         break;
19057                 }
19058                 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
19059                 if (!sglq) {
19060                         __lpfc_sli_ringtx_put(phba, pring, piocbq);
19061                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19062                         break;
19063                 }
19064                 txq_cnt--;
19065
19066                 /* The xri and iocb resources are secured,
19067                  * attempt to issue the request
19068                  */
19069                 piocbq->sli4_lxritag = sglq->sli4_lxritag;
19070                 piocbq->sli4_xritag = sglq->sli4_xritag;
19071                 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
19072                         fail_msg = "to convert bpl to sgl";
19073                 else if (lpfc_sli4_iocb2wqe(phba, piocbq, wqe))
19074                         fail_msg = "to convert iocb to wqe";
19075                 else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe))
19076                         fail_msg = " - Wq is full";
19077                 else
19078                         lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
19079
19080                 if (fail_msg) {
19081                         /* Failed means we can't issue and need to cancel */
19082                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19083                                         "2822 IOCB failed %s iotag 0x%x "
19084                                         "xri 0x%x\n",
19085                                         fail_msg,
19086                                         piocbq->iotag, piocbq->sli4_xritag);
19087                         list_add_tail(&piocbq->list, &completions);
19088                 }
19089                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19090         }
19091
19092         /* Cancel all the IOCBs that cannot be issued */
19093         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
19094                                 IOERR_SLI_ABORTED);
19095
19096         return txq_cnt;
19097 }
19098
19099 /**
19100  * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
19101  * @phba: Pointer to HBA context object.
19102  * @pwqeq: Pointer to command WQE.
19103  * @sglq: Pointer to the scatter gather queue object.
19104  *
19105  * This routine converts the bpl or bde that is in the WQE
19106  * to a sgl list for the sli4 hardware. The physical address
19107  * of the bpl/bde is converted back to a virtual address.
19108  * If the WQE contains a BPL then the list of BDE's is
19109  * converted to sli4_sge's. If the WQE contains a single
19110  * BDE then it is converted to a single sli4_sge.
19111  * The WQE is still in cpu endianness so the contents of
19112  * the bpl can be used without byte swapping.
19113  *
19114  * Returns valid XRI = Success, NO_XRI = Failure.
19115  */
19116 static uint16_t
19117 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
19118                  struct lpfc_sglq *sglq)
19119 {
19120         uint16_t xritag = NO_XRI;
19121         struct ulp_bde64 *bpl = NULL;
19122         struct ulp_bde64 bde;
19123         struct sli4_sge *sgl  = NULL;
19124         struct lpfc_dmabuf *dmabuf;
19125         union lpfc_wqe *wqe;
19126         int numBdes = 0;
19127         int i = 0;
19128         uint32_t offset = 0; /* accumulated offset in the sg request list */
19129         int inbound = 0; /* number of sg reply entries inbound from firmware */
19130         uint32_t cmd;
19131
19132         if (!pwqeq || !sglq)
19133                 return xritag;
19134
19135         sgl  = (struct sli4_sge *)sglq->sgl;
19136         wqe = &pwqeq->wqe;
19137         pwqeq->iocb.ulpIoTag = pwqeq->iotag;
19138
19139         cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
19140         if (cmd == CMD_XMIT_BLS_RSP64_WQE)
19141                 return sglq->sli4_xritag;
19142         numBdes = pwqeq->rsvd2;
19143         if (numBdes) {
19144                 /* The addrHigh and addrLow fields within the WQE
19145                  * have not been byteswapped yet so there is no
19146                  * need to swap them back.
19147                  */
19148                 if (pwqeq->context3)
19149                         dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
19150                 else
19151                         return xritag;
19152
19153                 bpl  = (struct ulp_bde64 *)dmabuf->virt;
19154                 if (!bpl)
19155                         return xritag;
19156
19157                 for (i = 0; i < numBdes; i++) {
19158                         /* Should already be byte swapped. */
19159                         sgl->addr_hi = bpl->addrHigh;
19160                         sgl->addr_lo = bpl->addrLow;
19161
19162                         sgl->word2 = le32_to_cpu(sgl->word2);
19163                         if ((i+1) == numBdes)
19164                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
19165                         else
19166                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
19167                         /* swap the size field back to the cpu so we
19168                          * can assign it to the sgl.
19169                          */
19170                         bde.tus.w = le32_to_cpu(bpl->tus.w);
19171                         sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
19172                         /* The offsets in the sgl need to be accumulated
19173                          * separately for the request and reply lists.
19174                          * The request is always first, the reply follows.
19175                          */
19176                         switch (cmd) {
19177                         case CMD_GEN_REQUEST64_WQE:
19178                                 /* add up the reply sg entries */
19179                                 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
19180                                         inbound++;
19181                                 /* first inbound? reset the offset */
19182                                 if (inbound == 1)
19183                                         offset = 0;
19184                                 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19185                                 bf_set(lpfc_sli4_sge_type, sgl,
19186                                         LPFC_SGE_TYPE_DATA);
19187                                 offset += bde.tus.f.bdeSize;
19188                                 break;
19189                         case CMD_FCP_TRSP64_WQE:
19190                                 bf_set(lpfc_sli4_sge_offset, sgl, 0);
19191                                 bf_set(lpfc_sli4_sge_type, sgl,
19192                                         LPFC_SGE_TYPE_DATA);
19193                                 break;
19194                         case CMD_FCP_TSEND64_WQE:
19195                         case CMD_FCP_TRECEIVE64_WQE:
19196                                 bf_set(lpfc_sli4_sge_type, sgl,
19197                                         bpl->tus.f.bdeFlags);
19198                                 if (i < 3)
19199                                         offset = 0;
19200                                 else
19201                                         offset += bde.tus.f.bdeSize;
19202                                 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19203                                 break;
19204                         }
19205                         sgl->word2 = cpu_to_le32(sgl->word2);
19206                         bpl++;
19207                         sgl++;
19208                 }
19209         } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
19210                 /* The addrHigh and addrLow fields of the BDE have not
19211                  * been byteswapped yet so they need to be swapped
19212                  * before putting them in the sgl.
19213                  */
19214                 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
19215                 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
19216                 sgl->word2 = le32_to_cpu(sgl->word2);
19217                 bf_set(lpfc_sli4_sge_last, sgl, 1);
19218                 sgl->word2 = cpu_to_le32(sgl->word2);
19219                 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
19220         }
19221         return sglq->sli4_xritag;
19222 }
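
/*
 * Example (illustrative sketch): the conversion above relies on the
 * sli4_sge being little endian while the WQE is still in cpu order.
 * With hypothetical "is_last" and "bde_size" values, the per-SGE
 * pattern reduces to:
 *
 *        sgl->word2 = le32_to_cpu(sgl->word2);
 *        bf_set(lpfc_sli4_sge_last, sgl, is_last ? 1 : 0);
 *        sgl->word2 = cpu_to_le32(sgl->word2);
 *        sgl->sge_len = cpu_to_le32(bde_size);
 *
 * word2 must be brought to cpu order before bf_set() can edit it and
 * converted back before the hardware sees it.
 */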
19223
19224 /**
19225  * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
19226  * @phba: Pointer to HBA context object.
19227  * @ring_number: Base sli ring number
19228  * @pwqe: Pointer to command WQE.
19229  **/
19230 int
19231 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
19232                     struct lpfc_iocbq *pwqe)
19233 {
19234         union lpfc_wqe *wqe = &pwqe->wqe;
19235         struct lpfc_nvmet_rcv_ctx *ctxp;
19236         struct lpfc_queue *wq;
19237         struct lpfc_sglq *sglq;
19238         struct lpfc_sli_ring *pring;
19239         unsigned long iflags;
19240         uint32_t ret = 0;
19241
19242         /* NVME_LS and NVME_LS ABTS requests. */
19243         if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
19244                 pring =  phba->sli4_hba.nvmels_wq->pring;
19245                 spin_lock_irqsave(&pring->ring_lock, iflags);
19246                 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
19247                 if (!sglq) {
19248                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19249                         return WQE_BUSY;
19250                 }
19251                 pwqe->sli4_lxritag = sglq->sli4_lxritag;
19252                 pwqe->sli4_xritag = sglq->sli4_xritag;
19253                 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
19254                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19255                         return WQE_ERROR;
19256                 }
19257                 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19258                        pwqe->sli4_xritag);
19259                 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
19260                 if (ret) {
19261                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19262                         return ret;
19263                 }
19264
19265                 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19266                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19267                 return 0;
19268         }
19269
19270         /* NVME_FCREQ and NVME_ABTS requests */
19271         if (pwqe->iocb_flag & LPFC_IO_NVME) {
19272                 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
19273                 pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
19274
19275                 spin_lock_irqsave(&pring->ring_lock, iflags);
19276                 wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
19277                 bf_set(wqe_cqid, &wqe->generic.wqe_com,
19278                       phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
19279                 ret = lpfc_sli4_wq_put(wq, wqe);
19280                 if (ret) {
19281                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19282                         return ret;
19283                 }
19284                 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19285                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19286                 return 0;
19287         }
19288
19289         /* NVMET requests */
19290         if (pwqe->iocb_flag & LPFC_IO_NVMET) {
19291                 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
19292                 pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
19293
19294                 spin_lock_irqsave(&pring->ring_lock, iflags);
19295                 ctxp = pwqe->context2;
19296                 sglq = ctxp->ctxbuf->sglq;
19297                 if (pwqe->sli4_xritag ==  NO_XRI) {
19298                         pwqe->sli4_lxritag = sglq->sli4_lxritag;
19299                         pwqe->sli4_xritag = sglq->sli4_xritag;
19300                 }
19301                 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19302                        pwqe->sli4_xritag);
19303                 wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
19304                 bf_set(wqe_cqid, &wqe->generic.wqe_com,
19305                       phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
19306                 ret = lpfc_sli4_wq_put(wq, wqe);
19307                 if (ret) {
19308                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19309                         return ret;
19310                 }
19311                 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19312                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19313                 return 0;
19314         }
19315         return WQE_ERROR;
19316 }
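
/*
 * Example (illustrative sketch): an NVME LS caller would mark the
 * request and let the routine above choose the work queue. Assuming
 * "pwqe" is a fully built lpfc_iocbq and "ring_number" is the caller's
 * base ring:
 *
 *        pwqe->iocb_flag |= LPFC_IO_NVME_LS;
 *        rc = lpfc_sli4_issue_wqe(phba, ring_number, pwqe);
 *
 * A WQE_BUSY return means no els sglq was available and the request may
 * be retried later; WQE_ERROR or a nonzero lpfc_sli4_wq_put() status
 * should be treated as a hard failure.
 */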