/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/nvme-fc-driver.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;
/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_queue *eq,
				     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq,
				    struct lpfc_cqe *cqe);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}
#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of words required to be copied.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
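/*
 * Illustrative note (editor's sketch, not driver code): on a 64-bit
 * little-endian host the routine above degenerates to a plain 64-bit
 * word copy, e.g.
 *
 *	uint64_t wqe_image[8];		[one 64-byte WQE]
 *	lpfc_sli4_pcimem_bcopy(wqe_image, dst_qe, sizeof(wqe_image));
 *
 * On other hosts the lpfc_sli_pcimem_bcopy() fallback performs the same
 * copy but swaps each word between native and SLI byte order, per the
 * kernel-doc above. "dst_qe" is a hypothetical queue-entry pointer used
 * only for illustration.
 */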
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q || !wqe))
		return -ENOMEM;
	temp_wqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}

	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
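/*
 * Usage sketch (editor's illustration, not driver code): callers post a
 * WQE while holding the ring lock and treat any non-zero return as
 * "entry not posted", e.g.
 *
 *	spin_lock_irqsave(&pring->ring_lock, iflags);
 *	rc = lpfc_sli4_wq_put(wq, wqe);
 *	spin_unlock_irqrestore(&pring->ring_lock, iflags);
 *	if (rc)
 *		return IOCB_ERROR;	[queue full or invalid queue]
 */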
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}
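/*
 * Illustrative arithmetic (editor's note): with entry_count = 256,
 * hba_index = 254 and a completion index of 1, the loop above steps
 * 254 -> 255 -> 0 -> 1 and returns 3 entries released.
 */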
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}
/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed but not popped back to the HBA, then this routine will return
 * NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return eqe;
}
/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}
static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			struct lpfc_eqe *eqe)
{
	if (!phba->sli4_hba.pc_sli4_params.eqav)
		bf_set_le32(lpfc_eqe_valid, eqe, 0);

	eq->host_index = ((eq->host_index + 1) % eq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}
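/*
 * Illustrative note (editor's sketch): on EQ-autovalid (eqav) hardware
 * the valid bit is not cleared per entry; instead the polarity the host
 * expects flips each time host_index wraps. With entry_count = 256:
 *
 *	host_index:  254  255  0  (wrap)
 *	qe_valid:      1    1  0  (toggled on the wrap)
 *
 * lpfc_sli4_eq_get() then treats an EQE as new only while
 * bf_get_le32(lpfc_eqe_valid, eqe) == q->qe_valid.
 */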
static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe = NULL;
	u32 eq_count = 0, cq_count = 0;
	struct lpfc_cqe *cqe = NULL;
	struct lpfc_queue *cq = NULL, *childq = NULL;
	int cqid = 0;

	/* walk all the EQ entries and drop on the floor */
	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		/* Get the reference to the corresponding CQ */
		cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
		cq = NULL;

		list_for_each_entry(childq, &eq->child_list, list) {
			if (childq->queue_id == cqid) {
				cq = childq;
				break;
			}
		}
		/* If CQ is valid, iterate through it and drop all the CQEs */
		if (cq) {
			cqe = lpfc_sli4_cq_get(cq);
			while (cqe) {
				__lpfc_sli4_consume_cqe(phba, cq, cqe);
				cq_count++;
				cqe = lpfc_sli4_cq_get(cq);
			}
			/* Clear and re-arm the CQ */
			phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
							LPFC_QUEUE_REARM);
			cq_count = 0;
		}
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		eq_count++;
		eqe = lpfc_sli4_eq_get(eq);
	}

	/* Clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}
static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		     uint8_t rearm)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		consumed++;
		if (!(++count % eq->max_proc_limit))
			break;

		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}
		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	/* Track the max number of EQEs processed in 1 intr */
	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	eq->queue_claimed = 0;

rearm_and_exit:
	/* Always clear the EQ. */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

	return count;
}
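/*
 * Illustrative note (editor's sketch): with notify_interval = 16 the
 * loop above rings the EQ doorbell without rearming after every 16
 * consumed entries and zeroes the running 'consumed' count; the final
 * doorbell write then reports only the remainder and applies the
 * caller's rearm policy (LPFC_QUEUE_REARM or LPFC_QUEUE_NOARM).
 */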
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed but not popped back to the HBA, then this routine will return
 * NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	cqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_eq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return cqe;
}
static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	if (!phba->sli4_hba.pc_sli4_params.cqav)
		bf_set_le32(lpfc_cqe_valid, cqe, 0);

	cq->host_index = ((cq->host_index + 1) % cq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}
/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next available
 * entries on @hq and @dq. This function will then ring the Receive Queue
 * Doorbell to signal the HBA to start processing the Receive Queue Entry.
 * This function returns the index that the rqe was copied to if successful.
 * If no entries are available on @hq then this function will return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}
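/*
 * Usage sketch (editor's illustration, not driver code): posting one
 * header/data buffer pair, as the driver's RQ buffer posting paths do;
 * "rqb" is a hypothetical struct rqb_dmabuf pointer.
 *
 *	struct lpfc_rqe hrqe, drqe;
 *	int rc;
 *
 *	hrqe.address_lo = putPaddrLow(rqb->hbuf.phys);
 *	hrqe.address_hi = putPaddrHigh(rqb->hbuf.phys);
 *	drqe.address_lo = putPaddrLow(rqb->dbuf.phys);
 *	drqe.address_hi = putPaddrHigh(rqb->dbuf.phys);
 *	rc = lpfc_sli4_rq_put(hq, dq, &hrqe, &drqe);
 *
 * A negative rc means the pair was not posted; otherwise rc is the put
 * index the entries were copied to.
 */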
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}
/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}
/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}
/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object, else it returns NULL.
 **/
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}
/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}
/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}
/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}
/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}
/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}
/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}
/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: The xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}
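/*
 * Illustrative flow (editor's sketch): the three RRQ bitmap helpers are
 * used together roughly as follows when an exchange is reused:
 *
 *	if (lpfc_test_rrq_active(phba, ndlp, xritag))
 *		...			[xri still quarantined, pick another]
 *	lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq);
 *	...				[RATOV later, from the rrq timer]
 *	lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
 */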
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns  0 = rrq activated for this xri
 *          < 0 = No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
			     msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty then it is successful; it returns a pointer to the newly allocated
 * sglq object, else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring = NULL;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
		pring = phba->sli4_hba.nvmels_wq->pring;
	else
		pring = lpfc_phba_elsring(phba);

	lockdep_assert_held(&pring->ring_lock);

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}
/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty then it is successful; it returns a pointer to the newly allocated
 * sglq object, else it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}
/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}
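/*
 * Usage sketch (editor's illustration, not driver code):
 *
 *	struct lpfc_iocbq *iocbq = lpfc_sli_get_iocbq(phba);
 *
 *	if (!iocbq)
 *		return -ENOMEM;		[iocb pool exhausted]
 *	...				[build and issue the command]
 *	lpfc_sli_release_iocbq(phba, iocbq);
 */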
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	lockdep_assert_held(&phba->hbalock);

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		pring = phba->sli4_hba.els_wq->pring;
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}
/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (!piocb->iocb_cmpl) {
			if (piocb->iocb_flag & LPFC_IO_NVME)
				lpfc_nvme_cancel_iocb(phba, piocb);
			else
				lpfc_sli_release_iocbq(phba, piocb);
		} else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}
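/*
 * Usage sketch (editor's illustration): flushing a ring's txq with a
 * "link down" style status, mirroring how the driver's abort/flush
 * paths call this routine:
 *
 *	LIST_HEAD(completions);
 *
 *	spin_lock_irq(&phba->hbalock);
 *	list_splice_init(&pring->txq, &completions);
 *	spin_unlock_irq(&phba->hbalock);
 *
 *	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
 *			      IOERR_SLI_ABORTED);
 */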
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
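/*
 * Usage sketch (editor's illustration): the ring event handlers
 * dispatch on the returned type, e.g.
 *
 *	switch (lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK)) {
 *	case LPFC_SOL_IOCB:	...	[match against the txcmplq]
 *	case LPFC_UNSOL_IOCB:	...	[hand to unsolicited handler]
 *	case LPFC_ABORT_IOCB:	...	[complete the aborted command]
 *	default:		...	[log and drop]
 *	}
 */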
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * number.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}
/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * The driver calls this function with the hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		lockdep_assert_held(&pring->ring_lock);
	else
		lockdep_assert_held(&phba->hbalock);

	BUG_ON(!piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
	pring->txcmplq_cnt++;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}
/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}
/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		     pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}
/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger
 * iocbq_lookup array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}
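/*
 * Illustrative note (editor's sketch): the lookup table grows in
 * LPFC_IOCBQ_LOOKUP_INCREMENT steps, so iocbq_lookup_len advances one
 * increment at a time until another increment would exceed 0xffff;
 * iotags are 16-bit values and 0 is reserved as the invalid iotag,
 * which is why allocation fails once the table can no longer grow.
 */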
/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	lockdep_assert_held(&phba->hbalock);
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;


	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform
 * firmware that there is pending work to be done for this ring and requests
 * an interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}
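/*
 * Illustrative note (editor's sketch): each ring owns a 4-bit nibble of
 * the Chip Attention register, so for ring 2 the write above becomes
 * (CA_R0ATT | CA_R0CE_REQ) << 8, i.e. the attention and IOCB-entry
 * request bits for that ring.
 */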
/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}
1907 * lpfc_sli_resume_iocb - Process iocbs in the txq
1908 * @phba: Pointer to HBA context object.
1909 * @pring: Pointer to driver SLI ring object.
1911 * This function is called with hbalock held to post pending iocbs
1912 * in the txq to the firmware. This function is called when driver
1913 * detects space available in the ring.
1916 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1919 struct lpfc_iocbq *nextiocb;
1921 lockdep_assert_held(&phba->hbalock);
1925 * (a) there is anything on the txq to send
1926 * (b) the link is up
1927 * (c) link attention events can be processed (fcp ring only)
1928 * (d) IOCB processing is not blocked by the outstanding mbox command.
1931 if (lpfc_is_link_up(phba) &&
1932 (!list_empty(&pring->txq)) &&
1933 (pring->ringno != LPFC_FCP_RING ||
1934 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
1936 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1937 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1938 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1941 lpfc_sli_update_ring(phba, pring);
1943 lpfc_sli_update_full_ring(phba, pring);
1950 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1951 * @phba: Pointer to HBA context object.
1952 * @hbqno: HBQ number.
1954 * This function is called with hbalock held to get the next
1955 * available slot for the given HBQ. If there is free slot
1956 * available for the HBQ it will return pointer to the next available
1957 * HBQ entry else it will return NULL.
1959 static struct lpfc_hbq_entry *
1960 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1962 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1964 lockdep_assert_held(&phba->hbalock);
1966 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1967 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1968 hbqp->next_hbqPutIdx = 0;
1970 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1971 uint32_t raw_index = phba->hbq_get[hbqno];
1972 uint32_t getidx = le32_to_cpu(raw_index);
1974 hbqp->local_hbqGetIdx = getidx;
1976 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1977 lpfc_printf_log(phba, KERN_ERR,
1978 LOG_SLI | LOG_VPORT,
1979 "1802 HBQ %d: local_hbqGetIdx "
1980 "%u is > than hbqp->entry_count %u\n",
1981 hbqno, hbqp->local_hbqGetIdx,
1984 phba->link_state = LPFC_HBA_ERROR;
1988 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1992 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
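/*
 * Illustrative model (not part of the driver) of the full-ring test above:
 * a slot is free only while the put index has not caught the consumer's
 * get index; the cached get index is refreshed from the device only when a
 * collision is suspected, and an out-of-range value is treated as fatal.
 */
#include <stdint.h>
#include <stdbool.h>

struct demo_hbq_idx {
	uint32_t next_put;		/* next_hbqPutIdx analogue */
	uint32_t cached_get;		/* local_hbqGetIdx analogue */
	uint32_t entries;
	volatile uint32_t *hw_get;	/* get index owned by the device */
};

static bool demo_hbq_slot_free(struct demo_hbq_idx *q)
{
	if (q->cached_get == q->next_put) {
		q->cached_get = *q->hw_get;	/* refresh from the device */
		if (q->cached_get >= q->entries)
			return false;	/* corrupt index: caller errors out */
	}
	return q->cached_get != q->next_put;
}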
1997 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
1998 * @phba: Pointer to HBA context object.
2000 * This function is called with no lock held to free all the
2001 * hbq buffers while uninitializing the SLI interface. It also
2002 * frees the HBQ buffers returned by the firmware but not yet
2003 * processed by the upper layers.
2006 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
2008 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2009 struct hbq_dmabuf *hbq_buf;
2010 unsigned long flags;
2013 hbq_count = lpfc_sli_hbq_count();
2014 /* Return all memory used by all HBQs */
2015 spin_lock_irqsave(&phba->hbalock, flags);
2016 for (i = 0; i < hbq_count; ++i) {
2017 list_for_each_entry_safe(dmabuf, next_dmabuf,
2018 &phba->hbqs[i].hbq_buffer_list, list) {
2019 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2020 list_del(&hbq_buf->dbuf.list);
2021 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2023 phba->hbqs[i].buffer_count = 0;
2026 /* Mark the HBQs not in use */
2027 phba->hbq_in_use = 0;
2028 spin_unlock_irqrestore(&phba->hbalock, flags);
2032 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
2033 * @phba: Pointer to HBA context object.
2034 * @hbqno: HBQ number.
2035 * @hbq_buf: Pointer to HBQ buffer.
2037 * This function is called with the hbalock held to post an
2038 * hbq buffer to the firmware. If the function finds an empty
2039 * slot in the HBQ, it will post the buffer. The function returns a
2040 * pointer to the hbq entry if it successfully posts the buffer;
2041 * otherwise it returns NULL.
2044 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2045 struct hbq_dmabuf *hbq_buf)
2047 lockdep_assert_held(&phba->hbalock);
2048 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2052 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2053 * @phba: Pointer to HBA context object.
2054 * @hbqno: HBQ number.
2055 * @hbq_buf: Pointer to HBQ buffer.
2057 * This function is called with the hbalock held to post an hbq buffer to the
2058 * firmware. If the function finds an empty slot in the HBQ, it will post the
2059 * buffer and place it on the hbq_buffer_list. The function returns zero if
2060 * it successfully posts the buffer; otherwise it returns an error.
2063 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2064 struct hbq_dmabuf *hbq_buf)
2066 struct lpfc_hbq_entry *hbqe;
2067 dma_addr_t physaddr = hbq_buf->dbuf.phys;
2069 lockdep_assert_held(&phba->hbalock);
2070 /* Get next HBQ entry slot to use */
2071 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2073 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2075 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2076 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
2077 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2078 hbqe->bde.tus.f.bdeFlags = 0;
2079 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2080 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2082 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2083 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2085 readl(phba->hbq_put + hbqno);
2086 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2093 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2094 * @phba: Pointer to HBA context object.
2095 * @hbqno: HBQ number.
2096 * @hbq_buf: Pointer to HBQ buffer.
2098 * This function is called with the hbalock held to post an RQE to the SLI4
2099 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2100 * the hbq_buffer_list and return zero, otherwise it will return an error.
2103 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2104 struct hbq_dmabuf *hbq_buf)
2107 struct lpfc_rqe hrqe;
2108 struct lpfc_rqe drqe;
2109 struct lpfc_queue *hrq;
2110 struct lpfc_queue *drq;
2112 if (hbqno != LPFC_ELS_HBQ)
2114 hrq = phba->sli4_hba.hdr_rq;
2115 drq = phba->sli4_hba.dat_rq;
2117 lockdep_assert_held(&phba->hbalock);
2118 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2119 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2120 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2121 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2122 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2125 hbq_buf->tag = (rc | (hbqno << 16));
2126 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
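/*
 * Illustrative sketch (plain C, not driver types): the buffer tag packs the
 * HBQ number into the high 16 bits and the per-queue index into the low 16
 * bits, which is why later lookups recover the queue with (tag >> 16).
 */
#include <stdint.h>

static inline uint32_t demo_make_tag(uint32_t hbqno, uint32_t index)
{
	return (index & 0xffff) | (hbqno << 16);
}

static inline uint32_t demo_tag_to_hbqno(uint32_t tag)
{
	return tag >> 16;
}

static inline uint32_t demo_tag_to_index(uint32_t tag)
{
	return tag & 0xffff;
}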
2130 /* HBQ for ELS and CT traffic. */
2131 static struct lpfc_hbq_init lpfc_els_hbq = {
2136 .ring_mask = (1 << LPFC_ELS_RING),
2143 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2148 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2149 * @phba: Pointer to HBA context object.
2150 * @hbqno: HBQ number.
2151 * @count: Number of HBQ buffers to be posted.
2153 * This function is called with no lock held to post more hbq buffers to the
2154 * given HBQ. The function returns the number of HBQ buffers successfully
2155 * posted.
2158 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2160 uint32_t i, posted = 0;
2161 unsigned long flags;
2162 struct hbq_dmabuf *hbq_buffer;
2163 LIST_HEAD(hbq_buf_list);
2164 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2167 if ((phba->hbqs[hbqno].buffer_count + count) >
2168 lpfc_hbq_defs[hbqno]->entry_count)
2169 count = lpfc_hbq_defs[hbqno]->entry_count -
2170 phba->hbqs[hbqno].buffer_count;
2173 /* Allocate HBQ entries */
2174 for (i = 0; i < count; i++) {
2175 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2178 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2180 /* Check whether HBQ is still in use */
2181 spin_lock_irqsave(&phba->hbalock, flags);
2182 if (!phba->hbq_in_use)
2184 while (!list_empty(&hbq_buf_list)) {
2185 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2187 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2189 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2190 phba->hbqs[hbqno].buffer_count++;
2193 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2195 spin_unlock_irqrestore(&phba->hbalock, flags);
2198 spin_unlock_irqrestore(&phba->hbalock, flags);
2199 while (!list_empty(&hbq_buf_list)) {
2200 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2202 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
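/*
 * Illustrative pattern (not driver code): allocate onto a private list with
 * no lock held, then take the lock once to validate state and post; what
 * cannot be posted is freed. The demo_* helpers are hypothetical stand-ins
 * for the allocator, poster, and lock used by the function above.
 */
struct demo_buf { struct demo_buf *next; };

extern struct demo_buf *demo_alloc(void);
extern void demo_free(struct demo_buf *b);
extern int demo_post(struct demo_buf *b);	/* 0 on success */
extern void demo_lock(void);
extern void demo_unlock(void);
extern int demo_queue_in_use;

static unsigned int demo_fill(unsigned int count)
{
	struct demo_buf *head = NULL, *b;
	unsigned int posted = 0;

	while (count--) {			/* allocate without the lock */
		b = demo_alloc();
		if (!b)
			break;
		b->next = head;
		head = b;
	}

	demo_lock();
	while (head) {
		b = head;
		head = b->next;
		if (demo_queue_in_use && demo_post(b) == 0)
			posted++;
		else
			demo_free(b);	/* queue gone or the post failed */
	}
	demo_unlock();
	return posted;
}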
2208 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2209 * @phba: Pointer to HBA context object.
2212 * This function posts more buffers to the HBQ. This function
2213 * is called with no lock held. The function returns the number of HBQ entries
2214 * successfully allocated.
2217 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2219 if (phba->sli_rev == LPFC_SLI_REV4)
2222 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2223 lpfc_hbq_defs[qno]->add_count);
2227 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2228 * @phba: Pointer to HBA context object.
2229 * @qno: HBQ queue number.
2231 * This function is called from SLI initialization code path with
2232 * no lock held to post initial HBQ buffers to firmware. The
2233 * function returns the number of HBQ entries successfully allocated.
2236 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2238 if (phba->sli_rev == LPFC_SLI_REV4)
2239 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2240 lpfc_hbq_defs[qno]->entry_count);
2242 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2243 lpfc_hbq_defs[qno]->init_count);
2247 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2248 * @rb_list: Pointer to the driver hbq buffer list to take the buffer from.
2251 * This function removes the first hbq buffer on an hbq list and returns a
2252 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2254 static struct hbq_dmabuf *
2255 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2257 struct lpfc_dmabuf *d_buf;
2259 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2262 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2266 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2267 * @phba: Pointer to HBA context object.
2268 * @hrq: Pointer to the header receive queue.
2270 * This function removes the first RQ buffer on an RQ buffer list and returns a
2271 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2273 static struct rqb_dmabuf *
2274 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2276 struct lpfc_dmabuf *h_buf;
2277 struct lpfc_rqb *rqbp;
2280 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2281 struct lpfc_dmabuf, list);
2284 rqbp->buffer_count--;
2285 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2289 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2290 * @phba: Pointer to HBA context object.
2291 * @tag: Tag of the hbq buffer.
2293 * This function searches for the hbq buffer associated with the given tag in
2294 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2295 * otherwise it returns NULL.
2297 static struct hbq_dmabuf *
2298 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2300 struct lpfc_dmabuf *d_buf;
2301 struct hbq_dmabuf *hbq_buf;
2305 if (hbqno >= LPFC_MAX_HBQS)
2308 spin_lock_irq(&phba->hbalock);
2309 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2310 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2311 if (hbq_buf->tag == tag) {
2312 spin_unlock_irq(&phba->hbalock);
2316 spin_unlock_irq(&phba->hbalock);
2317 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
2318 "1803 Bad hbq tag. Data: x%x x%x\n",
2319 tag, phba->hbqs[tag >> 16].buffer_count);
2324 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2325 * @phba: Pointer to HBA context object.
2326 * @hbq_buffer: Pointer to HBQ buffer.
2328 * This function is called with the hbalock held. It gives back
2329 * the hbq buffer to firmware. If the HBQ does not have space to
2330 * post the buffer, it will free the buffer.
2333 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2338 hbqno = hbq_buffer->tag >> 16;
2339 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2340 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2345 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2346 * @mbxCommand: mailbox command code.
2348 * This function is called by the mailbox event handler function to verify
2349 * that the completed mailbox command is a legitimate mailbox command. If the
2350 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2351 * and the mailbox event handler will take the HBA offline.
2354 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2358 switch (mbxCommand) {
2362 case MBX_WRITE_VPARMS:
2363 case MBX_RUN_BIU_DIAG:
2366 case MBX_CONFIG_LINK:
2367 case MBX_CONFIG_RING:
2368 case MBX_RESET_RING:
2369 case MBX_READ_CONFIG:
2370 case MBX_READ_RCONFIG:
2371 case MBX_READ_SPARM:
2372 case MBX_READ_STATUS:
2376 case MBX_READ_LNK_STAT:
2378 case MBX_UNREG_LOGIN:
2380 case MBX_DUMP_MEMORY:
2381 case MBX_DUMP_CONTEXT:
2384 case MBX_UPDATE_CFG:
2386 case MBX_DEL_LD_ENTRY:
2387 case MBX_RUN_PROGRAM:
2389 case MBX_SET_VARIABLE:
2390 case MBX_UNREG_D_ID:
2391 case MBX_KILL_BOARD:
2392 case MBX_CONFIG_FARP:
2395 case MBX_RUN_BIU_DIAG64:
2396 case MBX_CONFIG_PORT:
2397 case MBX_READ_SPARM64:
2398 case MBX_READ_RPI64:
2399 case MBX_REG_LOGIN64:
2400 case MBX_READ_TOPOLOGY:
2403 case MBX_LOAD_EXP_ROM:
2404 case MBX_ASYNCEVT_ENABLE:
2408 case MBX_PORT_CAPABILITIES:
2409 case MBX_PORT_IOV_CONTROL:
2410 case MBX_SLI4_CONFIG:
2411 case MBX_SLI4_REQ_FTRS:
2413 case MBX_UNREG_FCFI:
2418 case MBX_RESUME_RPI:
2419 case MBX_READ_EVENT_LOG_STATUS:
2420 case MBX_READ_EVENT_LOG:
2421 case MBX_SECURITY_MGMT:
2423 case MBX_ACCESS_VDATA:
2434 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2435 * @phba: Pointer to HBA context object.
2436 * @pmboxq: Pointer to mailbox command.
2438 * This is completion handler function for mailbox commands issued from
2439 * lpfc_sli_issue_mbox_wait function. This function is called by the
2440 * mailbox event handler function with no lock held. This function
2441 * will wake up the thread waiting on the completion pointed to by
2442 * context3 of the mailbox.
2445 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2447 unsigned long drvr_flag;
2448 struct completion *pmbox_done;
2451 * If pmbox_done is empty, the driver thread gave up waiting and
2452 * continued running.
2454 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2455 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2456 pmbox_done = (struct completion *)pmboxq->context3;
2458 complete(pmbox_done);
2459 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
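/*
 * Sketch of the waiter side this handler pairs with, loosely modeled on the
 * driver's issue-and-wait path (simplified; not the actual implementation):
 * the waiter parks a struct completion in context3, and the handler above
 * sets LPFC_MBX_WAKE and completes it under the hbalock.
 */
static int demo_issue_mbox_and_wait(struct lpfc_hba *phba,
				    LPFC_MBOXQ_t *pmboxq, uint32_t timeout)
{
	DECLARE_COMPLETION_ONSTACK(mbox_done);
	int rc;

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
	pmboxq->context3 = &mbox_done;

	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (rc == MBX_BUSY || rc == MBX_SUCCESS)
		wait_for_completion_timeout(&mbox_done,
					    msecs_to_jiffies(timeout * 1000));

	/* Clear context3 under the lock so a late wake cannot race. */
	spin_lock_irq(&phba->hbalock);
	pmboxq->context3 = NULL;
	spin_unlock_irq(&phba->hbalock);

	return (pmboxq->mbox_flag & LPFC_MBX_WAKE) ? MBX_SUCCESS : MBX_TIMEOUT;
}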
2464 __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2466 unsigned long iflags;
2468 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2469 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2470 spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
2471 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2472 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2473 spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
2475 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2479 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2480 * @phba: Pointer to HBA context object.
2481 * @pmb: Pointer to mailbox object.
2483 * This function is the default mailbox completion handler. It
2484 * frees the memory resources associated with the completed mailbox
2485 * command. If the completed command is a REG_LOGIN mailbox command,
2486 * this function will issue an UNREG_LOGIN to reclaim the RPI.
2489 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2491 struct lpfc_vport *vport = pmb->vport;
2492 struct lpfc_dmabuf *mp;
2493 struct lpfc_nodelist *ndlp;
2494 struct Scsi_Host *shost;
2498 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
2501 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2506 * If a REG_LOGIN succeeded after the node was destroyed or the node
2507 * is in re-discovery, the driver needs to clean up the RPI.
2509 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2510 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2511 !pmb->u.mb.mbxStatus) {
2512 rpi = pmb->u.mb.un.varWords[0];
2513 vpi = pmb->u.mb.un.varRegLogin.vpi;
2514 if (phba->sli_rev == LPFC_SLI_REV4)
2515 vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2516 lpfc_unreg_login(phba, vpi, rpi, pmb);
2518 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2519 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2520 if (rc != MBX_NOT_FINISHED)
2524 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2525 !(phba->pport->load_flag & FC_UNLOADING) &&
2526 !pmb->u.mb.mbxStatus) {
2527 shost = lpfc_shost_from_vport(vport);
2528 spin_lock_irq(shost->host_lock);
2529 vport->vpi_state |= LPFC_VPI_REGISTERED;
2530 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2531 spin_unlock_irq(shost->host_lock);
2534 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2535 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2537 pmb->ctx_buf = NULL;
2538 pmb->ctx_ndlp = NULL;
2541 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2542 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2544 /* Check to see if there are any deferred events to process */
2548 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2549 "1438 UNREG cmpl deferred mbox x%x "
2550 "on NPort x%x Data: x%x x%x %px\n",
2551 ndlp->nlp_rpi, ndlp->nlp_DID,
2552 ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
2554 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2555 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2556 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2557 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2558 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2560 __lpfc_sli_rpi_release(vport, ndlp);
2562 if (vport->load_flag & FC_UNLOADING)
2564 pmb->ctx_ndlp = NULL;
2568 /* Check security permission status on INIT_LINK mailbox command */
2569 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2570 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2571 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2572 "2860 SLI authentication is required "
2573 "for INIT_LINK but has not done yet\n");
2575 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2576 lpfc_sli4_mbox_cmd_free(phba, pmb);
2578 mempool_free(pmb, phba->mbox_mem_pool);
2581 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2582 * @phba: Pointer to HBA context object.
2583 * @pmb: Pointer to mailbox object.
2585 * This function is the unreg rpi mailbox completion handler. It
2586 * frees the memory resources associated with the completed mailbox
2587 * command. An additional reference is put on the ndlp to prevent
2588 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2589 * the unreg mailbox command completes; this routine puts that
2590 * reference back.
2594 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2596 struct lpfc_vport *vport = pmb->vport;
2597 struct lpfc_nodelist *ndlp;
2599 ndlp = pmb->ctx_ndlp;
2600 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2601 if (phba->sli_rev == LPFC_SLI_REV4 &&
2602 (bf_get(lpfc_sli_intf_if_type,
2603 &phba->sli4_hba.sli_intf) >=
2604 LPFC_SLI_INTF_IF_TYPE_2)) {
2607 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2608 "0010 UNREG_LOGIN vpi:%x "
2609 "rpi:%x DID:%x defer x%x flg x%x "
2611 vport->vpi, ndlp->nlp_rpi,
2612 ndlp->nlp_DID, ndlp->nlp_defer_did,
2614 ndlp->nlp_usg_map, ndlp);
2615 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2618 /* Check to see if there are any deferred
2621 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2622 (ndlp->nlp_defer_did !=
2623 NLP_EVT_NOTHING_PENDING)) {
2625 vport, KERN_INFO, LOG_DISCOVERY,
2626 "4111 UNREG cmpl deferred "
2628 "NPort x%x Data: x%x x%px\n",
2629 ndlp->nlp_rpi, ndlp->nlp_DID,
2630 ndlp->nlp_defer_did, ndlp);
2631 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2632 ndlp->nlp_defer_did =
2633 NLP_EVT_NOTHING_PENDING;
2634 lpfc_issue_els_plogi(
2635 vport, ndlp->nlp_DID, 0);
2637 __lpfc_sli_rpi_release(vport, ndlp);
2643 mempool_free(pmb, phba->mbox_mem_pool);
2647 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2648 * @phba: Pointer to HBA context object.
2650 * This function is called with no lock held. This function processes all
2651 * the completed mailbox commands and gives them to the upper layers. The interrupt
2652 * service routine processes mailbox completion interrupt and adds completed
2653 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2654 * The worker thread calls lpfc_sli_handle_mb_event, which will return the
2655 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
2656 * function returns the mailbox commands to the upper layer by calling the
2657 * completion handler function of each mailbox.
2660 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2667 phba->sli.slistat.mbox_event++;
2669 /* Get all completed mailbox buffers into the cmplq */
2670 spin_lock_irq(&phba->hbalock);
2671 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2672 spin_unlock_irq(&phba->hbalock);
2674 /* Get a Mailbox buffer to setup mailbox commands for callback */
2676 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2682 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2684 lpfc_debugfs_disc_trc(pmb->vport,
2685 LPFC_DISC_TRC_MBOX_VPORT,
2686 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2687 (uint32_t)pmbox->mbxCommand,
2688 pmbox->un.varWords[0],
2689 pmbox->un.varWords[1]);
2692 lpfc_debugfs_disc_trc(phba->pport,
2694 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2695 (uint32_t)pmbox->mbxCommand,
2696 pmbox->un.varWords[0],
2697 pmbox->un.varWords[1]);
2702 * It is a fatal error if an unknown mailbox command completes.
2704 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2706 /* Unknown mailbox command compl */
2707 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2708 "(%d):0323 Unknown Mailbox command "
2709 "x%x (x%x/x%x) Cmpl\n",
2710 pmb->vport ? pmb->vport->vpi :
2713 lpfc_sli_config_mbox_subsys_get(phba,
2715 lpfc_sli_config_mbox_opcode_get(phba,
2717 phba->link_state = LPFC_HBA_ERROR;
2718 phba->work_hs = HS_FFER3;
2719 lpfc_handle_eratt(phba);
2723 if (pmbox->mbxStatus) {
2724 phba->sli.slistat.mbox_stat_err++;
2725 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2726 /* Mbox cmd cmpl error - RETRYing */
2727 lpfc_printf_log(phba, KERN_INFO,
2729 "(%d):0305 Mbox cmd cmpl "
2730 "error - RETRYing Data: x%x "
2731 "(x%x/x%x) x%x x%x x%x\n",
2732 pmb->vport ? pmb->vport->vpi :
2735 lpfc_sli_config_mbox_subsys_get(phba,
2737 lpfc_sli_config_mbox_opcode_get(phba,
2740 pmbox->un.varWords[0],
2741 pmb->vport ? pmb->vport->port_state :
2742 LPFC_VPORT_UNKNOWN);
2743 pmbox->mbxStatus = 0;
2744 pmbox->mbxOwner = OWN_HOST;
2745 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2746 if (rc != MBX_NOT_FINISHED)
2751 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2752 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2753 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
2754 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2756 pmb->vport ? pmb->vport->vpi : 0,
2758 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2759 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2761 *((uint32_t *) pmbox),
2762 pmbox->un.varWords[0],
2763 pmbox->un.varWords[1],
2764 pmbox->un.varWords[2],
2765 pmbox->un.varWords[3],
2766 pmbox->un.varWords[4],
2767 pmbox->un.varWords[5],
2768 pmbox->un.varWords[6],
2769 pmbox->un.varWords[7],
2770 pmbox->un.varWords[8],
2771 pmbox->un.varWords[9],
2772 pmbox->un.varWords[10]);
2775 pmb->mbox_cmpl(phba, pmb);
2781 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2782 * @phba: Pointer to HBA context object.
2783 * @pring: Pointer to driver SLI ring object.
2786 * This function is called with no lock held. When the QUE_BUFTAG_BIT is
2787 * set in the tag, the buffer was posted for a particular exchange and
2788 * the function returns the buffer without posting a replacement.
2789 * If the buffer is for unsolicited ELS or CT traffic, this function
2790 * returns the buffer and also posts another buffer to the firmware.
2792 static struct lpfc_dmabuf *
2793 lpfc_sli_get_buff(struct lpfc_hba *phba,
2794 struct lpfc_sli_ring *pring,
2797 struct hbq_dmabuf *hbq_entry;
2799 if (tag & QUE_BUFTAG_BIT)
2800 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2801 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2804 return &hbq_entry->dbuf;
2808 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2809 * @phba: Pointer to HBA context object.
2810 * @pring: Pointer to driver SLI ring object.
2811 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2812 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2813 * @fch_type: the type for the first frame of the sequence.
2815 * This function is called with no lock held. This function uses the r_ctl and
2816 * type of the received sequence to find the correct callback function to call
2817 * to process the sequence.
2820 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2821 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2828 lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
2834 /* unSolicited Responses */
2835 if (pring->prt[0].profile) {
2836 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2837 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2841 /* We must search, based on rctl / type, for the right routine */
2843 for (i = 0; i < pring->num_mask; i++) {
2844 if ((pring->prt[i].rctl == fch_r_ctl) &&
2845 (pring->prt[i].type == fch_type)) {
2846 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2847 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2848 (phba, pring, saveq);
2856 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
2857 * @phba: Pointer to HBA context object.
2858 * @pring: Pointer to driver SLI ring object.
2859 * @saveq: Pointer to the unsolicited iocb.
2861 * This function is called with no lock held by the ring event handler
2862 * when there is an unsolicited iocb posted to the response ring by the
2863 * firmware. This function gets the buffer associated with the iocbs
2864 * and calls the event handler for the ring. This function handles both
2865 * qring buffers and hbq buffers.
2866 * When the function returns 1, the caller can free the iocb object; otherwise
2867 * upper layer functions will free the iocb objects.
2870 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2871 struct lpfc_iocbq *saveq)
2875 uint32_t Rctl, Type;
2876 struct lpfc_iocbq *iocbq;
2877 struct lpfc_dmabuf *dmzbuf;
2879 irsp = &(saveq->iocb);
2881 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2882 if (pring->lpfc_sli_rcv_async_status)
2883 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2885 lpfc_printf_log(phba,
2888 "0316 Ring %d handler: unexpected "
2889 "ASYNC_STATUS iocb received evt_code "
2892 irsp->un.asyncstat.evt_code);
2896 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2897 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2898 if (irsp->ulpBdeCount > 0) {
2899 dmzbuf = lpfc_sli_get_buff(phba, pring,
2900 irsp->un.ulpWord[3]);
2901 lpfc_in_buf_free(phba, dmzbuf);
2904 if (irsp->ulpBdeCount > 1) {
2905 dmzbuf = lpfc_sli_get_buff(phba, pring,
2906 irsp->unsli3.sli3Words[3]);
2907 lpfc_in_buf_free(phba, dmzbuf);
2910 if (irsp->ulpBdeCount > 2) {
2911 dmzbuf = lpfc_sli_get_buff(phba, pring,
2912 irsp->unsli3.sli3Words[7]);
2913 lpfc_in_buf_free(phba, dmzbuf);
2919 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2920 if (irsp->ulpBdeCount != 0) {
2921 saveq->context2 = lpfc_sli_get_buff(phba, pring,
2922 irsp->un.ulpWord[3]);
2923 if (!saveq->context2)
2924 lpfc_printf_log(phba,
2927 "0341 Ring %d Cannot find buffer for "
2928 "an unsolicited iocb. tag 0x%x\n",
2930 irsp->un.ulpWord[3]);
2932 if (irsp->ulpBdeCount == 2) {
2933 saveq->context3 = lpfc_sli_get_buff(phba, pring,
2934 irsp->unsli3.sli3Words[7]);
2935 if (!saveq->context3)
2936 lpfc_printf_log(phba,
2939 "0342 Ring %d Cannot find buffer for an"
2940 " unsolicited iocb. tag 0x%x\n",
2942 irsp->unsli3.sli3Words[7]);
2944 list_for_each_entry(iocbq, &saveq->list, list) {
2945 irsp = &(iocbq->iocb);
2946 if (irsp->ulpBdeCount != 0) {
2947 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2948 irsp->un.ulpWord[3]);
2949 if (!iocbq->context2)
2950 lpfc_printf_log(phba,
2953 "0343 Ring %d Cannot find "
2954 "buffer for an unsolicited iocb"
2955 ". tag 0x%x\n", pring->ringno,
2956 irsp->un.ulpWord[3]);
2958 if (irsp->ulpBdeCount == 2) {
2959 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2960 irsp->unsli3.sli3Words[7]);
2961 if (!iocbq->context3)
2962 lpfc_printf_log(phba,
2965 "0344 Ring %d Cannot find "
2966 "buffer for an unsolicited "
2969 irsp->unsli3.sli3Words[7]);
2973 if (irsp->ulpBdeCount != 0 &&
2974 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2975 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2978 /* search continue save q for same XRI */
2979 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2980 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2981 saveq->iocb.unsli3.rcvsli3.ox_id) {
2982 list_add_tail(&saveq->list, &iocbq->list);
2988 list_add_tail(&saveq->clist,
2989 &pring->iocb_continue_saveq);
2990 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2991 list_del_init(&iocbq->clist);
2993 irsp = &(saveq->iocb);
2997 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2998 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2999 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3000 Rctl = FC_RCTL_ELS_REQ;
3003 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3004 Rctl = w5p->hcsw.Rctl;
3005 Type = w5p->hcsw.Type;
3007 /* Firmware Workaround */
3008 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3009 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3010 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3011 Rctl = FC_RCTL_ELS_REQ;
3013 w5p->hcsw.Rctl = Rctl;
3014 w5p->hcsw.Type = Type;
3018 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3019 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3020 "0313 Ring %d handler: unexpected Rctl x%x "
3021 "Type x%x received\n",
3022 pring->ringno, Rctl, Type);
3028 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
3029 * @phba: Pointer to HBA context object.
3030 * @pring: Pointer to driver SLI ring object.
3031 * @prspiocb: Pointer to response iocb object.
3033 * This function looks up the iocb_lookup table to get the command iocb
3034 * corresponding to the given response iocb using the iotag of the
3035 * response iocb. The driver calls this function with the hbalock held
3036 * for SLI3 ports or the ring lock held for SLI4 ports.
3037 * This function returns the command iocb object if it finds the command
3038 * iocb else returns NULL.
3040 static struct lpfc_iocbq *
3041 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3042 struct lpfc_sli_ring *pring,
3043 struct lpfc_iocbq *prspiocb)
3045 struct lpfc_iocbq *cmd_iocb = NULL;
3047 spinlock_t *temp_lock = NULL;
3048 unsigned long iflag = 0;
3050 if (phba->sli_rev == LPFC_SLI_REV4)
3051 temp_lock = &pring->ring_lock;
3053 temp_lock = &phba->hbalock;
3055 spin_lock_irqsave(temp_lock, iflag);
3056 iotag = prspiocb->iocb.ulpIoTag;
3058 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3059 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3060 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3061 /* remove from txcmpl queue list */
3062 list_del_init(&cmd_iocb->list);
3063 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3064 pring->txcmplq_cnt--;
3065 spin_unlock_irqrestore(temp_lock, iflag);
3070 spin_unlock_irqrestore(temp_lock, iflag);
3071 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3072 "0317 iotag x%x is out of "
3073 "range: max iotag x%x wd0 x%x\n",
3074 iotag, phba->sli.last_iotag,
3075 *(((uint32_t *) &prspiocb->iocb) + 7));
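/*
 * Illustrative model (plain C, not driver code) of the O(1) lookup above:
 * iotags index directly into a pointer table sized to the largest tag ever
 * issued, so completions need no list walk. Names are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

struct demo_io { unsigned int flags; };
#define DEMO_ON_TXCMPLQ 0x1u

struct demo_iotag_table {
	struct demo_io **slot;		/* [0..last_iotag]; slot 0 unused */
	uint16_t last_iotag;
};

static struct demo_io *demo_lookup(struct demo_iotag_table *t, uint16_t iotag)
{
	struct demo_io *io;

	if (iotag == 0 || iotag > t->last_iotag)
		return NULL;			/* out-of-range tag */
	io = t->slot[iotag];
	if (io && (io->flags & DEMO_ON_TXCMPLQ)) {
		io->flags &= ~DEMO_ON_TXCMPLQ;	/* claim exactly once */
		return io;
	}
	return NULL;				/* stale or unknown tag */
}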
3080 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3081 * @phba: Pointer to HBA context object.
3082 * @pring: Pointer to driver SLI ring object.
3085 * This function looks up the iocb_lookup table to get the command iocb
3086 * corresponding to the given iotag. The driver calls this function with
3087 * the ring lock held because this function is an SLI4 port only helper.
3088 * This function returns the command iocb object if it finds the command
3089 * iocb else returns NULL.
3091 static struct lpfc_iocbq *
3092 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3093 struct lpfc_sli_ring *pring, uint16_t iotag)
3095 struct lpfc_iocbq *cmd_iocb = NULL;
3096 spinlock_t *temp_lock = NULL;
3097 unsigned long iflag = 0;
3099 if (phba->sli_rev == LPFC_SLI_REV4)
3100 temp_lock = &pring->ring_lock;
3102 temp_lock = &phba->hbalock;
3104 spin_lock_irqsave(temp_lock, iflag);
3105 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3106 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3107 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3108 /* remove from txcmpl queue list */
3109 list_del_init(&cmd_iocb->list);
3110 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3111 pring->txcmplq_cnt--;
3112 spin_unlock_irqrestore(temp_lock, iflag);
3117 spin_unlock_irqrestore(temp_lock, iflag);
3118 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3119 "0372 iotag x%x lookup error: max iotag (x%x) "
3121 iotag, phba->sli.last_iotag,
3122 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3127 * lpfc_sli_process_sol_iocb - process solicited iocb completion
3128 * @phba: Pointer to HBA context object.
3129 * @pring: Pointer to driver SLI ring object.
3130 * @saveq: Pointer to the response iocb to be processed.
3132 * This function is called by the ring event handler for non-fcp
3133 * rings when there is a new response iocb in the response ring.
3134 * The caller is not required to hold any locks. This function
3135 * gets the command iocb associated with the response iocb and
3136 * calls the completion handler for the command iocb. If there
3137 * is no completion handler, the function will free the resources
3138 * associated with command iocb. If the response iocb is for
3139 * an already aborted command iocb, the status of the completion
3140 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3141 * This function always returns 1.
3144 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3145 struct lpfc_iocbq *saveq)
3147 struct lpfc_iocbq *cmdiocbp;
3149 unsigned long iflag;
3151 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3153 if (cmdiocbp->iocb_cmpl) {
3155 * If an ELS command failed send an event to mgmt
3158 if (saveq->iocb.ulpStatus &&
3159 (pring->ringno == LPFC_ELS_RING) &&
3160 (cmdiocbp->iocb.ulpCommand ==
3161 CMD_ELS_REQUEST64_CR))
3162 lpfc_send_els_failure_event(phba,
3166 * Post all ELS completions to the worker thread.
3167 * All other are passed to the completion callback.
3169 if (pring->ringno == LPFC_ELS_RING) {
3170 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3171 (cmdiocbp->iocb_flag &
3172 LPFC_DRIVER_ABORTED)) {
3173 spin_lock_irqsave(&phba->hbalock,
3175 cmdiocbp->iocb_flag &=
3176 ~LPFC_DRIVER_ABORTED;
3177 spin_unlock_irqrestore(&phba->hbalock,
3179 saveq->iocb.ulpStatus =
3180 IOSTAT_LOCAL_REJECT;
3181 saveq->iocb.un.ulpWord[4] =
3184 /* Firmware could still be in progress
3185 * of DMAing payload, so don't free data
3186 * buffer till after a hbeat.
3188 spin_lock_irqsave(&phba->hbalock,
3190 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
3191 spin_unlock_irqrestore(&phba->hbalock,
3194 if (phba->sli_rev == LPFC_SLI_REV4) {
3195 if (saveq->iocb_flag &
3196 LPFC_EXCHANGE_BUSY) {
3197 /* Set cmdiocb flag for the
3198 * exchange busy so sgl (xri)
3199 * will not be released until
3200 * the abort xri is received
3204 &phba->hbalock, iflag);
3205 cmdiocbp->iocb_flag |=
3207 spin_unlock_irqrestore(
3208 &phba->hbalock, iflag);
3210 if (cmdiocbp->iocb_flag &
3211 LPFC_DRIVER_ABORTED) {
3213 * Clear LPFC_DRIVER_ABORTED
3214 * bit in case it was driver
3218 &phba->hbalock, iflag);
3219 cmdiocbp->iocb_flag &=
3220 ~LPFC_DRIVER_ABORTED;
3221 spin_unlock_irqrestore(
3222 &phba->hbalock, iflag);
3223 cmdiocbp->iocb.ulpStatus =
3224 IOSTAT_LOCAL_REJECT;
3225 cmdiocbp->iocb.un.ulpWord[4] =
3226 IOERR_ABORT_REQUESTED;
3228 * For SLI4, irspiocb contains
3229 * NO_XRI in sli_xritag, so it
3230 * does not affect the sgl (xri)
3231 * release process.
3233 saveq->iocb.ulpStatus =
3234 IOSTAT_LOCAL_REJECT;
3235 saveq->iocb.un.ulpWord[4] =
3238 &phba->hbalock, iflag);
3240 LPFC_DELAY_MEM_FREE;
3241 spin_unlock_irqrestore(
3242 &phba->hbalock, iflag);
3246 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
3248 lpfc_sli_release_iocbq(phba, cmdiocbp);
3251 * Unknown initiating command based on the response iotag.
3252 * This could be the case on the ELS ring because of
3255 if (pring->ringno != LPFC_ELS_RING) {
3257 * Ring <ringno> handler: unexpected completion IoTag
3260 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3261 "0322 Ring %d handler: "
3262 "unexpected completion IoTag x%x "
3263 "Data: x%x x%x x%x x%x\n",
3265 saveq->iocb.ulpIoTag,
3266 saveq->iocb.ulpStatus,
3267 saveq->iocb.un.ulpWord[4],
3268 saveq->iocb.ulpCommand,
3269 saveq->iocb.ulpContext);
3277 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3278 * @phba: Pointer to HBA context object.
3279 * @pring: Pointer to driver SLI ring object.
3281 * This function is called from the iocb ring event handlers when
3282 * put pointer is ahead of the get pointer for a ring. This function signals
3283 * an error attention condition to the worker thread and the worker
3284 * thread will transition the HBA to offline state.
3287 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3289 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3291 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3292 * rsp ring <portRspMax>
3294 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3295 "0312 Ring %d handler: portRspPut %d "
3296 "is bigger than rsp ring %d\n",
3297 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3298 pring->sli.sli3.numRiocb);
3300 phba->link_state = LPFC_HBA_ERROR;
3303 * All error attention handlers are posted to
3306 phba->work_ha |= HA_ERATT;
3307 phba->work_hs = HS_FFER3;
3309 lpfc_worker_wake_up(phba);
3315 * lpfc_poll_eratt - Error attention polling timer timeout handler
3316 * @t: Pointer to the timer list entry embedded in the HBA context object.
3318 * This function is invoked by the Error Attention polling timer when the
3319 * timer times out. It will check the SLI Error Attention register for
3320 * possible attention events. If so, it will post an Error Attention event
3321 * and wake up worker thread to process it. Otherwise, it will set up the
3322 * Error Attention polling timer for the next poll.
3324 void lpfc_poll_eratt(struct timer_list *t)
3326 struct lpfc_hba *phba;
3328 uint64_t sli_intr, cnt;
3330 phba = from_timer(phba, t, eratt_poll);
3332 /* Here we will also keep track of interrupts per sec of the hba */
3333 sli_intr = phba->sli.slistat.sli_intr;
3335 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3336 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3339 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3341 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3342 do_div(cnt, phba->eratt_poll_interval);
3343 phba->sli.slistat.sli_ips = cnt;
3345 phba->sli.slistat.sli_prev_intr = sli_intr;
3347 /* Check chip HA register for error event */
3348 eratt = lpfc_sli_check_eratt(phba);
3351 /* Tell the worker thread there is work to do */
3352 lpfc_worker_wake_up(phba);
3354 /* Restart the timer for next eratt poll */
3355 mod_timer(&phba->eratt_poll,
3357 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
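/*
 * Self-contained version (illustrative) of the wrap-safe rate computation
 * above: when the running counter wrapped since the last poll, the delta is
 * the distance to the top of the 64-bit range plus the new value. do_div()
 * is the kernel's 64-by-32 divide; plain '/' stands in for it here.
 */
#include <stdint.h>

static uint64_t demo_events_per_sec(uint64_t prev, uint64_t curr,
				    uint32_t interval_secs)
{
	uint64_t cnt;

	if (prev > curr)		/* counter wrapped since last poll */
		cnt = (UINT64_MAX - prev) + curr;
	else
		cnt = curr - prev;

	return interval_secs ? cnt / interval_secs : 0;
}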
3363 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3364 * @phba: Pointer to HBA context object.
3365 * @pring: Pointer to driver SLI ring object.
3366 * @mask: Host attention register mask for this ring.
3368 * This function is called from the interrupt context when there is a ring
3369 * event for the fcp ring. The caller does not hold any lock.
3370 * The function processes each response iocb in the response ring until it
3371 * finds an iocb with the LE bit set, chaining all the iocbs up to and
3372 * including it. The function calls the completion handler of the command iocb
3373 * if the response iocb indicates a completion for a command iocb or it is
3374 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3375 * function if this is an unsolicited iocb.
3376 * This routine presumes LPFC_FCP_RING handling and doesn't bother
3377 * to check it explicitly.
3380 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3381 struct lpfc_sli_ring *pring, uint32_t mask)
3383 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3384 IOCB_t *irsp = NULL;
3385 IOCB_t *entry = NULL;
3386 struct lpfc_iocbq *cmdiocbq = NULL;
3387 struct lpfc_iocbq rspiocbq;
3389 uint32_t portRspPut, portRspMax;
3391 lpfc_iocb_type type;
3392 unsigned long iflag;
3393 uint32_t rsp_cmpl = 0;
3395 spin_lock_irqsave(&phba->hbalock, iflag);
3396 pring->stats.iocb_event++;
3399 * The next available response entry should never exceed the maximum
3400 * entries. If it does, treat it as an adapter hardware error.
3402 portRspMax = pring->sli.sli3.numRiocb;
3403 portRspPut = le32_to_cpu(pgp->rspPutInx);
3404 if (unlikely(portRspPut >= portRspMax)) {
3405 lpfc_sli_rsp_pointers_error(phba, pring);
3406 spin_unlock_irqrestore(&phba->hbalock, iflag);
3409 if (phba->fcp_ring_in_use) {
3410 spin_unlock_irqrestore(&phba->hbalock, iflag);
3413 phba->fcp_ring_in_use = 1;
3416 while (pring->sli.sli3.rspidx != portRspPut) {
3418 * Fetch an entry off the ring and copy it into a local data
3419 * structure. The copy involves a byte-swap since the
3420 * network byte order and pci byte orders are different.
3422 entry = lpfc_resp_iocb(phba, pring);
3423 phba->last_completion_time = jiffies;
3425 if (++pring->sli.sli3.rspidx >= portRspMax)
3426 pring->sli.sli3.rspidx = 0;
3428 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3429 (uint32_t *) &rspiocbq.iocb,
3430 phba->iocb_rsp_size);
3431 INIT_LIST_HEAD(&(rspiocbq.list));
3432 irsp = &rspiocbq.iocb;
3434 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3435 pring->stats.iocb_rsp++;
3438 if (unlikely(irsp->ulpStatus)) {
3440 * If resource errors reported from HBA, reduce
3441 * queuedepths of the SCSI device.
3443 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3444 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3445 IOERR_NO_RESOURCES)) {
3446 spin_unlock_irqrestore(&phba->hbalock, iflag);
3447 phba->lpfc_rampdown_queue_depth(phba);
3448 spin_lock_irqsave(&phba->hbalock, iflag);
3451 /* Rsp ring <ringno> error: IOCB */
3452 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3453 "0336 Rsp Ring %d error: IOCB Data: "
3454 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3456 irsp->un.ulpWord[0],
3457 irsp->un.ulpWord[1],
3458 irsp->un.ulpWord[2],
3459 irsp->un.ulpWord[3],
3460 irsp->un.ulpWord[4],
3461 irsp->un.ulpWord[5],
3462 *(uint32_t *)&irsp->un1,
3463 *((uint32_t *)&irsp->un1 + 1));
3467 case LPFC_ABORT_IOCB:
3470 * Idle exchange closed via ABTS from port. No iocb
3471 * resources need to be recovered.
3473 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3474 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3475 "0333 IOCB cmd 0x%x"
3476 " processed. Skipping"
3482 spin_unlock_irqrestore(&phba->hbalock, iflag);
3483 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3485 spin_lock_irqsave(&phba->hbalock, iflag);
3486 if (unlikely(!cmdiocbq))
3488 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3489 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3490 if (cmdiocbq->iocb_cmpl) {
3491 spin_unlock_irqrestore(&phba->hbalock, iflag);
3492 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3494 spin_lock_irqsave(&phba->hbalock, iflag);
3497 case LPFC_UNSOL_IOCB:
3498 spin_unlock_irqrestore(&phba->hbalock, iflag);
3499 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3500 spin_lock_irqsave(&phba->hbalock, iflag);
3503 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3504 char adaptermsg[LPFC_MAX_ADPTMSG];
3505 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3506 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3508 dev_warn(&((phba->pcidev)->dev),
3510 phba->brd_no, adaptermsg);
3512 /* Unknown IOCB command */
3513 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3514 "0334 Unknown IOCB command "
3515 "Data: x%x, x%x x%x x%x x%x\n",
3516 type, irsp->ulpCommand,
3525 * The response IOCB has been processed. Update the ring
3526 * pointer in SLIM. If the port response put pointer has not
3527 * been updated, sync the pgp->rspPutInx and fetch the new port
3528 * response put pointer.
3530 writel(pring->sli.sli3.rspidx,
3531 &phba->host_gp[pring->ringno].rspGetInx);
3533 if (pring->sli.sli3.rspidx == portRspPut)
3534 portRspPut = le32_to_cpu(pgp->rspPutInx);
3537 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3538 pring->stats.iocb_rsp_full++;
3539 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3540 writel(status, phba->CAregaddr);
3541 readl(phba->CAregaddr);
3543 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3544 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3545 pring->stats.iocb_cmd_empty++;
3547 /* Force update of the local copy of cmdGetInx */
3548 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3549 lpfc_sli_resume_iocb(phba, pring);
3551 if ((pring->lpfc_sli_cmd_available))
3552 (pring->lpfc_sli_cmd_available) (phba, pring);
3556 phba->fcp_ring_in_use = 0;
3557 spin_unlock_irqrestore(&phba->hbalock, iflag);
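/*
 * Illustrative model (not driver code) of the consumer loop above: walk the
 * response ring until the local get index catches the port's put index,
 * publish the get index back after each entry, and re-read the put index
 * once caught up in case the port has produced more entries meanwhile.
 */
#include <stdint.h>

struct demo_rsp_ring {
	uint32_t getidx;		/* driver-owned consume index */
	uint32_t entries;
	volatile uint32_t *hw_putidx;	/* port-owned produce index */
	volatile uint32_t *hw_getidx;	/* consume index the port polls */
};

static void demo_consume(struct demo_rsp_ring *r, void (*handle)(uint32_t))
{
	uint32_t put = *r->hw_putidx;

	while (r->getidx != put) {
		handle(r->getidx);		/* process one response */
		if (++r->getidx >= r->entries)
			r->getidx = 0;
		*r->hw_getidx = r->getidx;	/* publish consumed index */
		if (r->getidx == put)
			put = *r->hw_putidx;	/* pick up any new work */
	}
}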
3562 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3563 * @phba: Pointer to HBA context object.
3564 * @pring: Pointer to driver SLI ring object.
3565 * @rspiocbp: Pointer to driver response IOCB object.
3567 * This function is called from the worker thread when there is a slow-path
3568 * response IOCB to process. This function chains all the response iocbs until
3569 * seeing the iocb with the LE bit set. The function will call
3570 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3571 * completion of a command iocb. The function will call the
3572 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3573 * The function frees the resources or calls the completion handler if this
3574 * iocb is an abort completion. The function returns NULL when the response
3575 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3576 * this function shall chain the iocb on to the iocb_continueq and return the
3577 * response iocb passed in.
3579 static struct lpfc_iocbq *
3580 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3581 struct lpfc_iocbq *rspiocbp)
3583 struct lpfc_iocbq *saveq;
3584 struct lpfc_iocbq *cmdiocbp;
3585 struct lpfc_iocbq *next_iocb;
3586 IOCB_t *irsp = NULL;
3587 uint32_t free_saveq;
3588 uint8_t iocb_cmd_type;
3589 lpfc_iocb_type type;
3590 unsigned long iflag;
3593 spin_lock_irqsave(&phba->hbalock, iflag);
3594 /* First add the response iocb to the continueq list */
3595 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3596 pring->iocb_continueq_cnt++;
3598 /* Now, determine whether the list is completed for processing */
3599 irsp = &rspiocbp->iocb;
3602 * By default, the driver expects to free all resources
3603 * associated with this iocb completion.
3606 saveq = list_get_first(&pring->iocb_continueq,
3607 struct lpfc_iocbq, list);
3608 irsp = &(saveq->iocb);
3609 list_del_init(&pring->iocb_continueq);
3610 pring->iocb_continueq_cnt = 0;
3612 pring->stats.iocb_rsp++;
3615 * If resource errors reported from HBA, reduce
3616 * queuedepths of the SCSI device.
3618 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3619 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3620 IOERR_NO_RESOURCES)) {
3621 spin_unlock_irqrestore(&phba->hbalock, iflag);
3622 phba->lpfc_rampdown_queue_depth(phba);
3623 spin_lock_irqsave(&phba->hbalock, iflag);
3626 if (irsp->ulpStatus) {
3627 /* Rsp ring <ringno> error: IOCB */
3628 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3629 "0328 Rsp Ring %d error: "
3634 "x%x x%x x%x x%x\n",
3636 irsp->un.ulpWord[0],
3637 irsp->un.ulpWord[1],
3638 irsp->un.ulpWord[2],
3639 irsp->un.ulpWord[3],
3640 irsp->un.ulpWord[4],
3641 irsp->un.ulpWord[5],
3642 *(((uint32_t *) irsp) + 6),
3643 *(((uint32_t *) irsp) + 7),
3644 *(((uint32_t *) irsp) + 8),
3645 *(((uint32_t *) irsp) + 9),
3646 *(((uint32_t *) irsp) + 10),
3647 *(((uint32_t *) irsp) + 11),
3648 *(((uint32_t *) irsp) + 12),
3649 *(((uint32_t *) irsp) + 13),
3650 *(((uint32_t *) irsp) + 14),
3651 *(((uint32_t *) irsp) + 15));
3655 * Fetch the IOCB command type and call the correct completion
3656 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3657 * get freed back to the lpfc_iocb_list by the discovery
3660 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3661 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3664 spin_unlock_irqrestore(&phba->hbalock, iflag);
3665 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3666 spin_lock_irqsave(&phba->hbalock, iflag);
3669 case LPFC_UNSOL_IOCB:
3670 spin_unlock_irqrestore(&phba->hbalock, iflag);
3671 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3672 spin_lock_irqsave(&phba->hbalock, iflag);
3677 case LPFC_ABORT_IOCB:
3679 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
3680 spin_unlock_irqrestore(&phba->hbalock, iflag);
3681 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3683 spin_lock_irqsave(&phba->hbalock, iflag);
3686 /* Call the specified completion routine */
3687 if (cmdiocbp->iocb_cmpl) {
3688 spin_unlock_irqrestore(&phba->hbalock,
3690 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3692 spin_lock_irqsave(&phba->hbalock,
3695 __lpfc_sli_release_iocbq(phba,
3700 case LPFC_UNKNOWN_IOCB:
3701 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3702 char adaptermsg[LPFC_MAX_ADPTMSG];
3703 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3704 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3706 dev_warn(&((phba->pcidev)->dev),
3708 phba->brd_no, adaptermsg);
3710 /* Unknown IOCB command */
3711 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3712 "0335 Unknown IOCB "
3713 "command Data: x%x "
3724 list_for_each_entry_safe(rspiocbp, next_iocb,
3725 &saveq->list, list) {
3726 list_del_init(&rspiocbp->list);
3727 __lpfc_sli_release_iocbq(phba, rspiocbp);
3729 __lpfc_sli_release_iocbq(phba, saveq);
3733 spin_unlock_irqrestore(&phba->hbalock, iflag);
3738 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3739 * @phba: Pointer to HBA context object.
3740 * @pring: Pointer to driver SLI ring object.
3741 * @mask: Host attention register mask for this ring.
3743 * This routine wraps the actual slow_ring event process routine from the
3744 * API jump table function pointer from the lpfc_hba struct.
3747 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3748 struct lpfc_sli_ring *pring, uint32_t mask)
3750 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
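/*
 * Sketch of the jump-table idea (illustrative, hypothetical names): the hba
 * object carries a handler pointer bound once when the SLI revision is
 * detected, so hot paths dispatch without rechecking the revision.
 */
struct demo_hba;
typedef void (*demo_ring_handler_t)(struct demo_hba *);

struct demo_hba {
	int sli_rev;
	demo_ring_handler_t handle_slow_ring;	/* bound once at init */
};

static void demo_handler_s3(struct demo_hba *hba) { (void)hba; /* SLI-3 */ }
static void demo_handler_s4(struct demo_hba *hba) { (void)hba; /* SLI-4 */ }

static void demo_bind_handlers(struct demo_hba *hba)
{
	hba->handle_slow_ring = (hba->sli_rev >= 4) ? demo_handler_s4
						    : demo_handler_s3;
}

/* Thereafter callers simply do: hba->handle_slow_ring(hba); */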
3754 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3755 * @phba: Pointer to HBA context object.
3756 * @pring: Pointer to driver SLI ring object.
3757 * @mask: Host attention register mask for this ring.
3759 * This function is called from the worker thread when there is a ring event
3760 * for non-fcp rings. The caller does not hold any lock. The function will
3761 * remove each response iocb in the response ring and call the handle
3762 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3765 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3766 struct lpfc_sli_ring *pring, uint32_t mask)
3768 struct lpfc_pgp *pgp;
3770 IOCB_t *irsp = NULL;
3771 struct lpfc_iocbq *rspiocbp = NULL;
3772 uint32_t portRspPut, portRspMax;
3773 unsigned long iflag;
3776 pgp = &phba->port_gp[pring->ringno];
3777 spin_lock_irqsave(&phba->hbalock, iflag);
3778 pring->stats.iocb_event++;
3781 * The next available response entry should never exceed the maximum
3782 * entries. If it does, treat it as an adapter hardware error.
3784 portRspMax = pring->sli.sli3.numRiocb;
3785 portRspPut = le32_to_cpu(pgp->rspPutInx);
3786 if (portRspPut >= portRspMax) {
3788 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3789 * rsp ring <portRspMax>
3791 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3792 "0303 Ring %d handler: portRspPut %d "
3793 "is bigger than rsp ring %d\n",
3794 pring->ringno, portRspPut, portRspMax);
3796 phba->link_state = LPFC_HBA_ERROR;
3797 spin_unlock_irqrestore(&phba->hbalock, iflag);
3799 phba->work_hs = HS_FFER3;
3800 lpfc_handle_eratt(phba);
3806 while (pring->sli.sli3.rspidx != portRspPut) {
3808 * Build a completion list and call the appropriate handler.
3809 * The process is to get the next available response iocb, get
3810 * a free iocb from the list, copy the response data into the
3811 * free iocb, insert to the continuation list, and update the
3812 * next response index to slim. This process makes response
3813 * iocbs in the ring available to DMA as fast as possible but
3814 * pays a penalty for a copy operation. Since the iocb is
3815 * only 32 bytes, this penalty is considered small relative to
3816 * the PCI reads for register values and a slim write. When
3817 * the ulpLe field is set, the entire Command has been
3820 entry = lpfc_resp_iocb(phba, pring);
3822 phba->last_completion_time = jiffies;
3823 rspiocbp = __lpfc_sli_get_iocbq(phba);
3824 if (rspiocbp == NULL) {
3825 printk(KERN_ERR "%s: out of buffers! Failing "
3826 "completion.\n", __func__);
3830 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3831 phba->iocb_rsp_size);
3832 irsp = &rspiocbp->iocb;
3834 if (++pring->sli.sli3.rspidx >= portRspMax)
3835 pring->sli.sli3.rspidx = 0;
3837 if (pring->ringno == LPFC_ELS_RING) {
3838 lpfc_debugfs_slow_ring_trc(phba,
3839 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3840 *(((uint32_t *) irsp) + 4),
3841 *(((uint32_t *) irsp) + 6),
3842 *(((uint32_t *) irsp) + 7));
3845 writel(pring->sli.sli3.rspidx,
3846 &phba->host_gp[pring->ringno].rspGetInx);
3848 spin_unlock_irqrestore(&phba->hbalock, iflag);
3849 /* Handle the response IOCB */
3850 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3851 spin_lock_irqsave(&phba->hbalock, iflag);
3854 * If the port response put pointer has not been updated, sync
3855 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3856 * response put pointer.
3858 if (pring->sli.sli3.rspidx == portRspPut) {
3859 portRspPut = le32_to_cpu(pgp->rspPutInx);
3861 } /* while (pring->sli.sli3.rspidx != portRspPut) */
3863 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3864 /* At least one response entry has been freed */
3865 pring->stats.iocb_rsp_full++;
3866 /* SET RxRE_RSP in Chip Att register */
3867 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3868 writel(status, phba->CAregaddr);
3869 readl(phba->CAregaddr); /* flush */
3871 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3872 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3873 pring->stats.iocb_cmd_empty++;
3875 /* Force update of the local copy of cmdGetInx */
3876 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3877 lpfc_sli_resume_iocb(phba, pring);
3879 if ((pring->lpfc_sli_cmd_available))
3880 (pring->lpfc_sli_cmd_available) (phba, pring);
3884 spin_unlock_irqrestore(&phba->hbalock, iflag);
3889 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3890 * @phba: Pointer to HBA context object.
3891 * @pring: Pointer to driver SLI ring object.
3892 * @mask: Host attention register mask for this ring.
3894 * This function is called from the worker thread when there is a pending
3895 * ELS response iocb on the driver internal slow-path response iocb worker
3896 * queue. The caller does not hold any lock. The function will remove each
3897 * response iocb from the response worker queue and call the handle
3898 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3901 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3902 struct lpfc_sli_ring *pring, uint32_t mask)
3904 struct lpfc_iocbq *irspiocbq;
3905 struct hbq_dmabuf *dmabuf;
3906 struct lpfc_cq_event *cq_event;
3907 unsigned long iflag;
3910 spin_lock_irqsave(&phba->hbalock, iflag);
3911 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3912 spin_unlock_irqrestore(&phba->hbalock, iflag);
3913 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3914 /* Get the response iocb from the head of work queue */
3915 spin_lock_irqsave(&phba->hbalock, iflag);
3916 list_remove_head(&phba->sli4_hba.sp_queue_event,
3917 cq_event, struct lpfc_cq_event, list);
3918 spin_unlock_irqrestore(&phba->hbalock, iflag);
3920 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3921 case CQE_CODE_COMPL_WQE:
3922 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3924 /* Translate ELS WCQE to response IOCBQ */
3925 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3928 lpfc_sli_sp_handle_rspiocb(phba, pring,
3932 case CQE_CODE_RECEIVE:
3933 case CQE_CODE_RECEIVE_V1:
3934 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3936 lpfc_sli4_handle_received_buffer(phba, dmabuf);
3943 /* Limit the number of events to 64 to avoid soft lockups */
3950 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
3951 * @phba: Pointer to HBA context object.
3952 * @pring: Pointer to driver SLI ring object.
3954 * This function aborts all iocbs in the given ring and frees all the iocb
3955 * objects in txq. This function issues an abort iocb for all the iocb commands
3956 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3957 * the return of this function. The caller is not required to hold any locks.
3960 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3962 LIST_HEAD(completions);
3963 struct lpfc_iocbq *iocb, *next_iocb;
3965 if (pring->ringno == LPFC_ELS_RING) {
3966 lpfc_fabric_abort_hba(phba);
3969 /* Error everything on txq and txcmplq */
3972 if (phba->sli_rev >= LPFC_SLI_REV4) {
3973 spin_lock_irq(&pring->ring_lock);
3974 list_splice_init(&pring->txq, &completions);
3976 spin_unlock_irq(&pring->ring_lock);
3978 spin_lock_irq(&phba->hbalock);
3979 /* Next issue ABTS for everything on the txcmplq */
3980 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3981 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3982 spin_unlock_irq(&phba->hbalock);
3984 spin_lock_irq(&phba->hbalock);
3985 list_splice_init(&pring->txq, &completions);
3988 /* Next issue ABTS for everything on the txcmplq */
3989 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3990 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3991 spin_unlock_irq(&phba->hbalock);
3994 /* Cancel all the IOCBs from the completions list */
3995 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3996 IOERR_SLI_ABORTED);
4000 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4001 * @phba: Pointer to HBA context object.
4004 * This function aborts all iocbs in FCP rings and frees all the iocb
4005 * objects in txq. This function issues an abort iocb for all the iocb commands
4006 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4007 * the return of this function. The caller is not required to hold any locks.
4010 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4012 struct lpfc_sli *psli = &phba->sli;
4013 struct lpfc_sli_ring *pring;
4016 /* Look on all the FCP Rings for the iotag */
4017 if (phba->sli_rev >= LPFC_SLI_REV4) {
4018 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4019 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4020 lpfc_sli_abort_iocb_ring(phba, pring);
4023 pring = &psli->sli3_ring[LPFC_FCP_RING];
4024 lpfc_sli_abort_iocb_ring(phba, pring);
4029 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
4030 * @phba: Pointer to HBA context object.
4032 * This function flushes all iocbs in the IO ring and frees all the iocb
4033 * objects in txq and txcmplq. This function will not issue abort iocbs
4034 * for all the iocb commands in txcmplq; they will just be returned with
4035 * IOERR_SLI_DOWN. This function is invoked from EEH when the device's PCI
4036 * slot has been permanently disabled.
4039 lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4043 struct lpfc_sli *psli = &phba->sli;
4044 struct lpfc_sli_ring *pring;
4046 struct lpfc_iocbq *piocb, *next_iocb;
4048 spin_lock_irq(&phba->hbalock);
4049 if (phba->hba_flag & HBA_IOQ_FLUSH ||
4050 !phba->sli4_hba.hdwq) {
4051 spin_unlock_irq(&phba->hbalock);
4052 return;
4053 }
4054 /* Indicate the I/O queues are flushed */
4055 phba->hba_flag |= HBA_IOQ_FLUSH;
4056 spin_unlock_irq(&phba->hbalock);
4058 /* Look on all the FCP Rings for the iotag */
4059 if (phba->sli_rev >= LPFC_SLI_REV4) {
4060 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4061 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4063 spin_lock_irq(&pring->ring_lock);
4064 /* Retrieve everything on txq */
4065 list_splice_init(&pring->txq, &txq);
4066 list_for_each_entry_safe(piocb, next_iocb,
4067 &pring->txcmplq, list)
4068 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4069 /* Retrieve everything on the txcmplq */
4070 list_splice_init(&pring->txcmplq, &txcmplq);
4072 pring->txcmplq_cnt = 0;
4073 spin_unlock_irq(&pring->ring_lock);
4076 lpfc_sli_cancel_iocbs(phba, &txq,
4077 IOSTAT_LOCAL_REJECT,
4078 IOERR_SLI_DOWN);
4079 /* Flush the txcmplq */
4080 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4081 IOSTAT_LOCAL_REJECT,
4082 IOERR_SLI_DOWN);
4085 pring = &psli->sli3_ring[LPFC_FCP_RING];
4087 spin_lock_irq(&phba->hbalock);
4088 /* Retrieve everything on txq */
4089 list_splice_init(&pring->txq, &txq);
4090 list_for_each_entry_safe(piocb, next_iocb,
4091 &pring->txcmplq, list)
4092 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4093 /* Retrieve everything on the txcmplq */
4094 list_splice_init(&pring->txcmplq, &txcmplq);
4096 pring->txcmplq_cnt = 0;
4097 spin_unlock_irq(&phba->hbalock);
4100 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4101 IOERR_SLI_DOWN);
4102 /* Flush the txcmplq */
4103 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4104 IOERR_SLI_DOWN);
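/*
 * Illustrative usage (assumption): unlike the abort routines above, this
 * flush is for paths where the hardware can no longer DMA at all; a
 * permanent-failure PCI/EEH callback might simply do
 *
 *	lpfc_sli_flush_io_rings(phba);
 *
 * and let every outstanding command come back as LOCAL_REJECT/SLI_DOWN.
 */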
4109 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4110 * @phba: Pointer to HBA context object.
4111 * @mask: Bit mask to be checked.
4113 * This function reads the host status register and compares
4114 * with the provided bit mask to check if HBA completed
4115 * the restart. This function will wait in a loop for the
4116 * HBA to complete restart. If the HBA does not restart within
4117 * 15 iterations, the function will reset the HBA again. The
4118 * function returns 1 when the HBA fails to restart, otherwise it
4119 * returns zero.
4122 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4128 /* Read the HBA Host Status Register */
4129 if (lpfc_readl(phba->HSregaddr, &status))
4130 return 1;
4132 /*
4133 * Check status register every 100ms for 5 retries, then every
4134 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
4135 * check every 2.5 sec for 4 more retries.
4136 * Break out of the loop if errors occurred during init.
4137 */
4138 while (((status & mask) != mask) &&
4139 !(status & HS_FFERM) &&
4151 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4152 lpfc_sli_brdrestart(phba);
4154 /* Read the HBA Host Status Register */
4155 if (lpfc_readl(phba->HSregaddr, &status)) {
4161 /* Check to see if any errors occurred during init */
4162 if ((status & HS_FFERM) || (i >= 20)) {
4163 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4164 "2751 Adapter failed to restart, "
4165 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4166 status,
4167 readl(phba->MBslimaddr + 0xa8),
4168 readl(phba->MBslimaddr + 0xac));
4169 phba->link_state = LPFC_HBA_ERROR;
4177 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4178 * @phba: Pointer to HBA context object.
4179 * @mask: Bit mask to be checked.
4181 * This function checks the host status register to check if HBA is
4182 * ready. This function will wait in a loop for the HBA to be ready.
4183 * If the HBA is not ready, the function will reset the HBA PCI
4184 * function again. The function returns 1 when the HBA fails to be ready;
4185 * otherwise returns zero.
4188 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4193 /* Read the HBA Host Status Register */
4194 status = lpfc_sli4_post_status_check(phba);
4197 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4198 lpfc_sli_brdrestart(phba);
4199 status = lpfc_sli4_post_status_check(phba);
4202 /* Check to see if any errors occurred during init */
4204 phba->link_state = LPFC_HBA_ERROR;
4207 phba->sli4_hba.intr_enable = 0;
4213 * lpfc_sli_brdready - Wrapper func for checking the HBA readiness
4214 * @phba: Pointer to HBA context object.
4215 * @mask: Bit mask to be checked.
4217 * This routine wraps the actual SLI3 or SLI4 HBA readiness check routine,
4218 * dispatching through the API jump table function pointer in the lpfc_hba
4219 * struct.
4221 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4223 return phba->lpfc_sli_brdready(phba, mask);
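/*
 * Illustrative sketch (assumption, mirroring the per-revision API table
 * setup done elsewhere in the driver): the wrapper above dispatches through
 * a function pointer chosen once per adapter, e.g.:
 *
 *	if (phba->sli_rev < LPFC_SLI_REV4)
 *		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
 *	else
 *		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
 */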
4226 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4229 * lpfc_reset_barrier - Make HBA ready for HBA reset
4230 * @phba: Pointer to HBA context object.
4232 * This function is called before resetting an HBA. It is called with the
4233 * hbalock held and requests the HBA to quiesce DMAs before a reset.
4235 void lpfc_reset_barrier(struct lpfc_hba *phba)
4237 uint32_t __iomem *resp_buf;
4238 uint32_t __iomem *mbox_buf;
4239 volatile uint32_t mbox;
4240 uint32_t hc_copy, ha_copy, resp_data;
4244 lockdep_assert_held(&phba->hbalock);
4246 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4247 if (hdrtype != 0x80 ||
4248 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4249 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4250 return;
4252 /*
4253 * Tell the other part of the chip to suspend temporarily all
4254 * of its DMA work.
4255 */
4256 resp_buf = phba->MBslimaddr;
4258 /* Disable the error attention */
4259 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4260 return;
4261 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4262 readl(phba->HCregaddr); /* flush */
4263 phba->link_flag |= LS_IGNORE_ERATT;
4265 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4266 return;
4267 if (ha_copy & HA_ERATT) {
4268 /* Clear Chip error bit */
4269 writel(HA_ERATT, phba->HAregaddr);
4270 phba->pport->stopped = 1;
4271 }
4273 mbox = 0;
4274 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4275 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4277 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4278 mbox_buf = phba->MBslimaddr;
4279 writel(mbox, mbox_buf);
4281 for (i = 0; i < 50; i++) {
4282 if (lpfc_readl((resp_buf + 1), &resp_data))
4283 return;
4284 if (resp_data != ~(BARRIER_TEST_PATTERN))
4290 if (lpfc_readl((resp_buf + 1), &resp_data))
4291 return;
4292 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4293 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4294 phba->pport->stopped)
4300 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4302 for (i = 0; i < 500; i++) {
4303 if (lpfc_readl(resp_buf, &resp_data))
4304 return;
4305 if (resp_data != mbox)
4314 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4315 return;
4316 if (!(ha_copy & HA_ERATT))
4322 if (readl(phba->HAregaddr) & HA_ERATT) {
4323 writel(HA_ERATT, phba->HAregaddr);
4324 phba->pport->stopped = 1;
4328 phba->link_flag &= ~LS_IGNORE_ERATT;
4329 writel(hc_copy, phba->HCregaddr);
4330 readl(phba->HCregaddr); /* flush */
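/*
 * Summary sketch of the barrier handshake above (no new behavior): the host
 * parks BARRIER_TEST_PATTERN in SLIM word 1 and hands the chip a KILL_BOARD
 * mailbox owned by the chip in word 0; firmware signals quiesced DMA by
 * writing back the one's complement of the pattern:
 *
 *	writel(BARRIER_TEST_PATTERN, resp_buf + 1);
 *	...
 *	if (resp_data == ~(BARRIER_TEST_PATTERN))
 *		...DMA is quiesced; safe to proceed with the reset...
 */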
4334 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4335 * @phba: Pointer to HBA context object.
4337 * This function issues a kill_board mailbox command and waits for
4338 * the error attention interrupt. This function is called for stopping
4339 * the firmware processing. The caller is not required to hold any
4340 * locks. This function calls lpfc_hba_down_post function to free
4341 * any pending commands after the kill. The function returns 1 if it
4342 * fails to kill the board, else it returns 0.
4345 lpfc_sli_brdkill(struct lpfc_hba *phba)
4347 struct lpfc_sli *psli;
4357 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4358 "0329 Kill HBA Data: x%x x%x\n",
4359 phba->pport->port_state, psli->sli_flag);
4361 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4362 if (!pmb)
4363 return 1;
4365 /* Disable the error attention */
4366 spin_lock_irq(&phba->hbalock);
4367 if (lpfc_readl(phba->HCregaddr, &status)) {
4368 spin_unlock_irq(&phba->hbalock);
4369 mempool_free(pmb, phba->mbox_mem_pool);
4370 return 1;
4371 }
4372 status &= ~HC_ERINT_ENA;
4373 writel(status, phba->HCregaddr);
4374 readl(phba->HCregaddr); /* flush */
4375 phba->link_flag |= LS_IGNORE_ERATT;
4376 spin_unlock_irq(&phba->hbalock);
4378 lpfc_kill_board(phba, pmb);
4379 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4380 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4382 if (retval != MBX_SUCCESS) {
4383 if (retval != MBX_BUSY)
4384 mempool_free(pmb, phba->mbox_mem_pool);
4385 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4386 "2752 KILL_BOARD command failed retval %d\n",
4388 spin_lock_irq(&phba->hbalock);
4389 phba->link_flag &= ~LS_IGNORE_ERATT;
4390 spin_unlock_irq(&phba->hbalock);
4394 spin_lock_irq(&phba->hbalock);
4395 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4396 spin_unlock_irq(&phba->hbalock);
4398 mempool_free(pmb, phba->mbox_mem_pool);
4400 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4401 * attention every 100ms for 3 seconds. If we don't get ERATT after
4402 * 3 seconds we still set HBA_ERROR state because the status of the
4403 * board is now undefined.
4405 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4406 return 1;
4407 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4408 mdelay(100);
4409 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4410 return 1;
4411 }
4413 del_timer_sync(&psli->mbox_tmo);
4414 if (ha_copy & HA_ERATT) {
4415 writel(HA_ERATT, phba->HAregaddr);
4416 phba->pport->stopped = 1;
4417 }
4418 spin_lock_irq(&phba->hbalock);
4419 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4420 psli->mbox_active = NULL;
4421 phba->link_flag &= ~LS_IGNORE_ERATT;
4422 spin_unlock_irq(&phba->hbalock);
4424 lpfc_hba_down_post(phba);
4425 phba->link_state = LPFC_HBA_ERROR;
4427 return ha_copy & HA_ERATT ? 0 : 1;
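/*
 * Illustrative usage (assumption): a kill is normally preceded by taking
 * the port offline so nothing races the dying mailbox engine, e.g.:
 *
 *	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
 *	lpfc_offline(phba);
 *	if (lpfc_sli_brdkill(phba))
 *		...board state undefined; link_state is already HBA_ERROR...
 */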
4431 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4432 * @phba: Pointer to HBA context object.
4434 * This function resets the HBA by writing HC_INITFF to the control
4435 * register. After the HBA resets, this function resets all the iocb ring
4436 * indices. This function disables PCI layer parity checking during
4437 * the reset.
4438 * This function returns 0 always.
4439 * The caller is not required to hold any locks.
4442 lpfc_sli_brdreset(struct lpfc_hba *phba)
4444 struct lpfc_sli *psli;
4445 struct lpfc_sli_ring *pring;
4452 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4453 "0325 Reset HBA Data: x%x x%x\n",
4454 (phba->pport) ? phba->pport->port_state : 0,
4457 /* perform board reset */
4458 phba->fc_eventTag = 0;
4459 phba->link_events = 0;
4461 phba->pport->fc_myDID = 0;
4462 phba->pport->fc_prevDID = 0;
4465 /* Turn off parity checking and serr during the physical reset */
4466 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
4469 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4471 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4473 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4475 /* Now toggle INITFF bit in the Host Control Register */
4476 writel(HC_INITFF, phba->HCregaddr);
4478 readl(phba->HCregaddr); /* flush */
4479 writel(0, phba->HCregaddr);
4480 readl(phba->HCregaddr); /* flush */
4482 /* Restore PCI cmd register */
4483 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4485 /* Initialize relevant SLI info */
4486 for (i = 0; i < psli->num_rings; i++) {
4487 pring = &psli->sli3_ring[i];
4489 pring->sli.sli3.rspidx = 0;
4490 pring->sli.sli3.next_cmdidx = 0;
4491 pring->sli.sli3.local_getidx = 0;
4492 pring->sli.sli3.cmdidx = 0;
4493 pring->missbufcnt = 0;
4496 phba->link_state = LPFC_WARM_START;
4501 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4502 * @phba: Pointer to HBA context object.
4504 * This function resets a SLI4 HBA. It disables PCI layer parity
4505 * checking while it resets the device. The caller is not required to
4506 * hold any locks.
4508 * This function returns 0 on success, else a negative error code.
4511 lpfc_sli4_brdreset(struct lpfc_hba *phba)
4513 struct lpfc_sli *psli = &phba->sli;
4514 uint16_t cfg_value;
4515 int rc = 0;
4518 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4519 "0295 Reset HBA Data: x%x x%x x%x\n",
4520 phba->pport->port_state, psli->sli_flag,
4523 /* perform board reset */
4524 phba->fc_eventTag = 0;
4525 phba->link_events = 0;
4526 phba->pport->fc_myDID = 0;
4527 phba->pport->fc_prevDID = 0;
4529 spin_lock_irq(&phba->hbalock);
4530 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4531 phba->fcf.fcf_flag = 0;
4532 spin_unlock_irq(&phba->hbalock);
4534 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4535 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4536 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4537 return rc;
4538 }
4540 /* Now physically reset the device */
4541 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4542 "0389 Performing PCI function reset!\n");
4544 /* Turn off parity checking and serr during the physical reset */
4545 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
4546 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4547 "3205 PCI read Config failed\n");
4548 return -EIO;
4549 }
4551 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4552 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4554 /* Perform FCoE PCI function reset before freeing queue memory */
4555 rc = lpfc_pci_function_reset(phba);
4557 /* Restore PCI cmd register */
4558 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4560 return rc;
4564 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4565 * @phba: Pointer to HBA context object.
4567 * This function is called in the SLI initialization code path to
4568 * restart the HBA. The caller is not required to hold any lock.
4569 * This function writes MBX_RESTART mailbox command to the SLIM and
4570 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4571 * function to free any pending commands. The function enables
4572 * POST only during the first initialization. The function returns zero.
4573 * The function does not guarantee completion of MBX_RESTART mailbox
4574 * command before the return of this function.
4577 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4580 struct lpfc_sli *psli;
4581 volatile uint32_t word0;
4582 void __iomem *to_slim;
4583 uint32_t hba_aer_enabled;
4585 spin_lock_irq(&phba->hbalock);
4587 /* Take PCIe device Advanced Error Reporting (AER) state */
4588 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4593 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4594 "0337 Restart HBA Data: x%x x%x\n",
4595 (phba->pport) ? phba->pport->port_state : 0,
4599 mb = (MAILBOX_t *) &word0;
4600 mb->mbxCommand = MBX_RESTART;
4603 lpfc_reset_barrier(phba);
4605 to_slim = phba->MBslimaddr;
4606 writel(*(uint32_t *) mb, to_slim);
4607 readl(to_slim); /* flush */
4609 /* Only skip post after fc_ffinit is completed */
4610 if (phba->pport && phba->pport->port_state)
4611 word0 = 1; /* This is really setting up word1 */
4613 word0 = 0; /* This is really setting up word1 */
4614 to_slim = phba->MBslimaddr + sizeof (uint32_t);
4615 writel(*(uint32_t *) mb, to_slim);
4616 readl(to_slim); /* flush */
4618 lpfc_sli_brdreset(phba);
4620 phba->pport->stopped = 0;
4621 phba->link_state = LPFC_INIT_START;
4623 spin_unlock_irq(&phba->hbalock);
4625 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4626 psli->stats_start = ktime_get_seconds();
4628 /* Give the INITFF and Post time to settle. */
4631 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4632 if (hba_aer_enabled)
4633 pci_disable_pcie_error_reporting(phba->pcidev);
4635 lpfc_hba_down_post(phba);
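/*
 * Summary sketch of the SLIM handoff above (no new behavior): two
 * little-endian words are staged in SLIM before the INITFF reset:
 *
 *	word 0 = MBX_RESTART mailbox command
 *	word 1 = 1 to skip POST (warm restart after fc_ffinit),
 *	         0 to run POST (first initialization)
 */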
4641 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4642 * @phba: Pointer to HBA context object.
4644 * This function is called in the SLI initialization code path to restart
4645 * a SLI4 HBA. The caller is not required to hold any lock.
4646 * At the end of the function, it calls lpfc_hba_down_post function to
4647 * free any pending commands.
4650 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4652 struct lpfc_sli *psli = &phba->sli;
4653 uint32_t hba_aer_enabled;
4657 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4658 "0296 Restart HBA Data: x%x x%x\n",
4659 phba->pport->port_state, psli->sli_flag);
4661 /* Take PCIe device Advanced Error Reporting (AER) state */
4662 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4664 rc = lpfc_sli4_brdreset(phba);
4665 if (rc) {
4666 phba->link_state = LPFC_HBA_ERROR;
4667 goto hba_down_queue;
4668 }
4670 spin_lock_irq(&phba->hbalock);
4671 phba->pport->stopped = 0;
4672 phba->link_state = LPFC_INIT_START;
4674 spin_unlock_irq(&phba->hbalock);
4676 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4677 psli->stats_start = ktime_get_seconds();
4679 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4680 if (hba_aer_enabled)
4681 pci_disable_pcie_error_reporting(phba->pcidev);
4683 hba_down_queue:
4684 lpfc_hba_down_post(phba);
4685 lpfc_sli4_queue_destroy(phba);
4691 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4692 * @phba: Pointer to HBA context object.
4694 * This routine wraps the actual SLI3 or SLI4 HBA restart routine,
4695 * dispatching through the API jump table function pointer in the lpfc_hba struct.
4698 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4700 return phba->lpfc_sli_brdrestart(phba);
4704 * lpfc_sli_chipset_init - Wait for the HBA to complete its restart
4705 * @phba: Pointer to HBA context object.
4707 * This function is called after a HBA restart to wait for successful
4708 * restart of the HBA. Successful restart of the HBA is indicated by
4709 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4710 * iterations, the function will restart the HBA again. The function returns
4711 * zero if the HBA successfully restarted, else it returns a negative error code.
4714 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4716 uint32_t status, i = 0;
4718 /* Read the HBA Host Status Register */
4719 if (lpfc_readl(phba->HSregaddr, &status))
4720 return -EIO;
4722 /* Check status register to see what current state is */
4724 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4726 /* Check every 10ms for 10 retries, then every 100ms for 90
4727 * retries, then every 1 sec for 50 retries, for a total of
4728 * ~60 seconds before resetting the board again, then check
4729 * every 1 sec for 50 more retries. Up to 60 seconds before
4730 * board ready is required for Falcon FIPS zeroization to
4731 * complete; any board reset in between restarts zeroization,
4732 * further delaying board ready.
4733 */
4735 /* Adapter failed to init, timeout, status reg
4737 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4738 "0436 Adapter failed to init, "
4739 "timeout, status reg x%x, "
4740 "FW Data: A8 x%x AC x%x\n", status,
4741 readl(phba->MBslimaddr + 0xa8),
4742 readl(phba->MBslimaddr + 0xac));
4743 phba->link_state = LPFC_HBA_ERROR;
4744 return -ETIMEDOUT;
4745 }
4747 /* Check to see if any errors occurred during init */
4748 if (status & HS_FFERM) {
4749 /* ERROR: During chipset initialization */
4750 /* Adapter failed to init, chipset, status reg
4752 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4753 "0437 Adapter failed to init, "
4754 "chipset, status reg x%x, "
4755 "FW Data: A8 x%x AC x%x\n", status,
4756 readl(phba->MBslimaddr + 0xa8),
4757 readl(phba->MBslimaddr + 0xac));
4758 phba->link_state = LPFC_HBA_ERROR;
4771 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4772 lpfc_sli_brdrestart(phba);
4774 /* Read the HBA Host Status Register */
4775 if (lpfc_readl(phba->HSregaddr, &status))
4776 return -EIO;
4777 }
4779 /* Check to see if any errors occurred during init */
4780 if (status & HS_FFERM) {
4781 /* ERROR: During chipset initialization */
4782 /* Adapter failed to init, chipset, status reg <status> */
4783 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4784 "0438 Adapter failed to init, chipset, "
4786 "FW Data: A8 x%x AC x%x\n", status,
4787 readl(phba->MBslimaddr + 0xa8),
4788 readl(phba->MBslimaddr + 0xac));
4789 phba->link_state = LPFC_HBA_ERROR;
4790 return -EIO;
4791 }
4793 /* Clear all interrupt enable conditions */
4794 writel(0, phba->HCregaddr);
4795 readl(phba->HCregaddr); /* flush */
4797 /* setup host attn register */
4798 writel(0xffffffff, phba->HAregaddr);
4799 readl(phba->HAregaddr); /* flush */
4804 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4806 * This function calculates and returns the number of HBQs required to be
4807 * configured.
4810 lpfc_sli_hbq_count(void)
4812 return ARRAY_SIZE(lpfc_hbq_defs);
4816 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4818 * This function adds the number of hbq entries in every HBQ to get
4819 * the total number of hbq entries required for the HBA and returns
4820 * the result.
4823 lpfc_sli_hbq_entry_count(void)
4825 int hbq_count = lpfc_sli_hbq_count();
4829 for (i = 0; i < hbq_count; ++i)
4830 count += lpfc_hbq_defs[i]->entry_count;
4835 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4837 * This function calculates amount of memory required for all hbq entries
4838 * to be configured and returns the total memory required.
4841 lpfc_sli_hbq_size(void)
4843 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
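/*
 * Illustrative worked example (assumed numbers, for intuition only): with
 * two HBQs of 256 and 128 entries and a 16-byte struct lpfc_hbq_entry,
 *
 *	lpfc_sli_hbq_entry_count() = 256 + 128 = 384
 *	lpfc_sli_hbq_size()        = 384 * 16 = 6144 bytes
 */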
4847 * lpfc_sli_hbq_setup - configure and initialize HBQs
4848 * @phba: Pointer to HBA context object.
4850 * This function is called during the SLI initialization to configure
4851 * all the HBQs and post buffers to the HBQ. The caller is not
4852 * required to hold any locks. This function will return zero if successful
4853 * else it will return negative error code.
4856 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4858 int hbq_count = lpfc_sli_hbq_count();
4862 uint32_t hbq_entry_index;
4864 /* Get a Mailbox buffer to setup mailbox
4865 * commands for HBA initialization.
4866 */
4867 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4868 if (!pmb)
4869 return -ENOMEM;
4871 pmbox = &pmb->u.mb;
4874 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4875 phba->link_state = LPFC_INIT_MBX_CMDS;
4876 phba->hbq_in_use = 1;
4878 hbq_entry_index = 0;
4879 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4880 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4881 phba->hbqs[hbqno].hbqPutIdx = 0;
4882 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4883 phba->hbqs[hbqno].entry_count =
4884 lpfc_hbq_defs[hbqno]->entry_count;
4885 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4886 hbq_entry_index, pmb);
4887 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4889 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4890 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4891 mbxStatus <status>, ring <num> */
4893 lpfc_printf_log(phba, KERN_ERR,
4894 LOG_SLI | LOG_VPORT,
4895 "1805 Adapter failed to init. "
4896 "Data: x%x x%x x%x\n",
4897 pmbox->mbxCommand,
4898 pmbox->mbxStatus, hbqno);
4900 phba->link_state = LPFC_HBA_ERROR;
4901 mempool_free(pmb, phba->mbox_mem_pool);
4905 phba->hbq_count = hbq_count;
4907 mempool_free(pmb, phba->mbox_mem_pool);
4909 /* Initially populate or replenish the HBQs */
4910 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4911 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4916 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4917 * @phba: Pointer to HBA context object.
4919 * This function is called during the SLI initialization to configure
4920 * all the HBQs and post buffers to the HBQ. The caller is not
4921 * required to hold any locks. This function will return zero if successful
4922 * else it will return negative error code.
4925 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4927 phba->hbq_in_use = 1;
4928 /*
4929 * Specific case when MDS diagnostics are enabled and supported.
4930 * The receive buffer count is truncated to manage the incoming
4931 * data.
4932 */
4933 if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
4934 phba->hbqs[LPFC_ELS_HBQ].entry_count =
4935 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
4937 phba->hbqs[LPFC_ELS_HBQ].entry_count =
4938 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
4939 phba->hbq_count = 1;
4940 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
4941 /* Initially populate or replenish the HBQs */
4946 * lpfc_sli_config_port - Issue config port mailbox command
4947 * @phba: Pointer to HBA context object.
4948 * @sli_mode: sli mode - 2/3
4950 * This function is called by the sli initialization code path
4951 * to issue config_port mailbox command. This function restarts the
4952 * HBA firmware and issues a config_port mailbox command to configure
4953 * the SLI interface in the sli mode specified by sli_mode
4954 * variable. The caller is not required to hold any locks.
4955 * The function returns 0 if successful, else returns negative error
4956 * code.
4959 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4962 uint32_t resetcount = 0, rc = 0, done = 0;
4964 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4965 if (!pmb) {
4966 phba->link_state = LPFC_HBA_ERROR;
4967 return -ENOMEM;
4968 }
4970 phba->sli_rev = sli_mode;
4971 while (resetcount < 2 && !done) {
4972 spin_lock_irq(&phba->hbalock);
4973 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4974 spin_unlock_irq(&phba->hbalock);
4975 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4976 lpfc_sli_brdrestart(phba);
4977 rc = lpfc_sli_chipset_init(phba);
4981 spin_lock_irq(&phba->hbalock);
4982 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4983 spin_unlock_irq(&phba->hbalock);
4986 /* Call pre CONFIG_PORT mailbox command initialization. A
4987 * value of 0 means the call was successful. Any other
4988 * nonzero value is a failure, but if ERESTART is returned,
4989 * the driver may reset the HBA and try again.
4990 */
4991 rc = lpfc_config_port_prep(phba);
4992 if (rc == -ERESTART) {
4993 phba->link_state = LPFC_LINK_UNKNOWN;
4998 phba->link_state = LPFC_INIT_MBX_CMDS;
4999 lpfc_config_port(phba, pmb);
5000 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5001 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5002 LPFC_SLI3_HBQ_ENABLED |
5003 LPFC_SLI3_CRP_ENABLED |
5004 LPFC_SLI3_DSS_ENABLED);
5005 if (rc != MBX_SUCCESS) {
5006 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5007 "0442 Adapter failed to init, mbxCmd x%x "
5008 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5009 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5010 spin_lock_irq(&phba->hbalock);
5011 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5012 spin_unlock_irq(&phba->hbalock);
5015 /* Allow asynchronous mailbox command to go through */
5016 spin_lock_irq(&phba->hbalock);
5017 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5018 spin_unlock_irq(&phba->hbalock);
5021 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5022 (pmb->u.mb.un.varCfgPort.gasabt == 0))
5023 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5024 "3110 Port did not grant ASABT\n");
5029 goto do_prep_failed;
5031 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5032 if (!pmb->u.mb.un.varCfgPort.cMA) {
5033 rc = -ENXIO;
5034 goto do_prep_failed;
5036 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5037 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5038 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5039 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5040 phba->max_vpi : phba->max_vports;
5044 phba->fips_level = 0;
5045 phba->fips_spec_rev = 0;
5046 if (pmb->u.mb.un.varCfgPort.gdss) {
5047 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
5048 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
5049 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
5050 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5051 "2850 Security Crypto Active. FIPS x%d "
5053 phba->fips_level, phba->fips_spec_rev);
5055 if (pmb->u.mb.un.varCfgPort.sec_err) {
5056 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5057 "2856 Config Port Security Crypto "
5059 pmb->u.mb.un.varCfgPort.sec_err);
5061 if (pmb->u.mb.un.varCfgPort.gerbm)
5062 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5063 if (pmb->u.mb.un.varCfgPort.gcrp)
5064 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5066 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5067 phba->port_gp = phba->mbox->us.s3_pgp.port;
5069 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5070 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5071 phba->cfg_enable_bg = 0;
5072 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5073 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5074 "0443 Adapter did not grant "
5079 phba->hbq_get = NULL;
5080 phba->port_gp = phba->mbox->us.s2.port;
5081 }
5083 do_prep_failed:
5084 mempool_free(pmb, phba->mbox_mem_pool);
5085 return rc;
5090 * lpfc_sli_hba_setup - SLI initialization function
5091 * @phba: Pointer to HBA context object.
5093 * This function is the main SLI initialization function. This function
5094 * is called by the HBA initialization code, HBA reset code and HBA
5095 * error attention handler code. Caller is not required to hold any
5096 * locks. This function issues config_port mailbox command to configure
5097 * the SLI, setup iocb rings and HBQ rings. In the end the function
5098 * calls the config_port_post function to issue init_link mailbox
5099 * command and to start the discovery. The function will return zero
5100 * if successful, else it will return negative error code.
5103 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5109 switch (phba->cfg_sli_mode) {
5111 if (phba->cfg_enable_npiv) {
5112 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5113 "1824 NPIV enabled: Override sli_mode "
5114 "parameter (%d) to auto (0).\n",
5115 phba->cfg_sli_mode);
5124 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5125 "1819 Unrecognized sli_mode parameter: %d.\n",
5126 phba->cfg_sli_mode);
5130 phba->fcp_embed_io = 0; /* SLI4 FC support only */
5132 rc = lpfc_sli_config_port(phba, mode);
5134 if (rc && phba->cfg_sli_mode == 3)
5135 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5136 "1820 Unable to select SLI-3. "
5137 "Not supported by adapter.\n");
5138 if (rc && mode != 2)
5139 rc = lpfc_sli_config_port(phba, 2);
5140 else if (rc && mode == 2)
5141 rc = lpfc_sli_config_port(phba, 3);
5142 if (rc)
5143 goto lpfc_sli_hba_setup_error;
5145 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5146 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5147 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5149 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5150 "2709 This device supports "
5151 "Advanced Error Reporting (AER)\n");
5152 spin_lock_irq(&phba->hbalock);
5153 phba->hba_flag |= HBA_AER_ENABLED;
5154 spin_unlock_irq(&phba->hbalock);
5156 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5157 "2708 This device does not support "
5158 "Advanced Error Reporting (AER): %d\n",
5160 phba->cfg_aer_support = 0;
5164 if (phba->sli_rev == 3) {
5165 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5166 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5168 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5169 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5170 phba->sli3_options = 0;
5173 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5174 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5175 phba->sli_rev, phba->max_vpi);
5176 rc = lpfc_sli_ring_map(phba);
5178 if (rc)
5179 goto lpfc_sli_hba_setup_error;
5181 /* Initialize VPIs. */
5182 if (phba->sli_rev == LPFC_SLI_REV3) {
5184 * The VPI bitmask and physical ID array are allocated
5185 * and initialized once only - at driver load. A port
5186 * reset doesn't need to reinitialize this memory.
5187 */
5188 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5189 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5190 phba->vpi_bmask = kcalloc(longs,
5191 sizeof(unsigned long),
5192 GFP_KERNEL);
5193 if (!phba->vpi_bmask) {
5194 rc = -ENOMEM;
5195 goto lpfc_sli_hba_setup_error;
5198 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5199 sizeof(uint16_t),
5200 GFP_KERNEL);
5201 if (!phba->vpi_ids) {
5202 kfree(phba->vpi_bmask);
5203 rc = -ENOMEM;
5204 goto lpfc_sli_hba_setup_error;
5206 for (i = 0; i < phba->max_vpi; i++)
5207 phba->vpi_ids[i] = i;
5212 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5213 rc = lpfc_sli_hbq_setup(phba);
5214 if (rc)
5215 goto lpfc_sli_hba_setup_error;
5217 spin_lock_irq(&phba->hbalock);
5218 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5219 spin_unlock_irq(&phba->hbalock);
5221 rc = lpfc_config_port_post(phba);
5222 if (rc)
5223 goto lpfc_sli_hba_setup_error;
5227 lpfc_sli_hba_setup_error:
5228 phba->link_state = LPFC_HBA_ERROR;
5229 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5230 "0445 Firmware initialization failed\n");
5235 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5236 * @phba: Pointer to HBA context object.
5238 * This function issues a dump mailbox command to read config region
5239 * 23, parses the records in the region, and populates the driver's
5240 * internal host memory structures.
5243 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5245 LPFC_MBOXQ_t *mboxq;
5246 struct lpfc_dmabuf *mp;
5247 struct lpfc_mqe *mqe;
5248 uint32_t data_length;
5251 /* Program the default value of vlan_id and fc_map */
5252 phba->valid_vlan = 0;
5253 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5254 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5255 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5257 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5258 if (!mboxq)
5259 return -ENOMEM;
5261 mqe = &mboxq->u.mqe;
5262 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5263 rc = -ENOMEM;
5264 goto out_free_mboxq;
5267 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5268 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5270 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5271 "(%d):2571 Mailbox cmd x%x Status x%x "
5272 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5273 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5274 "CQ: x%x x%x x%x x%x\n",
5275 mboxq->vport ? mboxq->vport->vpi : 0,
5276 bf_get(lpfc_mqe_command, mqe),
5277 bf_get(lpfc_mqe_status, mqe),
5278 mqe->un.mb_words[0], mqe->un.mb_words[1],
5279 mqe->un.mb_words[2], mqe->un.mb_words[3],
5280 mqe->un.mb_words[4], mqe->un.mb_words[5],
5281 mqe->un.mb_words[6], mqe->un.mb_words[7],
5282 mqe->un.mb_words[8], mqe->un.mb_words[9],
5283 mqe->un.mb_words[10], mqe->un.mb_words[11],
5284 mqe->un.mb_words[12], mqe->un.mb_words[13],
5285 mqe->un.mb_words[14], mqe->un.mb_words[15],
5286 mqe->un.mb_words[16], mqe->un.mb_words[50],
5288 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5289 mboxq->mcqe.trailer);
5292 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5293 kfree(mp);
5294 rc = -EIO;
5295 goto out_free_mboxq;
5297 data_length = mqe->un.mb_words[5];
5298 if (data_length > DMP_RGN23_SIZE) {
5299 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5300 kfree(mp);
5301 rc = -EIO;
5302 goto out_free_mboxq;
5305 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5306 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5307 kfree(mp);
5310 out_free_mboxq:
5311 mempool_free(mboxq, phba->mbox_mem_pool);
5312 return rc;
5316 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5317 * @phba: pointer to lpfc hba data structure.
5318 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5319 * @vpd: pointer to the memory to hold resulting port vpd data.
5320 * @vpd_size: On input, the number of bytes allocated to @vpd.
5321 * On output, the number of data bytes in @vpd.
5323 * This routine executes a READ_REV SLI4 mailbox command. In
5324 * addition, this routine gets the port vpd data.
5328 * -ENOMEM - could not allocate memory.
5331 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5332 uint8_t *vpd, uint32_t *vpd_size)
5336 struct lpfc_dmabuf *dmabuf;
5337 struct lpfc_mqe *mqe;
5339 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5340 if (!dmabuf)
5341 return -ENOMEM;
5343 /*
5344 * Get a DMA buffer for the vpd data resulting from the READ_REV
5345 * mailbox command.
5346 */
5347 dma_size = *vpd_size;
5348 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5349 &dmabuf->phys, GFP_KERNEL);
5350 if (!dmabuf->virt) {
5351 kfree(dmabuf);
5352 return -ENOMEM;
5353 }
5355 /*
5356 * The SLI4 implementation of READ_REV conflicts at word1,
5357 * bits 31:16 and SLI4 adds vpd functionality not present
5358 * in SLI3. This code corrects the conflicts.
5359 */
5360 lpfc_read_rev(phba, mboxq);
5361 mqe = &mboxq->u.mqe;
5362 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5363 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5364 mqe->un.read_rev.word1 &= 0x0000FFFF;
5365 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5366 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5368 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5369 if (rc) {
5370 dma_free_coherent(&phba->pcidev->dev, dma_size,
5371 dmabuf->virt, dmabuf->phys);
5372 kfree(dmabuf);
5373 return -EIO;
5374 }
5376 /*
5377 * The available vpd length cannot be bigger than the
5378 * DMA buffer passed to the port. Catch the case where it is
5379 * smaller and update the caller's size.
5380 */
5381 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5382 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5384 memcpy(vpd, dmabuf->virt, *vpd_size);
5386 dma_free_coherent(&phba->pcidev->dev, dma_size,
5387 dmabuf->virt, dmabuf->phys);
5388 kfree(dmabuf);
5390 return 0;
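/*
 * Illustrative usage (assumption, modeled on the SLI4 setup path): the
 * caller sizes a buffer, then trusts the updated *vpd_size on return:
 *
 *	uint32_t vpd_size = SLI4_PAGE_SIZE;
 *	uint8_t *vpd = kzalloc(vpd_size, GFP_KERNEL);
 *
 *	if (vpd && !lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size))
 *		lpfc_parse_vpd(phba, vpd, vpd_size);
 */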
5393 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5394 * @phba: pointer to lpfc hba data structure.
5396 * This routine retrieves the SLI4 device controller attributes for the
5397 * port this PCI function is attached to.
5401 * otherwise - failed to retrieve controller attributes
5404 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5406 LPFC_MBOXQ_t *mboxq;
5407 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5408 struct lpfc_controller_attribute *cntl_attr;
5409 void *virtaddr = NULL;
5410 uint32_t alloclen, reqlen;
5411 uint32_t shdr_status, shdr_add_status;
5412 union lpfc_sli4_cfg_shdr *shdr;
5415 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5416 if (!mboxq)
5417 return -ENOMEM;
5419 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5420 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5421 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5422 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5423 LPFC_SLI4_MBX_NEMBED);
5425 if (alloclen < reqlen) {
5426 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5427 "3084 Allocated DMA memory size (%d) is "
5428 "less than the requested DMA memory size "
5429 "(%d)\n", alloclen, reqlen);
5430 rc = -ENOMEM;
5431 goto out_free_mboxq;
5433 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5434 virtaddr = mboxq->sge_array->addr[0];
5435 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5436 shdr = &mbx_cntl_attr->cfg_shdr;
5437 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5438 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5439 if (shdr_status || shdr_add_status || rc) {
5440 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5441 "3085 Mailbox x%x (x%x/x%x) failed, "
5442 "rc:x%x, status:x%x, add_status:x%x\n",
5443 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5444 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5445 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5446 rc, shdr_status, shdr_add_status);
5447 rc = -ENXIO;
5448 goto out_free_mboxq;
5451 cntl_attr = &mbx_cntl_attr->cntl_attr;
5452 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5453 phba->sli4_hba.lnk_info.lnk_tp =
5454 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5455 phba->sli4_hba.lnk_info.lnk_no =
5456 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5458 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
5459 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
5460 sizeof(phba->BIOSVersion));
5462 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5463 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
5464 phba->sli4_hba.lnk_info.lnk_tp,
5465 phba->sli4_hba.lnk_info.lnk_no,
5466 phba->BIOSVersion);
5468 if (rc != MBX_TIMEOUT) {
5469 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5470 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5472 mempool_free(mboxq, phba->mbox_mem_pool);
5478 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5479 * @phba: pointer to lpfc hba data structure.
5481 * This routine retrieves the SLI4 device physical port name this PCI
5482 * function is attached to.
5486 * otherwise - failed to retrieve physical port name
5489 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5491 LPFC_MBOXQ_t *mboxq;
5492 struct lpfc_mbx_get_port_name *get_port_name;
5493 uint32_t shdr_status, shdr_add_status;
5494 union lpfc_sli4_cfg_shdr *shdr;
5495 char cport_name = 0;
5498 /* We assume nothing at this point */
5499 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5500 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5502 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5503 if (!mboxq)
5504 return -ENOMEM;
5505 /* obtain link type and link number via READ_CONFIG */
5506 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5507 lpfc_sli4_read_config(phba);
5508 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5509 goto retrieve_ppname;
5511 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5512 rc = lpfc_sli4_get_ctl_attr(phba);
5513 if (rc)
5514 goto out_free_mboxq;
5516 retrieve_ppname:
5517 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5518 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5519 sizeof(struct lpfc_mbx_get_port_name) -
5520 sizeof(struct lpfc_sli4_cfg_mhdr),
5521 LPFC_SLI4_MBX_EMBED);
5522 get_port_name = &mboxq->u.mqe.un.get_port_name;
5523 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5524 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5525 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5526 phba->sli4_hba.lnk_info.lnk_tp);
5527 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5528 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5529 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5530 if (shdr_status || shdr_add_status || rc) {
5531 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5532 "3087 Mailbox x%x (x%x/x%x) failed: "
5533 "rc:x%x, status:x%x, add_status:x%x\n",
5534 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5535 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5536 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5537 rc, shdr_status, shdr_add_status);
5538 rc = -ENXIO;
5539 goto out_free_mboxq;
5541 switch (phba->sli4_hba.lnk_info.lnk_no) {
5542 case LPFC_LINK_NUMBER_0:
5543 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5544 &get_port_name->u.response);
5545 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5546 break;
5547 case LPFC_LINK_NUMBER_1:
5548 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5549 &get_port_name->u.response);
5550 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5551 break;
5552 case LPFC_LINK_NUMBER_2:
5553 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5554 &get_port_name->u.response);
5555 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5556 break;
5557 case LPFC_LINK_NUMBER_3:
5558 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5559 &get_port_name->u.response);
5560 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5561 break;
5562 default:
5563 break;
5564 }
5566 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5567 phba->Port[0] = cport_name;
5568 phba->Port[1] = '\0';
5569 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5570 "3091 SLI get port name: %s\n", phba->Port);
5574 if (rc != MBX_TIMEOUT) {
5575 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5576 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5578 mempool_free(mboxq, phba->mbox_mem_pool);
5584 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5585 * @phba: pointer to lpfc hba data structure.
5587 * This routine is called to explicitly arm the SLI4 device's completion
5588 * and event queues.
5591 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5594 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
5595 struct lpfc_sli4_hdw_queue *qp;
5596 struct lpfc_queue *eq;
5598 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
5599 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
5600 if (sli4_hba->nvmels_cq)
5601 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
5602 LPFC_QUEUE_REARM);
5604 if (sli4_hba->hdwq) {
5605 /* Loop thru all Hardware Queues */
5606 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
5607 qp = &sli4_hba->hdwq[qidx];
5608 /* ARM the corresponding CQ */
5609 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
5613 /* Loop thru all IRQ vectors */
5614 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
5615 eq = sli4_hba->hba_eq_hdl[qidx].eq;
5616 /* ARM the corresponding EQ */
5617 sli4_hba->sli4_write_eq_db(phba, eq,
5618 0, LPFC_QUEUE_REARM);
5622 if (phba->nvmet_support) {
5623 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5624 sli4_hba->sli4_write_cq_db(phba,
5625 sli4_hba->nvmet_cqset[qidx], 0,
5626 LPFC_QUEUE_REARM);
5632 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5633 * @phba: Pointer to HBA context object.
5634 * @type: The resource extent type.
5635 * @extnt_count: buffer to hold port available extent count.
5636 * @extnt_size: buffer to hold element count per extent.
5638 * This function calls the port and retrieves the number of available
5639 * extents and their size for a particular extent type.
5641 * Returns: 0 if successful. Nonzero otherwise.
5644 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5645 uint16_t *extnt_count, uint16_t *extnt_size)
5650 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5653 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5654 if (!mbox)
5655 return -ENOMEM;
5657 /* Find out how many extents are available for this resource type */
5658 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5659 sizeof(struct lpfc_sli4_cfg_mhdr));
5660 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5661 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5662 length, LPFC_SLI4_MBX_EMBED);
5664 /* Send an extents count of 0 - the GET doesn't use it. */
5665 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5666 LPFC_SLI4_MBX_EMBED);
5672 if (!phba->sli4_hba.intr_enable)
5673 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5675 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5676 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5683 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5684 if (bf_get(lpfc_mbox_hdr_status,
5685 &rsrc_info->header.cfg_shdr.response)) {
5686 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5687 "2930 Failed to get resource extents "
5688 "Status 0x%x Add'l Status 0x%x\n",
5689 bf_get(lpfc_mbox_hdr_status,
5690 &rsrc_info->header.cfg_shdr.response),
5691 bf_get(lpfc_mbox_hdr_add_status,
5692 &rsrc_info->header.cfg_shdr.response));
5697 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5698 &rsrc_info->u.rsp);
5699 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5700 &rsrc_info->u.rsp);
5702 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5703 "3162 Retrieved extents type-%d from port: count:%d, "
5704 "size:%d\n", type, *extnt_count, *extnt_size);
5707 mempool_free(mbox, phba->mbox_mem_pool);
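/*
 * Illustrative usage (assumption): discover what the port offers for one
 * resource type before posting an allocation, e.g. for RPIs:
 *
 *	uint16_t cnt, size;
 *
 *	if (!lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI,
 *					    &cnt, &size))
 *		...port offers "cnt" extents of "size" RPIs each...
 */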
5712 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5713 * @phba: Pointer to HBA context object.
5714 * @type: The extent type to check.
5716 * This function reads the current available extents from the port and checks
5717 * if the extent count or extent size has changed since the last access.
5718 * Callers use this routine post port reset to understand if there is an
5719 * extent reprovisioning requirement.
5722 * -Error: error indicates problem.
5723 * 1: Extent count or size has changed.
5727 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5729 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5730 uint16_t size_diff, rsrc_ext_size;
5732 struct lpfc_rsrc_blks *rsrc_entry;
5733 struct list_head *rsrc_blk_list = NULL;
5737 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5738 &rsrc_ext_cnt,
5739 &rsrc_ext_size);
5740 if (unlikely(rc))
5741 return -EIO;
5743 switch (type) {
5744 case LPFC_RSC_TYPE_FCOE_RPI:
5745 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5746 break;
5747 case LPFC_RSC_TYPE_FCOE_VPI:
5748 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5749 break;
5750 case LPFC_RSC_TYPE_FCOE_XRI:
5751 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5752 break;
5753 case LPFC_RSC_TYPE_FCOE_VFI:
5754 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5760 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5761 curr_ext_cnt++;
5762 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5763 size_diff++;
5764 }
5766 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5767 rc = 1;
5769 return rc;
5773 * lpfc_sli4_cfg_post_extnts - Post a request to allocate resource extents
5774 * @phba: Pointer to HBA context object.
5775 * @extnt_cnt: number of available extents.
5776 * @type: the extent type (rpi, xri, vfi, vpi).
5777 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5778 * @mbox: pointer to the caller's allocated mailbox structure.
5780 * This function executes the extents allocation request. It also
5781 * takes care of the amount of memory needed to allocate or get the
5782 * allocated extents. It is the caller's responsibility to evaluate
5783 * the response.
5786 * -Error: Error value describes the condition found.
5790 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5791 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5796 uint32_t alloc_len, mbox_tmo;
5798 /* Calculate the total requested length of the dma memory */
5799 req_len = extnt_cnt * sizeof(uint16_t);
5801 /*
5802 * Calculate the size of an embedded mailbox. The uint32_t
5803 * accounts for the extents-specific word.
5804 */
5805 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5806 sizeof(uint32_t);
5808 /*
5809 * Presume the allocation and response will fit into an embedded
5810 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5811 */
5812 *emb = LPFC_SLI4_MBX_EMBED;
5813 if (req_len > emb_len) {
5814 req_len = extnt_cnt * sizeof(uint16_t) +
5815 sizeof(union lpfc_sli4_cfg_shdr) +
5816 sizeof(uint32_t);
5817 *emb = LPFC_SLI4_MBX_NEMBED;
5820 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5821 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5822 req_len, *emb);
5823 if (alloc_len < req_len) {
5824 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5825 "2982 Allocated DMA memory size (x%x) is "
5826 "less than the requested DMA memory "
5827 "size (x%x)\n", alloc_len, req_len);
5828 return -ENOMEM;
5829 }
5830 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5831 if (unlikely(rc))
5832 return rc;
5834 if (!phba->sli4_hba.intr_enable)
5835 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5837 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5838 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
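/*
 * Illustrative arithmetic (assumed sizes, for intuition only): with a
 * 256-byte MAILBOX_t, a 28-byte mbox_header and the 4-byte extents word,
 * emb_len is about 224 bytes, so a request for more than ~112 two-byte
 * extent ids makes req_len exceed emb_len and the command is reissued as
 * a non-embedded (LPFC_SLI4_MBX_NEMBED) mailbox backed by external SGEs.
 */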
5847 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5848 * @phba: Pointer to HBA context object.
5849 * @type: The resource extent type to allocate.
5851 * This function allocates the number of elements for the specified
5855 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5858 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5859 uint16_t rsrc_id, rsrc_start, j, k;
5862 unsigned long longs;
5863 unsigned long *bmask;
5864 struct lpfc_rsrc_blks *rsrc_blks;
5867 struct lpfc_id_range *id_array = NULL;
5868 void *virtaddr = NULL;
5869 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5870 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5871 struct list_head *ext_blk_list;
5873 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5874 &rsrc_cnt,
5875 &rsrc_size);
5876 if (unlikely(rc))
5877 return -EIO;
5879 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5880 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5881 "3009 No available Resource Extents "
5882 "for resource type 0x%x: Count: 0x%x, "
5883 "Size 0x%x\n", type, rsrc_cnt,
5888 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5889 "2903 Post resource extents type-0x%x: "
5890 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5892 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5893 if (!mbox)
5894 return -ENOMEM;
5896 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5902 /*
5903 * Figure out where the response is located. Then get local pointers
5904 * to the response data. The port does not guarantee to respond to the
5905 * full extent count requested, so update the local variable with the
5906 * allocated count from the port.
5907 */
5908 if (emb == LPFC_SLI4_MBX_EMBED) {
5909 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5910 id_array = &rsrc_ext->u.rsp.id[0];
5911 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5913 virtaddr = mbox->sge_array->addr[0];
5914 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5915 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5916 id_array = &n_rsrc->id;
5919 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5920 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5922 /*
5923 * Based on the resource size and count, correct the base and max
5924 * resource values.
5925 */
5926 length = sizeof(struct lpfc_rsrc_blks);
5928 case LPFC_RSC_TYPE_FCOE_RPI:
5929 phba->sli4_hba.rpi_bmask = kcalloc(longs,
5930 sizeof(unsigned long),
5931 GFP_KERNEL);
5932 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5936 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
5937 sizeof(uint16_t),
5938 GFP_KERNEL);
5939 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5940 kfree(phba->sli4_hba.rpi_bmask);
5946 * The next_rpi was initialized with the maximum available
5947 * count but the port may allocate a smaller number. Catch
5948 * that case and update the next_rpi.
5950 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5952 /* Initialize local ptrs for common extent processing later. */
5953 bmask = phba->sli4_hba.rpi_bmask;
5954 ids = phba->sli4_hba.rpi_ids;
5955 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5957 case LPFC_RSC_TYPE_FCOE_VPI:
5958 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
5959 GFP_KERNEL);
5960 if (unlikely(!phba->vpi_bmask)) {
5964 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
5965 GFP_KERNEL);
5966 if (unlikely(!phba->vpi_ids)) {
5967 kfree(phba->vpi_bmask);
5972 /* Initialize local ptrs for common extent processing later. */
5973 bmask = phba->vpi_bmask;
5974 ids = phba->vpi_ids;
5975 ext_blk_list = &phba->lpfc_vpi_blk_list;
5977 case LPFC_RSC_TYPE_FCOE_XRI:
5978 phba->sli4_hba.xri_bmask = kcalloc(longs,
5979 sizeof(unsigned long),
5980 GFP_KERNEL);
5981 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5985 phba->sli4_hba.max_cfg_param.xri_used = 0;
5986 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
5987 sizeof(uint16_t),
5988 GFP_KERNEL);
5989 if (unlikely(!phba->sli4_hba.xri_ids)) {
5990 kfree(phba->sli4_hba.xri_bmask);
5995 /* Initialize local ptrs for common extent processing later. */
5996 bmask = phba->sli4_hba.xri_bmask;
5997 ids = phba->sli4_hba.xri_ids;
5998 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6000 case LPFC_RSC_TYPE_FCOE_VFI:
6001 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6002 sizeof(unsigned long),
6003 GFP_KERNEL);
6004 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6008 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6009 sizeof(uint16_t),
6010 GFP_KERNEL);
6011 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6012 kfree(phba->sli4_hba.vfi_bmask);
6017 /* Initialize local ptrs for common extent processing later. */
6018 bmask = phba->sli4_hba.vfi_bmask;
6019 ids = phba->sli4_hba.vfi_ids;
6020 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6023 /* Unsupported Opcode. Fail call. */
6027 ext_blk_list = NULL;
6031 /*
6032 * Complete initializing the extent configuration with the
6033 * allocated ids assigned to this function. The bitmask serves
6034 * as an index into the array and manages the available ids. The
6035 * array just stores the ids communicated to the port via the wqes.
6036 */
6037 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6039 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6042 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6045 rsrc_blks = kzalloc(length, GFP_KERNEL);
6046 if (unlikely(!rsrc_blks)) {
6052 rsrc_blks->rsrc_start = rsrc_id;
6053 rsrc_blks->rsrc_size = rsrc_size;
6054 list_add_tail(&rsrc_blks->list, ext_blk_list);
6055 rsrc_start = rsrc_id;
6056 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6057 phba->sli4_hba.io_xri_start = rsrc_start +
6058 lpfc_sli4_get_iocb_cnt(phba);
6061 while (rsrc_id < (rsrc_start + rsrc_size)) {
6066 /* Entire word processed. Get next word.*/
6071 lpfc_sli4_mbox_cmd_free(phba, mbox);
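/*
 * Illustrative sketch (assumption): the bitmask/array pair built above is
 * consumed by the driver's allocators roughly as:
 *
 *	idx = find_first_zero_bit(bmask, max_ids);
 *	set_bit(idx, bmask);
 *	hw_id = ids[idx];	(the id actually placed in the WQE)
 */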
6078 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6079 * @phba: Pointer to HBA context object.
6080 * @type: the extent's type.
6082 * This function deallocates all extents of a particular resource type.
6083 * SLI4 does not allow for deallocating a particular extent range. It
6084 * is the caller's responsibility to release all kernel memory resources.
6087 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6090 uint32_t length, mbox_tmo = 0;
6092 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6093 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6095 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6100 * This function sends an embedded mailbox because it only sends the
6101 * resource type. All extents of this type are released by the port.
6104 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6105 sizeof(struct lpfc_sli4_cfg_mhdr));
6106 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6107 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6108 length, LPFC_SLI4_MBX_EMBED);
6110 /* Send an extents count of 0 - the dealloc doesn't use it. */
6111 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6112 LPFC_SLI4_MBX_EMBED);
6117 if (!phba->sli4_hba.intr_enable)
6118 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6120 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6121 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6128 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6129 if (bf_get(lpfc_mbox_hdr_status,
6130 &dealloc_rsrc->header.cfg_shdr.response)) {
6131 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6132 "2919 Failed to release resource extents "
6133 "for type %d - Status 0x%x Add'l Status 0x%x. "
6134 "Resource memory not released.\n",
6136 bf_get(lpfc_mbox_hdr_status,
6137 &dealloc_rsrc->header.cfg_shdr.response),
6138 bf_get(lpfc_mbox_hdr_add_status,
6139 &dealloc_rsrc->header.cfg_shdr.response));
6144 /* Release kernel memory resources for the specific type. */
6146 case LPFC_RSC_TYPE_FCOE_VPI:
6147 kfree(phba->vpi_bmask);
6148 kfree(phba->vpi_ids);
6149 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6150 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6151 &phba->lpfc_vpi_blk_list, list) {
6152 list_del_init(&rsrc_blk->list);
6155 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6157 case LPFC_RSC_TYPE_FCOE_XRI:
6158 kfree(phba->sli4_hba.xri_bmask);
6159 kfree(phba->sli4_hba.xri_ids);
6160 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6161 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6162 list_del_init(&rsrc_blk->list);
6166 case LPFC_RSC_TYPE_FCOE_VFI:
6167 kfree(phba->sli4_hba.vfi_bmask);
6168 kfree(phba->sli4_hba.vfi_ids);
6169 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6170 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6171 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6172 list_del_init(&rsrc_blk->list);
6176 case LPFC_RSC_TYPE_FCOE_RPI:
6177 /* RPI bitmask and physical id array are cleaned up earlier. */
6178 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6179 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6180 list_del_init(&rsrc_blk->list);
6188 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6191 mempool_free(mbox, phba->mbox_mem_pool);
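/*
 * Illustrative sketch (not driver code): the release loops above use the
 * _safe list iterator because each node is unlinked, and normally freed,
 * while the list is being walked. A minimal standalone version of the
 * pattern:
 */
#if 0
static void example_release_blk_list(struct list_head *blk_list)
{
	struct lpfc_rsrc_blks *blk, *next;

	list_for_each_entry_safe(blk, next, blk_list, list) {
		list_del_init(&blk->list);	/* unlink first */
		kfree(blk);			/* then free the node */
	}
}
#endif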
6196 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6201 len = sizeof(struct lpfc_mbx_set_feature) -
6202 sizeof(struct lpfc_sli4_cfg_mhdr);
6203 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6204 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6205 LPFC_SLI4_MBX_EMBED);
6208 case LPFC_SET_UE_RECOVERY:
6209 bf_set(lpfc_mbx_set_feature_UER,
6210 &mbox->u.mqe.un.set_feature, 1);
6211 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6212 mbox->u.mqe.un.set_feature.param_len = 8;
6214 case LPFC_SET_MDS_DIAGS:
6215 bf_set(lpfc_mbx_set_feature_mds,
6216 &mbox->u.mqe.un.set_feature, 1);
6217 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6218 &mbox->u.mqe.un.set_feature, 1);
6219 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6220 mbox->u.mqe.un.set_feature.param_len = 8;
6222 case LPFC_SET_DUAL_DUMP:
6223 bf_set(lpfc_mbx_set_feature_dd,
6224 &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6225 bf_set(lpfc_mbx_set_feature_ddquery,
6226 &mbox->u.mqe.un.set_feature, 0);
6227 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6228 mbox->u.mqe.un.set_feature.param_len = 4;
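#if 0	/* illustrative usage sketch (hypothetical example_* name) */
	/*
	 * Callers pair lpfc_set_features() with a polled mailbox issue and
	 * treat a non-success status as "feature not supported", as
	 * lpfc_sli4_hba_setup() does further down in this file:
	 */
	static void example_enable_mds_diags(struct lpfc_hba *phba,
					     LPFC_MBOXQ_t *mboxq)
	{
		int rc;

		lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS)
			phba->mds_diags_support = 0;	/* port lacks it */
	}
#endif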
6236 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6237 * @phba: Pointer to HBA context object.
6239 * Disable FW logging into host memory on the adapter. This must be
6240 * done before reading the logs from host memory.
6243 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6245 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6247 spin_lock_irq(&phba->hbalock);
6248 ras_fwlog->state = INACTIVE;
6249 spin_unlock_irq(&phba->hbalock);
6251 /* Disable FW logging to host memory */
6252 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6253 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6255 /* Wait 10ms for firmware to stop using DMA buffer */
6256 usleep_range(10 * 1000, 20 * 1000);
6260 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6261 * @phba: Pointer to HBA context object.
6263 * This function is called to free memory allocated for RAS FW logging
6264 * support in the driver.
6267 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6269 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6270 struct lpfc_dmabuf *dmabuf, *next;
6272 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6273 list_for_each_entry_safe(dmabuf, next,
6274 &ras_fwlog->fwlog_buff_list,
6276 list_del(&dmabuf->list);
6277 dma_free_coherent(&phba->pcidev->dev,
6278 LPFC_RAS_MAX_ENTRY_SIZE,
6279 dmabuf->virt, dmabuf->phys);
6284 if (ras_fwlog->lwpd.virt) {
6285 dma_free_coherent(&phba->pcidev->dev,
6286 sizeof(uint32_t) * 2,
6287 ras_fwlog->lwpd.virt,
6288 ras_fwlog->lwpd.phys);
6289 ras_fwlog->lwpd.virt = NULL;
6292 spin_lock_irq(&phba->hbalock);
6293 ras_fwlog->state = INACTIVE;
6294 spin_unlock_irq(&phba->hbalock);
6298 * lpfc_sli4_ras_dma_alloc: Allocate DMA memory for FW logging support
6299 * @phba: Pointer to HBA context object.
6300 * @fwlog_buff_count: Count of buffers to be created.
6302 * This routine allocates DMA memory for the Log Write Position Data (LWPD)
6303 * and for the buffers that are posted to the adapter for FW log updates.
6304 * The buffer count is calculated from the module param ras_fwlog_buffsize;
6305 * the size of each buffer posted to the FW is 64K.
6309 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6310 uint32_t fwlog_buff_count)
6312 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6313 struct lpfc_dmabuf *dmabuf;
6316 /* Initialize List */
6317 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6319 /* Allocate memory for the LWPD */
6320 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6321 sizeof(uint32_t) * 2,
6322 &ras_fwlog->lwpd.phys,
6324 if (!ras_fwlog->lwpd.virt) {
6325 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6326 "6185 LWPD Memory Alloc Failed\n");
6331 ras_fwlog->fw_buffcount = fwlog_buff_count;
6332 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6333 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6337 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6338 "6186 Memory Alloc failed FW logging");
6342 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6343 LPFC_RAS_MAX_ENTRY_SIZE,
6344 &dmabuf->phys, GFP_KERNEL);
6345 if (!dmabuf->virt) {
6348 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6349 "6187 DMA Alloc Failed FW logging");
6352 dmabuf->buffer_tag = i;
6353 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6358 lpfc_sli4_ras_dma_free(phba);
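/*
 * Illustrative sketch (hypothetical example_* name, not driver code): the
 * loop above pairs a kzalloc of the tracking node with a dma_alloc_coherent
 * of the payload and, on any failure, unwinds every buffer posted so far.
 * Condensed form of the pattern:
 */
#if 0
static int example_alloc_buf_list(struct device *dev, struct list_head *head,
				  int count, size_t size)
{
	struct lpfc_dmabuf *d, *tmp;
	int i;

	for (i = 0; i < count; i++) {
		d = kzalloc(sizeof(*d), GFP_KERNEL);
		if (!d)
			goto unwind;
		d->virt = dma_alloc_coherent(dev, size, &d->phys, GFP_KERNEL);
		if (!d->virt) {
			kfree(d);		/* node without payload */
			goto unwind;
		}
		list_add_tail(&d->list, head);
	}
	return 0;
unwind:
	list_for_each_entry_safe(d, tmp, head, list) {
		list_del(&d->list);
		dma_free_coherent(dev, size, d->virt, d->phys);
		kfree(d);
	}
	return -ENOMEM;
}
#endif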
6364 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6365 * @phba: pointer to lpfc hba data structure.
6366 * @pmb: pointer to the driver internal queue element for mailbox command.
6368 * Completion handler for driver's RAS MBX command to the device.
6371 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6374 union lpfc_sli4_cfg_shdr *shdr;
6375 uint32_t shdr_status, shdr_add_status;
6376 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6380 shdr = (union lpfc_sli4_cfg_shdr *)
6381 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6382 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6383 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6385 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6386 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
6387 "6188 FW LOG mailbox "
6388 "completed with status x%x add_status x%x,"
6389 " mbx status x%x\n",
6390 shdr_status, shdr_add_status, mb->mbxStatus);
6392 ras_fwlog->ras_hwsupport = false;
6396 spin_lock_irq(&phba->hbalock);
6397 ras_fwlog->state = ACTIVE;
6398 spin_unlock_irq(&phba->hbalock);
6399 mempool_free(pmb, phba->mbox_mem_pool);
6404 /* Free RAS DMA memory */
6405 lpfc_sli4_ras_dma_free(phba);
6406 mempool_free(pmb, phba->mbox_mem_pool);
6410 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
6411 * @phba: pointer to lpfc hba data structure.
6412 * @fwlog_level: Logging verbosity level.
6413 * @fwlog_enable: Enable/Disable logging.
6415 * Initialize memory and post mailbox command to enable FW logging in host memory.
6419 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6420 uint32_t fwlog_level,
6421 uint32_t fwlog_enable)
6423 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6424 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6425 struct lpfc_dmabuf *dmabuf;
6427 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6430 spin_lock_irq(&phba->hbalock);
6431 ras_fwlog->state = INACTIVE;
6432 spin_unlock_irq(&phba->hbalock);
6434 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6435 phba->cfg_ras_fwlog_buffsize);
6436 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
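	/*
	 * Worked example of the sizing math (constants illustrative only):
	 * with a 256KB minimum post size and cfg_ras_fwlog_buffsize == 2,
	 *   fwlog_buffsize    = 256KB * 2    = 512KB
	 *   fwlog_entry_count = 512KB / 64KB = 8 buffers posted to the FW
	 */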
6439 * If FW logging support is being re-enabled, reuse the previously
6440 * allocated DMA buffers while posting the MBX command.
6442 if (!ras_fwlog->lwpd.virt) {
6443 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6445 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6446 "6189 FW Log Memory Allocation Failed");
6451 /* Setup Mailbox command */
6452 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6454 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6455 "6190 RAS MBX Alloc Failed");
6460 ras_fwlog->fw_loglevel = fwlog_level;
6461 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6462 sizeof(struct lpfc_sli4_cfg_mhdr));
6464 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6465 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6466 len, LPFC_SLI4_MBX_EMBED);
6468 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6469 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6471 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6472 ras_fwlog->fw_loglevel);
6473 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6474 ras_fwlog->fw_buffcount);
6475 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6476 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6478 /* Update DMA buffer address */
6479 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6480 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6482 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6483 putPaddrLow(dmabuf->phys);
6485 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6486 putPaddrHigh(dmabuf->phys);
6489 /* Update LWPD address */
6490 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6491 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
6493 spin_lock_irq(&phba->hbalock);
6494 ras_fwlog->state = REG_INPROGRESS;
6495 spin_unlock_irq(&phba->hbalock);
6496 mbox->vport = phba->pport;
6497 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
6499 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6501 if (rc == MBX_NOT_FINISHED) {
6502 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6503 "6191 FW-Log Mailbox failed. "
6504 "status %d mbxStatus : x%x", rc,
6505 bf_get(lpfc_mqe_status, &mbox->u.mqe));
6506 mempool_free(mbox, phba->mbox_mem_pool);
6513 lpfc_sli4_ras_dma_free(phba);
6519 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
6520 * @phba: Pointer to HBA context object.
6522 * Check if RAS is supported on the adapter and initialize it.
6525 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6527 /* Check whether the RAS FW log needs to be enabled */
6528 if (lpfc_check_fwlog_support(phba))
6531 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6532 LPFC_RAS_ENABLE_LOGGING);
6536 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
6537 * @phba: Pointer to HBA context object.
6539 * This function allocates all SLI4 resource identifiers.
6542 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6544 int i, rc, error = 0;
6545 uint16_t count, base;
6546 unsigned long longs;
6548 if (!phba->sli4_hba.rpi_hdrs_in_use)
6549 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6550 if (phba->sli4_hba.extents_in_use) {
6552 * The port supports resource extents. The XRI, VPI, VFI, RPI
6553 * resource extent count must be read and allocated before
6554 * provisioning the resource id arrays.
6556 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6557 LPFC_IDX_RSRC_RDY) {
6559 * Extent-based resources are set - the driver could
6560 * be in a port reset. Figure out if any corrective
6561 * actions need to be taken.
6563 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6564 LPFC_RSC_TYPE_FCOE_VFI);
6567 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6568 LPFC_RSC_TYPE_FCOE_VPI);
6571 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6572 LPFC_RSC_TYPE_FCOE_XRI);
6575 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6576 LPFC_RSC_TYPE_FCOE_RPI);
6581 * It's possible that the number of resources
6582 * provided to this port instance changed between
6583 * resets. Detect this condition and reallocate
6584 * resources. Otherwise, there is no action.
6587 lpfc_printf_log(phba, KERN_INFO,
6588 LOG_MBOX | LOG_INIT,
6589 "2931 Detected extent resource "
6590 "change. Reallocating all "
6592 rc = lpfc_sli4_dealloc_extent(phba,
6593 LPFC_RSC_TYPE_FCOE_VFI);
6594 rc = lpfc_sli4_dealloc_extent(phba,
6595 LPFC_RSC_TYPE_FCOE_VPI);
6596 rc = lpfc_sli4_dealloc_extent(phba,
6597 LPFC_RSC_TYPE_FCOE_XRI);
6598 rc = lpfc_sli4_dealloc_extent(phba,
6599 LPFC_RSC_TYPE_FCOE_RPI);
6604 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6608 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6612 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6616 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6619 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6624 * The port does not support resource extents. The XRI, VPI,
6625 * VFI, RPI resource ids were determined from READ_CONFIG.
6626 * Just allocate the bitmasks and provision the resource id
6627 * arrays. If a port reset is active, the resources don't
6628 * need any action - just exit.
6630 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6631 LPFC_IDX_RSRC_RDY) {
6632 lpfc_sli4_dealloc_resource_identifiers(phba);
6633 lpfc_sli4_remove_rpis(phba);
6636 count = phba->sli4_hba.max_cfg_param.max_rpi;
6638 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6639 "3279 Invalid provisioning of "
6644 base = phba->sli4_hba.max_cfg_param.rpi_base;
6645 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6646 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6647 sizeof(unsigned long),
6649 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6653 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
6655 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6657 goto free_rpi_bmask;
6660 for (i = 0; i < count; i++)
6661 phba->sli4_hba.rpi_ids[i] = base + i;
6664 count = phba->sli4_hba.max_cfg_param.max_vpi;
6666 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6667 "3280 Invalid provisioning of "
6672 base = phba->sli4_hba.max_cfg_param.vpi_base;
6673 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6674 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6676 if (unlikely(!phba->vpi_bmask)) {
6680 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
6682 if (unlikely(!phba->vpi_ids)) {
6684 goto free_vpi_bmask;
6687 for (i = 0; i < count; i++)
6688 phba->vpi_ids[i] = base + i;
6691 count = phba->sli4_hba.max_cfg_param.max_xri;
6693 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6694 "3281 Invalid provisioning of "
6699 base = phba->sli4_hba.max_cfg_param.xri_base;
6700 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6701 phba->sli4_hba.xri_bmask = kcalloc(longs,
6702 sizeof(unsigned long),
6704 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6708 phba->sli4_hba.max_cfg_param.xri_used = 0;
6709 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
6711 if (unlikely(!phba->sli4_hba.xri_ids)) {
6713 goto free_xri_bmask;
6716 for (i = 0; i < count; i++)
6717 phba->sli4_hba.xri_ids[i] = base + i;
6720 count = phba->sli4_hba.max_cfg_param.max_vfi;
6722 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6723 "3282 Invalid provisioning of "
6728 base = phba->sli4_hba.max_cfg_param.vfi_base;
6729 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6730 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6731 sizeof(unsigned long),
6733 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6737 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
6739 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6741 goto free_vfi_bmask;
6744 for (i = 0; i < count; i++)
6745 phba->sli4_hba.vfi_ids[i] = base + i;
6748 * Mark all resources ready. An HBA reset doesn't need
6749 * to redo this initialization.
6751 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6757 kfree(phba->sli4_hba.vfi_bmask);
6758 phba->sli4_hba.vfi_bmask = NULL;
6760 kfree(phba->sli4_hba.xri_ids);
6761 phba->sli4_hba.xri_ids = NULL;
6763 kfree(phba->sli4_hba.xri_bmask);
6764 phba->sli4_hba.xri_bmask = NULL;
6766 kfree(phba->vpi_ids);
6767 phba->vpi_ids = NULL;
6769 kfree(phba->vpi_bmask);
6770 phba->vpi_bmask = NULL;
6772 kfree(phba->sli4_hba.rpi_ids);
6773 phba->sli4_hba.rpi_ids = NULL;
6775 kfree(phba->sli4_hba.rpi_bmask);
6776 phba->sli4_hba.rpi_bmask = NULL;
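/*
 * Illustrative sketch (hypothetical example_* name, not driver code): the
 * error labels above follow the kernel's reverse-order goto unwind, where
 * each label frees exactly what was allocated before the failure:
 */
#if 0
static int example_alloc_pair(void **a, void **b)
{
	*a = kzalloc(32, GFP_KERNEL);
	if (!*a)
		goto err;			/* nothing allocated yet */
	*b = kzalloc(32, GFP_KERNEL);
	if (!*b)
		goto free_a;			/* undo in reverse order */
	return 0;
free_a:
	kfree(*a);
	*a = NULL;
err:
	return -ENOMEM;
}
#endif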
6782 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
6783 * @phba: Pointer to HBA context object.
6785 * This function frees all SLI4 resource identifiers and their bitmask and id array memory.
6789 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6791 if (phba->sli4_hba.extents_in_use) {
6792 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6793 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6794 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6795 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6797 kfree(phba->vpi_bmask);
6798 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6799 kfree(phba->vpi_ids);
6800 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6801 kfree(phba->sli4_hba.xri_bmask);
6802 kfree(phba->sli4_hba.xri_ids);
6803 kfree(phba->sli4_hba.vfi_bmask);
6804 kfree(phba->sli4_hba.vfi_ids);
6805 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6806 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6813 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6814 * @phba: Pointer to HBA context object.
6815 * @type: The resource extent type.
6816 * @extnt_cnt: buffer to hold port extent count response
6817 * @extnt_size: buffer to hold port extent size response.
6819 * This function calls the port to read the host allocated extents
6820 * for a particular type.
6823 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6824 uint16_t *extnt_cnt, uint16_t *extnt_size)
6828 uint16_t curr_blks = 0;
6829 uint32_t req_len, emb_len;
6830 uint32_t alloc_len, mbox_tmo;
6831 struct list_head *blk_list_head;
6832 struct lpfc_rsrc_blks *rsrc_blk;
6834 void *virtaddr = NULL;
6835 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6836 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6837 union lpfc_sli4_cfg_shdr *shdr;
6840 case LPFC_RSC_TYPE_FCOE_VPI:
6841 blk_list_head = &phba->lpfc_vpi_blk_list;
6843 case LPFC_RSC_TYPE_FCOE_XRI:
6844 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6846 case LPFC_RSC_TYPE_FCOE_VFI:
6847 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6849 case LPFC_RSC_TYPE_FCOE_RPI:
6850 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6856 /* Count the number of extents currently allocated for this type. */
6857 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6858 if (curr_blks == 0) {
6860 * The GET_ALLOCATED mailbox does not return the size,
6861 * just the count. The size should be just the size
6862 * stored in the current allocated block and all sizes
6863 * for an extent type are the same, so set the return value now.
6866 *extnt_size = rsrc_blk->rsrc_size;
6872 * Calculate the size of an embedded mailbox. The uint32_t
6873 * accounts for the extents-specific word.
6875 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6879 * Presume the allocation and response will fit into an embedded
6880 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6882 emb = LPFC_SLI4_MBX_EMBED;
6884 if (req_len > emb_len) {
6885 req_len = curr_blks * sizeof(uint16_t) +
6886 sizeof(union lpfc_sli4_cfg_shdr) +
6888 emb = LPFC_SLI4_MBX_NEMBED;
6891 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6894 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6896 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6897 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6899 if (alloc_len < req_len) {
6900 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6901 "2983 Allocated DMA memory size (x%x) is "
6902 "less than the requested DMA memory "
6903 "size (x%x)\n", alloc_len, req_len);
6907 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6913 if (!phba->sli4_hba.intr_enable)
6914 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6916 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6917 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6926 * Figure out where the response is located. Then get local pointers
6927 * to the response data. The port does not guarantee to respond to
6928 * all extent count requests, so update the local variable with the
6929 * allocated count from the port.
6931 if (emb == LPFC_SLI4_MBX_EMBED) {
6932 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6933 shdr = &rsrc_ext->header.cfg_shdr;
6934 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6936 virtaddr = mbox->sge_array->addr[0];
6937 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6938 shdr = &n_rsrc->cfg_shdr;
6939 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6942 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6943 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6944 "2984 Failed to read allocated resources "
6945 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6947 bf_get(lpfc_mbox_hdr_status, &shdr->response),
6948 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6953 lpfc_sli4_mbox_cmd_free(phba, mbox);
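/*
 * Illustrative recap (hypothetical example_* name, not new driver code):
 * the mailbox issue pattern used above, and throughout this file, keys off
 * whether interrupts are live yet:
 */
#if 0
static int example_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	uint32_t mbox_tmo;

	if (!phba->sli4_hba.intr_enable)
		/* early init/reset path: no interrupts, poll for completion */
		return lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* interrupts enabled: sleep-wait with the per-command timeout */
	mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
	return lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
#endif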
6958 * lpfc_sli4_repost_sgl_list - Repost the buffer sgl pages as a block
6959 * @phba: pointer to lpfc hba data structure.
6961 * @sgl_list: linked list of sgl buffers to post
6962 * @cnt: number of linked list buffers
6964 * This routine walks the list of buffers that have been allocated and
6965 * reposts them to the port by using SGL block post. This is needed after a
6966 * pci_function_reset/warm_start or start. It attempts to construct blocks
6967 * of buffer sgls which contain contiguous xris and uses the non-embedded
6968 * SGL block post mailbox commands to post them to the port. For single
6969 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post
6970 * mailbox command for posting.
6972 * Returns: 0 = success, non-zero failure.
6975 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
6976 struct list_head *sgl_list, int cnt)
6978 struct lpfc_sglq *sglq_entry = NULL;
6979 struct lpfc_sglq *sglq_entry_next = NULL;
6980 struct lpfc_sglq *sglq_entry_first = NULL;
6981 int status, total_cnt;
6982 int post_cnt = 0, num_posted = 0, block_cnt = 0;
6983 int last_xritag = NO_XRI;
6984 LIST_HEAD(prep_sgl_list);
6985 LIST_HEAD(blck_sgl_list);
6986 LIST_HEAD(allc_sgl_list);
6987 LIST_HEAD(post_sgl_list);
6988 LIST_HEAD(free_sgl_list);
6990 spin_lock_irq(&phba->hbalock);
6991 spin_lock(&phba->sli4_hba.sgl_list_lock);
6992 list_splice_init(sgl_list, &allc_sgl_list);
6993 spin_unlock(&phba->sli4_hba.sgl_list_lock);
6994 spin_unlock_irq(&phba->hbalock);
6997 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6998 &allc_sgl_list, list) {
6999 list_del_init(&sglq_entry->list);
7001 if ((last_xritag != NO_XRI) &&
7002 (sglq_entry->sli4_xritag != last_xritag + 1)) {
7003 /* a hole in xri block, form a sgl posting block */
7004 list_splice_init(&prep_sgl_list, &blck_sgl_list);
7005 post_cnt = block_cnt - 1;
7006 /* prepare list for next posting block */
7007 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7010 /* prepare list for next posting block */
7011 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7012 /* enough sgls for non-embed sgl mbox command */
7013 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7014 list_splice_init(&prep_sgl_list,
7016 post_cnt = block_cnt;
7022 /* keep track of last sgl's xritag */
7023 last_xritag = sglq_entry->sli4_xritag;
7025 /* end of repost sgl list condition for buffers */
7026 if (num_posted == total_cnt) {
7027 if (post_cnt == 0) {
7028 list_splice_init(&prep_sgl_list,
7030 post_cnt = block_cnt;
7031 } else if (block_cnt == 1) {
7032 status = lpfc_sli4_post_sgl(phba,
7033 sglq_entry->phys, 0,
7034 sglq_entry->sli4_xritag);
7036 /* successful, put sgl to posted list */
7037 list_add_tail(&sglq_entry->list,
7040 /* Failure, put sgl to free list */
7041 lpfc_printf_log(phba, KERN_WARNING,
7043 "3159 Failed to post "
7044 "sgl, xritag:x%x\n",
7045 sglq_entry->sli4_xritag);
7046 list_add_tail(&sglq_entry->list,
7053 /* continue until a nembed page worth of sgls */
7057 /* post the buffer list sgls as a block */
7058 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7062 /* success, put sgl list to posted sgl list */
7063 list_splice_init(&blck_sgl_list, &post_sgl_list);
7065 /* Failure, put sgl list to free sgl list */
7066 sglq_entry_first = list_first_entry(&blck_sgl_list,
7069 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7070 "3160 Failed to post sgl-list, "
7072 sglq_entry_first->sli4_xritag,
7073 (sglq_entry_first->sli4_xritag +
7075 list_splice_init(&blck_sgl_list, &free_sgl_list);
7076 total_cnt -= post_cnt;
7079 /* don't reset xritag due to hole in xri block */
7081 last_xritag = NO_XRI;
7083 /* reset sgl post count for next round of posting */
7087 /* free the sgls failed to post */
7088 lpfc_free_sgl_list(phba, &free_sgl_list);
7090 /* push sgls posted to the available list */
7091 if (!list_empty(&post_sgl_list)) {
7092 spin_lock_irq(&phba->hbalock);
7093 spin_lock(&phba->sli4_hba.sgl_list_lock);
7094 list_splice_init(&post_sgl_list, sgl_list);
7095 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7096 spin_unlock_irq(&phba->hbalock);
7098 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7099 "3161 Failure to post sgl to port.\n");
7103 /* return the number of XRIs actually posted */
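/*
 * Illustrative sketch (hypothetical example_* name, not driver code): the
 * batching logic above reduces to "cut a block whenever the xri sequence
 * breaks or the non-embedded SGL limit is reached":
 */
#if 0
static int example_count_blocks(const u16 *tags, int n, int limit)
{
	int i, start = 0, blocks = 0;

	for (i = 1; i <= n; i++) {
		if (i == n || tags[i] != tags[i - 1] + 1 ||
		    i - start == limit) {
			blocks++;	/* would post tags[start..i-1] here */
			start = i;
		}
	}
	return blocks;
}
#endif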
7108 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7109 * @phba: pointer to lpfc hba data structure.
7111 * This routine walks the list of nvme buffers that have been allocated and
7112 * reposts them to the port by using SGL block post. This is needed after a
7113 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7114 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7115 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7117 * Returns: 0 = success, non-zero failure.
7120 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7122 LIST_HEAD(post_nblist);
7123 int num_posted, rc = 0;
7125 /* move all NVME buffers that need reposting onto a local list */
7126 lpfc_io_buf_flush(phba, &post_nblist);
7128 /* post the list of nvme buffer sgls to port if available */
7129 if (!list_empty(&post_nblist)) {
7130 num_posted = lpfc_sli4_post_io_sgl_list(
7131 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7132 /* failed to post any nvme buffer, return error */
7133 if (num_posted == 0)
7140 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7144 len = sizeof(struct lpfc_mbx_set_host_data) -
7145 sizeof(struct lpfc_sli4_cfg_mhdr);
7146 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7147 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7148 LPFC_SLI4_MBX_EMBED);
7150 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7151 mbox->u.mqe.un.set_host_data.param_len =
7152 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7153 snprintf(mbox->u.mqe.un.set_host_data.data,
7154 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7155 "Linux %s v"LPFC_DRIVER_VERSION,
7156 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
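	/*
	 * Example of the resulting host-data string (version illustrative
	 * only): a native FC port running a hypothetical driver version
	 * "12.8.0.0" reports "Linux FC v12.8.0.0" to the port; an FCoE
	 * port reports "Linux FCoE v12.8.0.0".
	 */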
7160 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7161 struct lpfc_queue *drq, int count, int idx)
7164 struct lpfc_rqe hrqe;
7165 struct lpfc_rqe drqe;
7166 struct lpfc_rqb *rqbp;
7167 unsigned long flags;
7168 struct rqb_dmabuf *rqb_buffer;
7169 LIST_HEAD(rqb_buf_list);
7171 spin_lock_irqsave(&phba->hbalock, flags);
7173 for (i = 0; i < count; i++) {
7174 /* If the RQ is already full, don't bother */
7175 if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
7177 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7180 rqb_buffer->hrq = hrq;
7181 rqb_buffer->drq = drq;
7182 rqb_buffer->idx = idx;
7183 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7185 while (!list_empty(&rqb_buf_list)) {
7186 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7189 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7190 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7191 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7192 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
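		/*
		 * Illustrative note: each RQE carries the buffer's DMA
		 * address as two 32-bit words; putPaddrLow/High are the
		 * driver's split helpers. Open-coded equivalent for a
		 * dma_addr_t 'pa':
		 */
#if 0
		hrqe.address_lo = lower_32_bits(pa);
		hrqe.address_hi = upper_32_bits(pa);
#endif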
7193 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7195 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7196 "6421 Cannot post to HRQ %d: %x %x %x "
7204 rqbp->rqb_free_buffer(phba, rqb_buffer);
7206 list_add_tail(&rqb_buffer->hbuf.list,
7207 &rqbp->rqb_buffer_list);
7208 rqbp->buffer_count++;
7211 spin_unlock_irqrestore(&phba->hbalock, flags);
7216 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
7217 * @phba: Pointer to HBA context object.
7219 * This function is the main SLI4 device initialization PCI function. This
7220 * function is called by the HBA initialization code, HBA reset code and
7221 * HBA error attention handler code. Caller is not required to hold any lock.
7225 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7227 int rc, i, cnt, len, dd;
7228 LPFC_MBOXQ_t *mboxq;
7229 struct lpfc_mqe *mqe;
7232 uint32_t ftr_rsp = 0;
7233 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7234 struct lpfc_vport *vport = phba->pport;
7235 struct lpfc_dmabuf *mp;
7236 struct lpfc_rqb *rqbp;
7238 /* Perform a PCI function reset to start from clean */
7239 rc = lpfc_pci_function_reset(phba);
7243 /* Check the HBA Host Status Register for readiness */
7244 rc = lpfc_sli4_post_status_check(phba);
7248 spin_lock_irq(&phba->hbalock);
7249 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
7250 spin_unlock_irq(&phba->hbalock);
7254 * Allocate a single mailbox container for initializing the port.
7257 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7261 /* Issue READ_REV to collect vpd and FW information. */
7262 vpd_size = SLI4_PAGE_SIZE;
7263 vpd = kzalloc(vpd_size, GFP_KERNEL);
7269 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
7275 mqe = &mboxq->u.mqe;
7276 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
7277 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
7278 phba->hba_flag |= HBA_FCOE_MODE;
7279 phba->fcp_embed_io = 0; /* SLI4 FC support only */
7281 phba->hba_flag &= ~HBA_FCOE_MODE;
7284 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
7286 phba->hba_flag |= HBA_FIP_SUPPORT;
7288 phba->hba_flag &= ~HBA_FIP_SUPPORT;
7290 phba->hba_flag &= ~HBA_IOQ_FLUSH;
7292 if (phba->sli_rev != LPFC_SLI_REV4) {
7293 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7294 "0376 READ_REV Error. SLI Level %d "
7295 "FCoE enabled %d\n",
7296 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
7303 * Continue initialization with default values even if driver failed
7304 * to read FCoE param config regions; only read parameters if the FCoE feature is enabled.
7307 if (phba->hba_flag & HBA_FCOE_MODE &&
7308 lpfc_sli4_read_fcoe_params(phba))
7309 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
7310 "2570 Failed to read FCoE parameters\n");
7313 * Retrieve the SLI4 device physical port name; failure to do so
7314 * is considered non-fatal.
7316 rc = lpfc_sli4_retrieve_pport_name(phba);
7318 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7319 "3080 Successful retrieving SLI4 device "
7320 "physical port name: %s.\n", phba->Port);
7322 rc = lpfc_sli4_get_ctl_attr(phba);
7324 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7325 "8351 Successful retrieving SLI4 device "
7329 * Evaluate the read rev and vpd data. Populate the driver
7330 * state with the results. If this routine fails, the failure
7331 * is not fatal as the driver will use generic values.
7333 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
7334 if (unlikely(!rc)) {
7335 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7336 "0377 Error %d parsing vpd. "
7337 "Using defaults.\n", rc);
7342 /* Save information as VPD data */
7343 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
7344 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
7347 * This is because the first G7 ASIC doesn't support the standard
7348 * 0x5a NVME cmd descriptor type/subtype
7350 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7351 LPFC_SLI_INTF_IF_TYPE_6) &&
7352 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
7353 (phba->vpd.rev.smRev == 0) &&
7354 (phba->cfg_nvme_embed_cmd == 1))
7355 phba->cfg_nvme_embed_cmd = 0;
7357 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
7358 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
7360 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
7362 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
7364 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
7366 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
7367 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
7368 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
7369 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
7370 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
7371 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
7372 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7373 "(%d):0380 READ_REV Status x%x "
7374 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
7375 mboxq->vport ? mboxq->vport->vpi : 0,
7376 bf_get(lpfc_mqe_status, mqe),
7377 phba->vpd.rev.opFwName,
7378 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
7379 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
7381 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7382 LPFC_SLI_INTF_IF_TYPE_0) {
7383 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
7384 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7385 if (rc == MBX_SUCCESS) {
7386 phba->hba_flag |= HBA_RECOVERABLE_UE;
7387 /* Set 1Sec interval to detect UE */
7388 phba->eratt_poll_interval = 1;
7389 phba->sli4_hba.ue_to_sr = bf_get(
7390 lpfc_mbx_set_feature_UESR,
7391 &mboxq->u.mqe.un.set_feature);
7392 phba->sli4_hba.ue_to_rp = bf_get(
7393 lpfc_mbx_set_feature_UERP,
7394 &mboxq->u.mqe.un.set_feature);
7398 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
7399 /* Enable MDS Diagnostics only if the SLI Port supports it */
7400 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
7401 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7402 if (rc != MBX_SUCCESS)
7403 phba->mds_diags_support = 0;
7407 * Discover the port's supported feature set and match it against the host's requests.
7410 lpfc_request_features(phba, mboxq);
7411 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7418 * The port must support FCP initiator mode as this is the
7419 * only mode running in the host.
7421 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7422 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7423 "0378 No support for fcpi mode.\n");
7427 /* Performance Hints are ONLY for FCoE */
7428 if (phba->hba_flag & HBA_FCOE_MODE) {
7429 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7430 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7432 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
7436 * If the port cannot support the host's requested features
7437 * then turn off the global config parameters to disable the
7438 * feature in the driver. This is not a fatal error.
7440 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7441 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7442 phba->cfg_enable_bg = 0;
7443 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
7448 if (phba->max_vpi && phba->cfg_enable_npiv &&
7449 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7453 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7454 "0379 Feature Mismatch Data: x%08x %08x "
7455 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7456 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7457 phba->cfg_enable_npiv, phba->max_vpi);
7458 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7459 phba->cfg_enable_bg = 0;
7460 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7461 phba->cfg_enable_npiv = 0;
7464 /* These SLI3 features are assumed in SLI4 */
7465 spin_lock_irq(&phba->hbalock);
7466 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7467 spin_unlock_irq(&phba->hbalock);
7469 /* Always try to enable dual dump feature if we can */
7470 lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
7471 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7472 dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
7473 if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
7474 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_INIT,
7475 "6448 Dual Dump is enabled\n");
7477 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
7478 "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
7480 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7481 lpfc_sli_config_mbox_subsys_get(
7483 lpfc_sli_config_mbox_opcode_get(
7487 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
7488 * calls depend on these resources to complete port setup.
7490 rc = lpfc_sli4_alloc_resource_identifiers(phba);
7492 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7493 "2920 Failed to alloc Resource IDs "
7498 lpfc_set_host_data(phba, mboxq);
7500 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7502 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7503 "2134 Failed to set host os driver version %x",
7507 /* Read the port's service parameters. */
7508 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7510 phba->link_state = LPFC_HBA_ERROR;
7515 mboxq->vport = vport;
7516 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7517 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
7518 if (rc == MBX_SUCCESS) {
7519 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7524 * This memory was allocated by the lpfc_read_sparam routine. Release
7525 * it to the mbuf pool.
7527 lpfc_mbuf_free(phba, mp->virt, mp->phys);
7529 mboxq->ctx_buf = NULL;
7531 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7532 "0382 READ_SPARAM command failed "
7533 "status %d, mbxStatus x%x\n",
7534 rc, bf_get(lpfc_mqe_status, mqe));
7535 phba->link_state = LPFC_HBA_ERROR;
7540 lpfc_update_vport_wwn(vport);
7542 /* Update the fc_host data structures with new wwn. */
7543 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7544 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7546 /* Create all the SLI4 queues */
7547 rc = lpfc_sli4_queue_create(phba);
7549 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7550 "3089 Failed to allocate queues\n");
7554 /* Set up all the queues to the device */
7555 rc = lpfc_sli4_queue_setup(phba);
7557 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7558 "0381 Error %d during queue setup.\n ", rc);
7559 goto out_stop_timers;
7561 /* Initialize the driver internal SLI layer lists. */
7562 lpfc_sli4_setup(phba);
7563 lpfc_sli4_queue_init(phba);
7565 /* update host els xri-sgl sizes and mappings */
7566 rc = lpfc_sli4_els_sgl_update(phba);
7568 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7569 "1400 Failed to update xri-sgl size and "
7570 "mapping: %d\n", rc);
7571 goto out_destroy_queue;
7574 /* register the els sgl pool to the port */
7575 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7576 phba->sli4_hba.els_xri_cnt);
7577 if (unlikely(rc < 0)) {
7578 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7579 "0582 Error %d during els sgl post "
7582 goto out_destroy_queue;
7584 phba->sli4_hba.els_xri_cnt = rc;
7586 if (phba->nvmet_support) {
7587 /* update host nvmet xri-sgl sizes and mappings */
7588 rc = lpfc_sli4_nvmet_sgl_update(phba);
7590 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7591 "6308 Failed to update nvmet-sgl size "
7592 "and mapping: %d\n", rc);
7593 goto out_destroy_queue;
7596 /* register the nvmet sgl pool to the port */
7597 rc = lpfc_sli4_repost_sgl_list(
7599 &phba->sli4_hba.lpfc_nvmet_sgl_list,
7600 phba->sli4_hba.nvmet_xri_cnt);
7601 if (unlikely(rc < 0)) {
7602 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7603 "3117 Error %d during nvmet "
7606 goto out_destroy_queue;
7608 phba->sli4_hba.nvmet_xri_cnt = rc;
7610 /* We allocate an iocbq for every receive context SGL.
7611 * The additional allocation is for abort and ls handling.
7613 cnt = phba->sli4_hba.nvmet_xri_cnt +
7614 phba->sli4_hba.max_cfg_param.max_xri;
7616 /* update host common xri-sgl sizes and mappings */
7617 rc = lpfc_sli4_io_sgl_update(phba);
7619 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7620 "6082 Failed to update nvme-sgl size "
7621 "and mapping: %d\n", rc);
7622 goto out_destroy_queue;
7625 /* register the allocated common sgl pool to the port */
7626 rc = lpfc_sli4_repost_io_sgl_list(phba);
7628 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7629 "6116 Error %d during nvme sgl post "
7631 /* Some NVME buffers were moved to abort nvme list */
7632 /* A pci function reset will repost them */
7634 goto out_destroy_queue;
7636 /* Each lpfc_io_buf job structure has an iocbq element.
7637 * This cnt provides for abort, els, ct and ls requests.
7639 cnt = phba->sli4_hba.max_cfg_param.max_xri;
7642 if (!phba->sli.iocbq_lookup) {
7643 /* Initialize and populate the iocb list per host */
7644 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7645 "2821 initialize iocb list with %d entries\n",
7647 rc = lpfc_init_iocb_list(phba, cnt);
7649 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7650 "1413 Failed to init iocb list.\n");
7651 goto out_destroy_queue;
7655 if (phba->nvmet_support)
7656 lpfc_nvmet_create_targetport(phba);
7658 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7659 /* Post initial buffers to all RQs created */
7660 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7661 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7662 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7663 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7664 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
7665 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7666 rqbp->buffer_count = 0;
7668 lpfc_post_rq_buffer(
7669 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7670 phba->sli4_hba.nvmet_mrq_data[i],
7671 phba->cfg_nvmet_mrq_post, i);
7675 /* Post the rpi header region to the device. */
7676 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7678 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7679 "0393 Error %d during rpi post operation\n",
7682 goto out_destroy_queue;
7684 lpfc_sli4_node_prep(phba);
7686 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7687 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7689 * The FC Port needs to register FCFI (index 0)
7691 lpfc_reg_fcfi(phba, mboxq);
7692 mboxq->vport = phba->pport;
7693 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7694 if (rc != MBX_SUCCESS)
7695 goto out_unset_queue;
7697 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7698 &mboxq->u.mqe.un.reg_fcfi);
7700 /* We are in NVME Target mode with MRQ > 1 */
7702 /* First register the FCFI */
7703 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7704 mboxq->vport = phba->pport;
7705 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7706 if (rc != MBX_SUCCESS)
7707 goto out_unset_queue;
7709 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7710 &mboxq->u.mqe.un.reg_fcfi_mrq);
7712 /* Next register the MRQs */
7713 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
7714 mboxq->vport = phba->pport;
7715 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7716 if (rc != MBX_SUCCESS)
7717 goto out_unset_queue;
7720 /* Check if the port is configured to be disabled */
7721 lpfc_sli_read_link_ste(phba);
7724 /* Don't post more new bufs if repost already recovered the nvme sgls. */
7727 if (phba->nvmet_support == 0) {
7728 if (phba->sli4_hba.io_xri_cnt == 0) {
7729 len = lpfc_new_io_buf(
7730 phba, phba->sli4_hba.io_xri_max);
7733 goto out_unset_queue;
7736 if (phba->cfg_xri_rebalancing)
7737 lpfc_create_multixri_pools(phba);
7740 phba->cfg_xri_rebalancing = 0;
7743 /* Allow asynchronous mailbox command to go through */
7744 spin_lock_irq(&phba->hbalock);
7745 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7746 spin_unlock_irq(&phba->hbalock);
7748 /* Post receive buffers to the device */
7749 lpfc_sli4_rb_setup(phba);
7751 /* Reset HBA FCF states after HBA reset */
7752 phba->fcf.fcf_flag = 0;
7753 phba->fcf.current_rec.flag = 0;
7755 /* Start the ELS watchdog timer */
7756 mod_timer(&vport->els_tmofunc,
7757 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
7759 /* Start heart beat timer */
7760 mod_timer(&phba->hb_tmofunc,
7761 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
7762 phba->hb_outstanding = 0;
7763 phba->last_completion_time = jiffies;
7765 /* start eq_delay heartbeat */
7766 if (phba->cfg_auto_imax)
7767 queue_delayed_work(phba->wq, &phba->eq_delay_work,
7768 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
7770 /* Start error attention (ERATT) polling timer */
7771 mod_timer(&phba->eratt_poll,
7772 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
7774 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
7775 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7776 rc = pci_enable_pcie_error_reporting(phba->pcidev);
7778 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7779 "2829 This device supports "
7780 "Advanced Error Reporting (AER)\n");
7781 spin_lock_irq(&phba->hbalock);
7782 phba->hba_flag |= HBA_AER_ENABLED;
7783 spin_unlock_irq(&phba->hbalock);
7785 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7786 "2830 This device does not support "
7787 "Advanced Error Reporting (AER)\n");
7788 phba->cfg_aer_support = 0;
7794 * The port is ready, set the host's link state to LINK_DOWN
7795 * in preparation for link interrupts.
7797 spin_lock_irq(&phba->hbalock);
7798 phba->link_state = LPFC_LINK_DOWN;
7800 /* Check if physical ports are trunked */
7801 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
7802 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
7803 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
7804 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
7805 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
7806 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
7807 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
7808 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
7809 spin_unlock_irq(&phba->hbalock);
7811 /* Arm the CQs and then EQs on device */
7812 lpfc_sli4_arm_cqeq_intr(phba);
7814 /* Indicate device interrupt mode */
7815 phba->sli4_hba.intr_enable = 1;
7817 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7818 (phba->hba_flag & LINK_DISABLED)) {
7819 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7820 "3103 Adapter Link is disabled.\n");
7821 lpfc_down_link(phba, mboxq);
7822 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7823 if (rc != MBX_SUCCESS) {
7824 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7825 "3104 Adapter failed to issue "
7826 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
7827 goto out_io_buff_free;
7829 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7830 /* don't perform init_link on SLI4 FC port loopback test */
7831 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7832 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7834 goto out_io_buff_free;
7837 mempool_free(mboxq, phba->mbox_mem_pool);
7840 /* Free allocated IO Buffers */
7843 /* Unset all the queues set up in this routine when error out */
7844 lpfc_sli4_queue_unset(phba);
7846 lpfc_free_iocb_list(phba);
7847 lpfc_sli4_queue_destroy(phba);
7849 lpfc_stop_hba_timers(phba);
7851 mempool_free(mboxq, phba->mbox_mem_pool);
7856 * lpfc_mbox_timeout - Timeout call back function for mbox timer
7857 * @t: pointer to the timer_list embedded in the hba structure.
7859 * This is the callback function for mailbox timer. The mailbox
7860 * timer is armed when a new mailbox command is issued and the timer
7861 * is deleted when the mailbox completes. The function is called by
7862 * the kernel timer code when a mailbox does not complete within
7863 * expected time. This function wakes up the worker thread to
7864 * process the mailbox timeout and returns. All the processing is
7865 * done by the worker thread function lpfc_mbox_timeout_handler.
7868 lpfc_mbox_timeout(struct timer_list *t)
7870 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
7871 unsigned long iflag;
7872 uint32_t tmo_posted;
7874 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
7875 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
7877 phba->pport->work_port_events |= WORKER_MBOX_TMO;
7878 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7881 lpfc_worker_wake_up(phba);
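/*
 * Illustrative sketch (hypothetical example_* names, not driver code): the
 * from_timer() pattern above recovers the owning structure from the embedded
 * timer_list, so the callback needs no opaque context pointer:
 */
#if 0
struct example_obj {
	struct timer_list tmo;		/* set up with timer_setup() */
	int events;
};

static void example_timeout(struct timer_list *t)
{
	struct example_obj *obj = from_timer(obj, t, tmo);

	obj->events++;			/* flag work; defer heavy lifting */
}
#endif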
7886 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
7888 * @phba: Pointer to HBA context object.
7890 * This function checks if any mailbox completions are present on the mailbox completion queue.
7894 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
7898 struct lpfc_queue *mcq;
7899 struct lpfc_mcqe *mcqe;
7900 bool pending_completions = false;
7903 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7906 /* Check for completions on mailbox completion queue */
7908 mcq = phba->sli4_hba.mbx_cq;
7909 idx = mcq->hba_index;
7910 qe_valid = mcq->qe_valid;
7911 while (bf_get_le32(lpfc_cqe_valid,
7912 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
7913 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
7914 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
7915 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
7916 pending_completions = true;
7919 idx = (idx + 1) % mcq->entry_count;
7920 if (mcq->hba_index == idx)
7923 /* if the index wrapped around, toggle the valid bit */
7924 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
7925 qe_valid = (qe_valid) ? 0 : 1;
7927 return pending_completions;
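/*
 * Illustrative sketch (hypothetical example_* name, not driver code): on
 * ports with the cqav capability, consumed entries are never cleared;
 * instead the expected valid bit flips each time the index wraps, so entries
 * left over from the previous lap fail the comparison:
 */
#if 0
static int example_count_valid(const u8 *phase, int entries, int start,
			       u8 expected)
{
	int idx = start, n = 0;

	while (phase[idx] == expected) {
		n++;				/* entry is new this lap */
		idx = (idx + 1) % entries;
		if (idx == start)
			break;			/* scanned a full lap */
		if (idx == 0)
			expected = !expected;	/* wrapped: phase flips */
	}
	return n;
}
#endif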
7932 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
7934 * @phba: Pointer to HBA context object.
7936 * For sli4, it is possible to miss an interrupt. As such mbox completions
7937 * may be missed, causing erroneous mailbox timeouts to occur. This function
7938 * checks to see if mbox completions are on the mailbox completion queue
7939 * and will process all the completions associated with the eq for the
7940 * mailbox completion queue.
7943 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
7945 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
7947 struct lpfc_queue *fpeq = NULL;
7948 struct lpfc_queue *eq;
7951 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7954 /* Find the EQ associated with the mbox CQ */
7955 if (sli4_hba->hdwq) {
7956 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
7957 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
7958 if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
7967 /* Turn off interrupts from this EQ */
7969 sli4_hba->sli4_eq_clr_intr(fpeq);
7971 /* Check to see if a mbox completion is pending */
7973 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
7976 * If a mbox completion is pending, process all the events on EQ
7977 * associated with the mbox completion queue (this could include
7978 * mailbox commands, async events, els commands, receive queue data
7983 /* process and rearm the EQ */
7984 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
7986 /* Always clear and re-arm the EQ */
7987 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
7989 return mbox_pending;
7994 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
7995 * @phba: Pointer to HBA context object.
7997 * This function is called from worker thread when a mailbox command times out.
7998 * The caller is not required to hold any locks. This function will reset the
7999 * HBA and recover all the pending commands.
8002 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
8004 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
8005 MAILBOX_t *mb = NULL;
8007 struct lpfc_sli *psli = &phba->sli;
8009 /* If the mailbox completed, process the completion and return */
8010 if (lpfc_sli4_process_missed_mbox_completions(phba))
8015 /* Check the pmbox pointer first. There is a race condition
8016 * between the mbox timeout handler getting executed in the
8017 * worklist and the mailbox actually completing. When this
8018 * race condition occurs, the mbox_active will be NULL.
8020 spin_lock_irq(&phba->hbalock);
8021 if (pmbox == NULL) {
8022 lpfc_printf_log(phba, KERN_WARNING,
8024 "0353 Active Mailbox cleared - mailbox timeout "
8026 spin_unlock_irq(&phba->hbalock);
8030 /* Mbox cmd <mbxCommand> timeout */
8031 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8032 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
8034 phba->pport->port_state,
8036 phba->sli.mbox_active);
8037 spin_unlock_irq(&phba->hbalock);
8039 /* Setting state unknown so lpfc_sli_abort_iocb_ring
8040 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
8041 * it to fail all outstanding SCSI IO.
8043 spin_lock_irq(&phba->pport->work_port_lock);
8044 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8045 spin_unlock_irq(&phba->pport->work_port_lock);
8046 spin_lock_irq(&phba->hbalock);
8047 phba->link_state = LPFC_LINK_UNKNOWN;
8048 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8049 spin_unlock_irq(&phba->hbalock);
8051 lpfc_sli_abort_fcp_rings(phba);
8053 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8054 "0345 Resetting board due to mailbox timeout\n");
8056 /* Reset the HBA device */
8057 lpfc_reset_hba(phba);
8061 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
8062 * @phba: Pointer to HBA context object.
8063 * @pmbox: Pointer to mailbox object.
8064 * @flag: Flag indicating how the mailbox need to be processed.
8066 * This function is called by discovery code and HBA management code
8067 * to submit a mailbox command to firmware with SLI-3 interface spec. This
8068 * function gets the hbalock to protect the data structures.
8069 * The mailbox command can be submitted in polling mode, in which case
8070 * this function will wait in a polling loop for the completion of the
8072 * If the mailbox is submitted in no_wait mode (not polling) the
8073 * function will submit the command and return immediately without waiting
8074 * for the mailbox completion. The no_wait is supported only when HBA
8075 * is in SLI2/SLI3 mode - interrupts are enabled.
8076 * The SLI interface allows only one mailbox pending at a time. If the
8077 * mailbox is issued in polling mode and there is already a mailbox
8078 * pending, then the function will return an error. If the mailbox is issued
8079 * in NO_WAIT mode and there is a mailbox pending already, the function
8080 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
8081 * The sli layer owns the mailbox object until the completion of mailbox
8082 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
8083 * return codes the caller owns the mailbox command after the return of the function.
8087 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
8091 struct lpfc_sli *psli = &phba->sli;
8092 uint32_t status, evtctr;
8093 uint32_t ha_copy, hc_copy;
8095 unsigned long timeout;
8096 unsigned long drvr_flag = 0;
8097 uint32_t word0, ldata;
8098 void __iomem *to_slim;
8099 int processing_queue = 0;
8101 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8103 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8104 /* processing mbox queue from intr_handler */
8105 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8106 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8109 processing_queue = 1;
8110 pmbox = lpfc_mbox_get(phba);
8112 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
	    pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
		if (!pmbox->vport) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_VPORT,
					"1806 Mbox x%x failed. No vport\n",
					pmbox->u.mb.mbxCommand);
			dump_stack();
			goto out_not_finished;
		}
	}

	/* If the PCI channel is in offline state, do not post mbox. */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	psli = &phba->sli;

	mbx = &pmbox->u.mb;
	status = MBX_SUCCESS;

	if (phba->link_state == LPFC_HBA_ERROR) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		/* Mbox command <mbxCommand> cannot issue */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):0311 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
		goto out_not_finished;
	}

	if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
		if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
		    !(hc_copy & HC_MBINT_ENA)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2528 Mailbox command x%x cannot "
					"issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
			goto out_not_finished;
		}
	}
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/* Polling for a mbox command when another one is already active
		 * is not allowed in SLI. Also, the driver must have established
		 * SLI2 mode to queue and process multiple mbox commands.
		 */

		if (flag & MBX_POLL) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2529 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2530 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		/* Another mailbox command is still being processed, queue this
		 * command to be processed later.
		 */
		lpfc_mbox_put(phba, pmbox);

		/* Mbox cmd issue - BUSY */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0308 Mbox cmd issue - BUSY Data: "
				"x%x x%x x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
				mbx->mbxCommand,
				phba->pport ? phba->pport->port_state : 0xff,
				psli->sli_flag, flag);

		psli->slistat.mbox_busy++;
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX Bsy vport: cmd:x%x mb:x%x x%x",
					(uint32_t)mbx->mbxCommand,
					mbx->un.varWords[0], mbx->un.varWords[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX Bsy: cmd:x%x mb:x%x x%x",
					(uint32_t)mbx->mbxCommand,
					mbx->un.varWords[0], mbx->un.varWords[1]);
		}

		return MBX_BUSY;
	}
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* If we are not polling, we MUST be in SLI2 mode */
	if (flag != MBX_POLL) {
		if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
		    (mbx->mbxCommand != MBX_KILL_BOARD)) {
			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2531 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}
		/* timeout active mbox command */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
					   1000);
		mod_timer(&psli->mbox_tmo, jiffies + timeout);
	}

	/* Mailbox cmd <cmd> issue */
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
			"x%x\n",
			pmbox->vport ? pmbox->vport->vpi : 0,
			mbx->mbxCommand,
			phba->pport ? phba->pport->port_state : 0xff,
			psli->sli_flag, flag);
	if (mbx->mbxCommand != MBX_HEARTBEAT) {
		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX Send vport: cmd:x%x mb:x%x x%x",
					(uint32_t)mbx->mbxCommand,
					mbx->un.varWords[0], mbx->un.varWords[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX Send: cmd:x%x mb:x%x x%x",
					(uint32_t)mbx->mbxCommand,
					mbx->un.varWords[0], mbx->un.varWords[1]);
		}
	}

	psli->slistat.mbox_cmd++;
	evtctr = psli->slistat.mbox_event;
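	/* From here the command is either DMA'd through the host mailbox
	 * (SLI2 mode, LPFC_SLI_ACTIVE set) or written directly into the
	 * adapter's SLIM region with memory-mapped I/O; in the SLIM path
	 * word 0 is written last so that mbxOwner hands ownership to the
	 * chip only once the rest of the command is in place.
	 */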
	/* next set own bit for the adapter and copy over command word */
	mbx->mbxOwner = OWN_CHIP;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Populate mbox extension offset word. */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
				= (uint8_t *)phba->mbox_ext
				  - (uint8_t *)phba->mbox;
		}

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
			lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
					      (uint8_t *)phba->mbox_ext,
					      pmbox->in_ext_byte_len);
		}
		/* Copy command data to host SLIM area */
		lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
	} else {
		/* Populate mbox extension offset word. */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
				= MAILBOX_HBA_EXT_OFFSET;

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
			lpfc_memcpy_to_slim(phba->MBslimaddr +
					    MAILBOX_HBA_EXT_OFFSET,
					    pmbox->ctx_buf,
					    pmbox->in_ext_byte_len);

		if (mbx->mbxCommand == MBX_CONFIG_PORT)
			/* copy command data into host mbox for cmpl */
			lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
					      MAILBOX_CMD_SIZE);

		/* First copy mbox command data to HBA SLIM, skip past first
		 * word
		 */
		to_slim = phba->MBslimaddr + sizeof(uint32_t);
		lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
				    MAILBOX_CMD_SIZE - sizeof(uint32_t));

		/* Next copy over first word, with mbxOwner set */
		ldata = *((uint32_t *)mbx);
		to_slim = phba->MBslimaddr;
		writel(ldata, to_slim);
		readl(to_slim); /* flush */

		if (mbx->mbxCommand == MBX_CONFIG_PORT)
			/* switch over to host mailbox */
			psli->sli_flag |= LPFC_SLI_ACTIVE;
	}
	wmb();

	switch (flag) {
	case MBX_NOWAIT:
		/* Set up reference to mailbox command */
		psli->mbox_active = pmbox;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
		/* Don't wait for it to finish, just return */
		break;

	case MBX_POLL:
		/* Set up null reference to mailbox command */
		psli->mbox_active = NULL;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
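		/* Poll for completion: the command is done when the adapter
		 * clears OWN_CHIP in mailbox word 0 or raises the HA_MBATT
		 * attention bit; the wait is bounded by the per-command
		 * mailbox timeout.
		 */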
		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* First read mbox status word */
			word0 = *((uint32_t *)phba->mbox);
			word0 = le32_to_cpu(word0);
		} else {
			/* First read mbox status word */
			if (lpfc_readl(phba->MBslimaddr, &word0)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}

		/* Read the HBA Host Attention Register */
		if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
			spin_unlock_irqrestore(&phba->hbalock,
					       drvr_flag);
			goto out_not_finished;
		}
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
					   1000) + jiffies;
		i = 0;
		/* Wait for command to complete */
		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
		       (!(ha_copy & HA_MBATT) &&
			(phba->link_state > LPFC_WARM_START))) {
			if (time_after(jiffies, timeout)) {
				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}

			/* Check if we took a mbox interrupt while we were
			 * polling
			 */
			if (((word0 & OWN_CHIP) != OWN_CHIP)
			    && (evtctr != psli->slistat.mbox_event))
				break;

			if (i++ > 10) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				msleep(1);
				spin_lock_irqsave(&phba->hbalock, drvr_flag);
			}
			if (psli->sli_flag & LPFC_SLI_ACTIVE) {
				/* First copy command data */
				word0 = *((uint32_t *)phba->mbox);
				word0 = le32_to_cpu(word0);
				if (mbx->mbxCommand == MBX_CONFIG_PORT) {
					MAILBOX_t *slimmb;
					uint32_t slimword0;
					/* Check real SLIM for any errors */
					slimword0 = readl(phba->MBslimaddr);
					slimmb = (MAILBOX_t *)&slimword0;
					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
					    && slimmb->mbxStatus) {
						psli->sli_flag &=
							~LPFC_SLI_ACTIVE;
						word0 = slimword0;
					}
				}
			} else {
				/* First copy command data */
				word0 = readl(phba->MBslimaddr);
			}
			/* Read the HBA Host Attention Register */
			if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}
		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* copy results back to user */
			lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
					      MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
						      pmbox->ctx_buf,
						      pmbox->out_ext_byte_len);
			}
		} else {
			/* First copy command data */
			lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
					      MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
				lpfc_memcpy_from_slim(
					pmbox->ctx_buf,
					phba->MBslimaddr +
					MAILBOX_HBA_EXT_OFFSET,
					pmbox->out_ext_byte_len);
			}
		}

		writel(HA_MBATT, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		status = mbx->mbxStatus;
	}

	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return status;

out_not_finished:
	if (processing_queue) {
		pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
		lpfc_mbox_cmpl_put(phba, pmbox);
	}
	return MBX_NOT_FINISHED;
}
/**
 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox commands
 * @phba: Pointer to HBA context object.
 *
 * The function blocks the posting of SLI4 asynchronous mailbox commands from
 * the driver internal pending mailbox queue. It will then try to wait out any
 * outstanding mailbox command before returning.
 *
 * Returns:
 *	0 - the outstanding mailbox command completed; otherwise, the wait for
 *	the outstanding mailbox command timed out.
 **/
static int
lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc = 0;
	unsigned long timeout = 0;

	/* Mark the asynchronous mailbox command posting as blocked */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	/* Determine how long we might wait for the active mailbox
	 * command to be gracefully completed by firmware.
	 */
	if (phba->sli.mbox_active)
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
	spin_unlock_irq(&phba->hbalock);

	/* Make sure the mailbox is really active */
	if (timeout)
		lpfc_sli4_process_missed_mbox_completions(phba);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			/* Timeout, mark the outstanding cmd not complete */
			rc = 1;
			break;
		}
	}

	/* Cannot cleanly block the async mailbox command, fail it */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}
/**
 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox commands
 * @phba: Pointer to HBA context object.
 *
 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
 * commands from the driver internal pending mailbox queue. It makes sure
 * that there is no outstanding mailbox command before resuming posting
 * asynchronous mailbox commands. If, for any reason, there is an outstanding
 * mailbox command, it will try to wait it out before resuming asynchronous
 * mailbox command posting.
 **/
static void
lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		/* Asynchronous mailbox posting is not blocked, do nothing */
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* An outstanding synchronous mailbox command is guaranteed to be
	 * done, successfully or by timeout; after a timeout the outstanding
	 * mailbox command is always removed, so just unblock async mailbox
	 * command posting and resume.
	 */
	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* wake up worker thread to post asynchronous mailbox command */
	lpfc_worker_wake_up(phba);
}
/**
 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * The function waits for the bootstrap mailbox register ready bit from
 * the port for twice the regular mailbox command timeout value.
 *
 * Returns:
 *	0 - no timeout on waiting for bootstrap mailbox register ready.
 *	MBXERR_ERROR - wait for bootstrap mailbox register timed out.
 **/
static int
lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	uint32_t db_ready;
	unsigned long timeout;
	struct lpfc_register bmbx_reg;
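
	/* Spin on the bootstrap mailbox ready bit (lpfc_bmbx_rdy),
	 * delaying 2ms between register reads, until the port reports
	 * ready or the command timeout expires.
	 */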
	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
				   * 1000) + jiffies;

	do {
		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
		if (!db_ready)
			mdelay(2);

		if (time_after(jiffies, timeout))
			return MBXERR_ERROR;
	} while (!db_ready);

	return 0;
}
/**
 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * The function posts a mailbox to the port. The mailbox is expected
 * to be completely filled in and ready for the port to operate on it.
 * This routine executes a synchronous completion operation on the
 * mailbox by polling for its completion.
 *
 * The caller must not be holding any locks when calling this routine.
 *
 * Returns:
 *	MBX_SUCCESS - mailbox posted successfully
 *	Any of the MBX error values.
 **/
static int
lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc = MBX_SUCCESS;
	unsigned long iflag;
	uint32_t mcqe_status;
	uint32_t mbx_cmnd;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_mqe *mb = &mboxq->u.mqe;
	struct lpfc_bmbx_create *mbox_rgn;
	struct dma_address *dma_address;

	/*
	 * Only one mailbox can be active to the bootstrap mailbox region
	 * at a time and there is no queueing provided.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2532 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_POLL);
		return MBXERR_ERROR;
	}
	/* The server grabs the token and owns it until release */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
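
	/*
	 * Bootstrap mailbox protocol: copy the MQE into the bmbx region,
	 * then post the region's high DMA address followed by its low DMA
	 * address to the BMBX register, waiting for the ready bit after
	 * each write. The port DMAs the completed MQE and its MCQE back
	 * into the same region.
	 */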
	/* wait for bootstrap mbox register readiness */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;
	/*
	 * Initialize the bootstrap memory region to avoid stale data areas
	 * in the mailbox post. Then copy the caller's mailbox contents to
	 * the bmbx mailbox region.
	 */
	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
	lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
			       sizeof(struct lpfc_mqe));

	/* Post the high mailbox dma address to the port and wait for ready. */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register hi-address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/* Post the low mailbox dma address to the port. */
	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register low address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/*
	 * Read the CQ to ensure the mailbox has completed.
	 * If so, update the mailbox status so that the upper layers
	 * can complete the request normally.
	 */
	lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
			       sizeof(struct lpfc_mqe));
	mbox_rgn = (struct lpfc_bmbx_create *)phba->sli4_hba.bmbx.avirt;
	lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
			       sizeof(struct lpfc_mcqe));
	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
	/*
	 * When the CQE status indicates a failure and the mailbox status
	 * indicates success then copy the CQE status into the mailbox status
	 * (and prefix it with x4000).
	 */
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mb,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
		rc = MBXERR_ERROR;
	} else
		lpfc_sli4_swap_str(phba, mboxq);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
			" x%x x%x CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			bf_get(lpfc_mqe_status, mb),
			mb->un.mb_words[0], mb->un.mb_words[1],
			mb->un.mb_words[2], mb->un.mb_words[3],
			mb->un.mb_words[4], mb->un.mb_words[5],
			mb->un.mb_words[6], mb->un.mb_words[7],
			mb->un.mb_words[8], mb->un.mb_words[9],
			mb->un.mb_words[10], mb->un.mb_words[11],
			mb->un.mb_words[12], mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);
exit:
	/* We are holding the token, no need for a lock when releasing it */
	spin_lock_irqsave(&phba->hbalock, iflag);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
/**
 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This function is called by discovery code and HBA management code to submit
 * a mailbox command to firmware with SLI-4 interface spec.
 *
 * Return codes: the caller owns the mailbox command after the return of the
 * function.
 **/
static int
lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		       uint32_t flag)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long iflags;
	int rc;

	/* dump from issue mailbox command if setup */
	lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);

	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2544 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}
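	/* Three dispatch paths follow: with interrupts disabled the command
	 * is posted synchronously through the bootstrap mailbox; MBX_POLL
	 * with interrupts enabled blocks the async queue first and then
	 * posts synchronously; otherwise the command is queued for the
	 * worker thread to post asynchronously.
	 */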
	/* Detect polling mode and jump to a handler */
	if (!phba->sli4_hba.intr_enable) {
		if (flag == MBX_POLL)
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
		else
			rc = -EIO;
		if (rc != MBX_SUCCESS)
			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
					"(%d):2541 Mailbox command x%x "
					"(x%x/x%x) failure: "
					"mqe_sta: x%x mcqe_sta: x%x/x%x "
					"Data: x%x x%x\n",
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									mboxq),
					lpfc_sli_config_mbox_opcode_get(phba,
									mboxq),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
					bf_get(lpfc_mcqe_ext_status,
					       &mboxq->mcqe),
					psli->sli_flag, flag);
		return rc;
	} else if (flag == MBX_POLL) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"(%d):2542 Try to issue mailbox command "
				"x%x (x%x/x%x) synchronously ahead of async "
				"mailbox command queue: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		/* Try to block the asynchronous mailbox posting */
		rc = lpfc_sli4_async_mbox_block(phba);
		if (!rc) {
			/* Successfully blocked, now issue sync mbox cmd */
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_WARNING,
					LOG_MBOX | LOG_SLI,
					"(%d):2597 Sync Mailbox command "
					"x%x (x%x/x%x) failure: "
					"mqe_sta: x%x mcqe_sta: x%x/x%x "
					"Data: x%x x%x\n",
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									mboxq),
					lpfc_sli_config_mbox_opcode_get(phba,
									mboxq),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
					bf_get(lpfc_mcqe_ext_status,
					       &mboxq->mcqe),
					psli->sli_flag, flag);
			/* Unblock the async mailbox posting afterward */
			lpfc_sli4_async_mbox_unblock(phba);
		}
		return rc;
	}

	/* Now, interrupt mode asynchronous mailbox command */
	rc = lpfc_mbox_cmd_check(phba, mboxq);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2543 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Put the mailbox command to the driver internal FIFO */
	psli->slistat.mbox_busy++;
	spin_lock_irqsave(&phba->hbalock, iflags);
	lpfc_mbox_put(phba, mboxq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0354 Mbox cmd issue - Enqueue Data: "
			"x%x (x%x/x%x) x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state,
			psli->sli_flag, MBX_NOWAIT);
	/* Wake up worker thread to transport mailbox command from head */
	lpfc_worker_wake_up(phba);

	return MBX_BUSY;

out_not_finished:
	return MBX_NOT_FINISHED;
}
/**
 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
 * @phba: Pointer to HBA context object.
 *
 * This function is called by the worker thread to send a mailbox command to
 * SLI4 HBA firmware.
 *
 **/
int
lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mboxq;
	int rc = MBX_SUCCESS;
	unsigned long iflags;
	struct lpfc_mqe *mqe;
	uint32_t mbx_cmnd;

	/* Check interrupt mode before post async mailbox command */
	if (unlikely(!phba->sli4_hba.intr_enable))
		return MBX_NOT_FINISHED;

	/* Check for mailbox command service token */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (unlikely(phba->sli.mbox_active)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0384 There is pending active mailbox cmd\n");
		return MBX_NOT_FINISHED;
	}
	/* Take the mailbox command service token */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* Get the next mailbox command from head of queue */
	mboxq = lpfc_mbox_get(phba);

	/* If no more mailbox command waiting for post, we're done */
	if (!mboxq) {
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_SUCCESS;
	}
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
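
	/* The service token (LPFC_SLI_MBOX_ACTIVE) and the mbox_active
	 * reference are now held; they are released by the mailbox
	 * completion path, or below if the post fails.
	 */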
	/* Check device readiness for posting mailbox command */
	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc))
		/* Driver clean routine will clean up pending mailbox */
		goto out_not_finished;

	/* Prepare the mbox command to be posted */
	mqe = &mboxq->u.mqe;
	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);

	/* Start timer for the mbox_tmo and log some mailbox post messages */
	mod_timer(&psli->mbox_tmo, (jiffies +
		  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
			"x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state, psli->sli_flag);

	if (mbx_cmnd != MBX_HEARTBEAT) {
		if (mboxq->vport) {
			lpfc_debugfs_disc_trc(mboxq->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX Send vport: cmd:x%x mb:x%x x%x",
					mbx_cmnd, mqe->un.mb_words[0],
					mqe->un.mb_words[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX Send: cmd:x%x mb:x%x x%x",
					mbx_cmnd, mqe->un.mb_words[0],
					mqe->un.mb_words[1]);
		}
	}
	psli->slistat.mbox_cmd++;

	/* Post the mailbox command to the port */
	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2533 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_NOWAIT);
		goto out_not_finished;
	}

	return rc;

out_not_finished:
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->sli.mbox_active) {
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		/* Release the token */
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return MBX_NOT_FINISHED;
}
/**
 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
 * the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes: the caller owns the mailbox command after the return of the
 * function.
 **/
int
lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
{
	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
}
/**
 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the mbox interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s3;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s4;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1420 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}
/**
 * __lpfc_sli_ringtx_put - Add an iocb to the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports to add a command
 * iocb to the txq when SLI layer cannot submit the command iocb
 * to the ring.
 **/
void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *piocb)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		lockdep_assert_held(&pring->ring_lock);
	else
		lockdep_assert_held(&phba->hbalock);
	/* Insert the caller's iocb in the txq tail for later processing. */
	list_add_tail(&piocb->list, &pring->txq);
}
/**
 * lpfc_sli_next_iocb - Get the next iocb in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with hbalock held before a new
 * iocb is submitted to the firmware. This function checks
 * txq to flush the iocbs in txq to Firmware before
 * submitting new iocbs to the Firmware.
 * If there are iocbs in the txq which need to be submitted
 * to firmware, lpfc_sli_next_iocb returns the first element
 * of the txq after dequeuing it from txq.
 * If there is no iocb in the txq then the function will return
 * *piocb and *piocb is set to NULL. Caller needs to check
 * *piocb to find if there are more commands in the txq.
 **/
static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		   struct lpfc_iocbq **piocb)
{
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	nextiocb = lpfc_sli_ringtx_get(phba, pring);
	if (!nextiocb) {
		nextiocb = *piocb;
		*piocb = NULL;
	}

	return nextiocb;
}
/**
 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
 * this function allows only iocbs for posting buffers. This function finds
 * the next available slot in the command ring and posts the command to the
 * available slot and writes the port attention register to request HBA start
 * processing new iocb. If there is no slot available in the ring and
 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
 * the function returns IOCB_BUSY.
 *
 * This function is called with hbalock held. The function will return success
 * after it successfully submits the iocb to firmware or after adding it to the
 * txq.
 **/
static int
__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;
	struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];

	lockdep_assert_held(&phba->hbalock);

	if (piocb->iocb_cmpl && (!piocb->vport) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		lpfc_printf_log(phba, KERN_ERR,
				LOG_SLI | LOG_VPORT,
				"1807 IOCB x%x failed. No vport\n",
				piocb->iocb.ulpCommand);
		dump_stack();
		return IOCB_ERROR;
	}

	/* If the PCI channel is in offline state, do not post iocbs. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IOCB_ERROR;

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT))
		return IOCB_ERROR;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of an
	 * outstanding event.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
		goto iocb_busy;

	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
		 * can be issued if the link is not up.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:
			if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
			    (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
			     FC_RCTL_DD_UNSOL_CMD) ||
			    (piocb->iocb.un.genreq64.w5.hcsw.Type !=
			     MENLO_TRANSPORT_TYPE))
				goto iocb_busy;
			break;
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, iocb_cmpl MUST be 0.
			 */
			if (piocb->iocb_cmpl)
				piocb->iocb_cmpl = NULL;
			fallthrough;
		case CMD_CREATE_XRI_CR:
		case CMD_CLOSE_XRI_CN:
		case CMD_CLOSE_XRI_CX:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == LPFC_FCP_RING &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
		goto iocb_busy;
	}

	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

 iocb_busy:
	pring->stats.iocb_cmd_delay++;

 out_busy:

	if (!(flag & SLI_IOCB_RET_IOCB)) {
		__lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}
/**
 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the IOCB
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the IOCB contains a BPL then the list of BDE's is
 * converted to sli4_sge's. If the IOCB contains a single
 * BDE then it is converted to a single sli_sge.
 * The IOCB is still in cpu endianness so the contents of
 * the bpl can be used without byte swapping.
 *
 * Returns valid XRI = Success, NO_XRI = Failure.
 **/
static uint16_t
lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
		  struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl = NULL;
	struct lpfc_dmabuf *dmabuf;
	IOCB_t *icmd;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request list */
	int inbound = 0; /* number of sg reply entries inbound from firmware */

	if (!piocbq || !sglq)
		return xritag;

	sgl = (struct sli4_sge *)sglq->sgl;
	icmd = &piocbq->iocb;
	if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
		return sglq->sli4_xritag;
	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		numBdes = icmd->un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		/* The addrHigh and addrLow fields within the IOCB
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (piocbq->context3)
			dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
		else
			return xritag;

		bpl = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;
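
		/* Walk the BPL and translate each BDE into an sli4_sge:
		 * copy the address words, mark the final entry with the
		 * LAST bit, and for GEN_REQUEST accumulate byte offsets
		 * separately across the request and reply segments.
		 */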
		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi =
			cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
		sgl->addr_lo =
			cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len =
			cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
	}
	return sglq->sli4_xritag;
}
/**
 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to command iocb.
 * @wqe: Pointer to the work queue entry.
 *
 * This routine converts the iocb command to its Work Queue Entry
 * equivalent. The wqe pointer should not have any fields set when
 * this routine is called because it will memcpy over them.
 * This routine does not set the CQ_ID or the WQEC bits in the
 * wqe.
 *
 * Returns: 0 = Success, IOCB_ERROR = Failure.
 **/
static int
lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
		   union lpfc_wqe128 *wqe)
{
	uint32_t xmit_len = 0, total_len = 0;
	uint8_t ct = 0;
	uint32_t fip;
	uint32_t abort_tag;
	uint8_t command_type = ELS_COMMAND_NON_FIP;
	uint8_t cmnd;
	uint16_t xritag;
	uint16_t abrt_iotag;
	struct lpfc_iocbq *abrtiocbq;
	struct ulp_bde64 *bpl = NULL;
	uint32_t els_id = LPFC_ELS_ID_DEFAULT;
	int numBdes, i;
	struct ulp_bde64 bde;
	struct lpfc_nodelist *ndlp;
	uint32_t *pcmd;
	uint32_t if_type;

	fip = phba->hba_flag & HBA_FIP_SUPPORT;
	/* The fcp commands will set command type */
	if (iocbq->iocb_flag & LPFC_IO_FCP)
		command_type = FCP_COMMAND;
	else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
		command_type = ELS_COMMAND_FIP;
	else
		command_type = ELS_COMMAND_NON_FIP;

	if (phba->fcp_embed_io)
		memset(wqe, 0, sizeof(union lpfc_wqe128));
	/* Some of the fields are in the right position already */
	memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
	/* The ct field has moved so reset */
	wqe->generic.wqe_com.word7 = 0;
	wqe->generic.wqe_com.word10 = 0;

	abort_tag = (uint32_t)iocbq->iotag;
	xritag = iocbq->sli4_xritag;
	/* words0-2 bpl convert bde */
	if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		bpl = (struct ulp_bde64 *)
			((struct lpfc_dmabuf *)iocbq->context3)->virt;
		if (!bpl)
			return IOCB_ERROR;

		/* Should already be byte swapped. */
		wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
		wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
		/* swap the size field back to the cpu so we
		 * can assign it to the sgl.
		 */
		wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
		xmit_len = wqe->generic.bde.tus.f.bdeSize;
		total_len = 0;
		for (i = 0; i < numBdes; i++) {
			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
			total_len += bde.tus.f.bdeSize;
		}
	} else
		xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;

	iocbq->iocb.ulpIoTag = iocbq->iotag;
	cmnd = iocbq->iocb.ulpCommand;
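
	/* Translate the IOCB into the command-specific WQE layout. Each
	 * case below fixes up the words that moved or changed meaning
	 * between the IOCB and WQE formats; fields common to all WQEs are
	 * set after the switch.
	 */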
	switch (iocbq->iocb.ulpCommand) {
	case CMD_ELS_REQUEST64_CR:
		if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
			ndlp = iocbq->context_un.ndlp;
		else
			ndlp = (struct lpfc_nodelist *)iocbq->context1;
		if (!iocbq->iocb.ulpLe) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2007 Only Limited Edition cmd Format"
					" supported 0x%x\n",
					iocbq->iocb.ulpCommand);
			return IOCB_ERROR;
		}

		wqe->els_req.payload_len = xmit_len;
		/* Els_request64 has a TMO */
		bf_set(wqe_tmo, &wqe->els_req.wqe_com,
		       iocbq->iocb.ulpTimeout);
		/* Need a VF for word 4 set the vf bit */
		bf_set(els_req64_vf, &wqe->els_req, 0);
		/* And a VFID for word 12 */
		bf_set(els_req64_vfid, &wqe->els_req, 0);
		ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
		bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
		       iocbq->iocb.ulpContext);
		bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
		bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
		/* CCP CCPE PV PRI in word10 were set in the memcpy */
		if (command_type == ELS_COMMAND_FIP)
			els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
					>> LPFC_FIP_ELS_ID_SHIFT);
		pcmd = (uint32_t *)(((struct lpfc_dmabuf *)
				iocbq->context2)->virt);
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
				     *pcmd == ELS_CMD_SCR ||
				     *pcmd == ELS_CMD_RDF ||
				     *pcmd == ELS_CMD_RSCN_XMT ||
				     *pcmd == ELS_CMD_FDISC ||
				     *pcmd == ELS_CMD_LOGO ||
				     *pcmd == ELS_CMD_PLOGI)) {
				bf_set(els_req64_sp, &wqe->els_req, 1);
				bf_set(els_req64_sid, &wqe->els_req,
				       iocbq->vport->fc_myDID);
				if ((*pcmd == ELS_CMD_FLOGI) &&
				    !(phba->fc_topology ==
				      LPFC_TOPOLOGY_LOOP))
					bf_set(els_req64_sid, &wqe->els_req, 0);
				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
				       phba->vpi_ids[iocbq->vport->vpi]);
			} else if (pcmd && iocbq->context1) {
				bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
				       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
			}
		}
		bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
		bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
		wqe->els_req.max_response_payload_len = total_len - xmit_len;
		break;
	case CMD_XMIT_SEQUENCE64_CX:
		bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
		       iocbq->iocb.un.ulpWord[3]);
		bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
		       iocbq->iocb.unsli3.rcvsli3.ox_id);
		/* The entire sequence is transmitted for this IOCB */
		xmit_len = total_len;
		cmnd = CMD_XMIT_SEQUENCE64_CR;
		if (phba->link_flag & LS_LOOPBACK_MODE)
			bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
		fallthrough;
	case CMD_XMIT_SEQUENCE64_CR:
		/* word3 iocb=io_tag32 wqe=reserved */
		wqe->xmit_sequence.rsvd3 = 0;
		/* word4 relative_offset memcpy */
		/* word5 r_ctl/df_ctl memcpy */
		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_LENLOC_WORD12);
		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
		wqe->xmit_sequence.xmit_len = xmit_len;
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_BCAST64_CN:
		/* word3 iocb=iotag32 wqe=seq_payload_len */
		wqe->xmit_bcast64.seq_payload_len = xmit_len;
		/* word4 iocb=rsvd wqe=rsvd */
		/* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
		/* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
		bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
		break;
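	/* The three FCP initiator cases below share one pattern: program
	 * the payload offset and exchange attributes, honor any OAS
	 * priority, and, when fcp_embed_io is set, embed the FCP_CMND
	 * payload in WQE words 22-29 as an immediate BDE so the port does
	 * not have to fetch it from the SGL.
	 */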
	case CMD_FCP_IWRITE64_CR:
		command_type = FCP_COMMAND_DATA_OUT;
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		bf_set(payload_offset_len, &wqe->fcp_iwrite,
		       xmit_len + sizeof(struct fcp_rsp));
		bf_set(cmd_buff_len, &wqe->fcp_iwrite,
		       0);
		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
		bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
		/* Always open the exchange */
		bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
		       LPFC_WQE_LENLOC_WORD4);
		bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
		if (iocbq->iocb_flag & LPFC_IO_OAS) {
			bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
			if (iocbq->priority) {
				bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
				       (iocbq->priority << 1));
			} else {
				bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
				       (phba->cfg_XLanePriority << 1));
			}
		}
		/* Note, word 10 is already initialized to 0 */

		/* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
		if (phba->cfg_enable_pbde)
			bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
		else
			bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

		if (phba->fcp_embed_io) {
			struct lpfc_io_buf *lpfc_cmd;
			struct sli4_sge *sgl;
			struct fcp_cmnd *fcp_cmnd;
			uint32_t *ptr;

			/* 128 byte wqe support here */

			lpfc_cmd = iocbq->context1;
			sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
			fcp_cmnd = lpfc_cmd->fcp_cmnd;

			/* Word 0-2 - FCP_CMND */
			wqe->generic.bde.tus.f.bdeFlags =
				BUFF_TYPE_BDE_IMMED;
			wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
			wqe->generic.bde.addrHigh = 0;
			wqe->generic.bde.addrLow = 88; /* Word 22 */

			bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);

			/* Word 22-29 FCP CMND Payload */
			ptr = &wqe->words[22];
			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
		}
		break;
	case CMD_FCP_IREAD64_CR:
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		bf_set(payload_offset_len, &wqe->fcp_iread,
		       xmit_len + sizeof(struct fcp_rsp));
		bf_set(cmd_buff_len, &wqe->fcp_iread,
		       0);
		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
		bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
		/* Always open the exchange */
		bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
		       LPFC_WQE_LENLOC_WORD4);
		bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
		if (iocbq->iocb_flag & LPFC_IO_OAS) {
			bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
			bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
			if (iocbq->priority) {
				bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
				       (iocbq->priority << 1));
			} else {
				bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
				       (phba->cfg_XLanePriority << 1));
			}
		}
		/* Note, word 10 is already initialized to 0 */

		/* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
		if (phba->cfg_enable_pbde)
			bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
		else
			bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

		if (phba->fcp_embed_io) {
			struct lpfc_io_buf *lpfc_cmd;
			struct sli4_sge *sgl;
			struct fcp_cmnd *fcp_cmnd;
			uint32_t *ptr;

			/* 128 byte wqe support here */

			lpfc_cmd = iocbq->context1;
			sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
			fcp_cmnd = lpfc_cmd->fcp_cmnd;

			/* Word 0-2 - FCP_CMND */
			wqe->generic.bde.tus.f.bdeFlags =
				BUFF_TYPE_BDE_IMMED;
			wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
			wqe->generic.bde.addrHigh = 0;
			wqe->generic.bde.addrLow = 88; /* Word 22 */

			bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
			bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);

			/* Word 22-29 FCP CMND Payload */
			ptr = &wqe->words[22];
			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
		}
		break;
	case CMD_FCP_ICMND64_CR:
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		bf_set(payload_offset_len, &wqe->fcp_icmd,
		       xmit_len + sizeof(struct fcp_rsp));
		bf_set(cmd_buff_len, &wqe->fcp_icmd,
		       0);
		/* word3 iocb=IO_TAG wqe=reserved */
		bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
		/* Always open the exchange */
		bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		if (iocbq->iocb_flag & LPFC_IO_OAS) {
			bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
			bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
			if (iocbq->priority) {
				bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
				       (iocbq->priority << 1));
			} else {
				bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
				       (phba->cfg_XLanePriority << 1));
			}
		}
		/* Note, word 10 is already initialized to 0 */

		if (phba->fcp_embed_io) {
			struct lpfc_io_buf *lpfc_cmd;
			struct sli4_sge *sgl;
			struct fcp_cmnd *fcp_cmnd;
			uint32_t *ptr;

			/* 128 byte wqe support here */

			lpfc_cmd = iocbq->context1;
			sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
			fcp_cmnd = lpfc_cmd->fcp_cmnd;

			/* Word 0-2 - FCP_CMND */
			wqe->generic.bde.tus.f.bdeFlags =
				BUFF_TYPE_BDE_IMMED;
			wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
			wqe->generic.bde.addrHigh = 0;
			wqe->generic.bde.addrLow = 88; /* Word 22 */

			bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
			bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);

			/* Word 22-29 FCP CMND Payload */
			ptr = &wqe->words[22];
			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
		}
		break;
	case CMD_GEN_REQUEST64_CR:
		/* For this command calculate the xmit length of the
		 * request bde.
		 */
		xmit_len = 0;
		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		for (i = 0; i < numBdes; i++) {
			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
			if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
				break;
			xmit_len += bde.tus.f.bdeSize;
		}
		/* word3 iocb=IO_TAG wqe=request_payload_len */
		wqe->gen_req.request_payload_len = xmit_len;
		/* word4 iocb=parameter wqe=relative_offset memcpy */
		/* word5 [rctl, type, df_ctl, la] copied in memcpy */
		/* word6 context tag copied in memcpy */
		if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
			ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2015 Invalid CT %x command 0x%x\n",
					ct, iocbq->iocb.ulpCommand);
			return IOCB_ERROR;
		}
		bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
		bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
		bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
		wqe->gen_req.max_response_payload_len = total_len - xmit_len;
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_ELS_RSP64_CX:
		ndlp = (struct lpfc_nodelist *)iocbq->context1;
		/* words0-2 BDE memcpy */
		/* word3 iocb=iotag32 wqe=response_payload_len */
		wqe->xmit_els_rsp.response_payload_len = xmit_len;
		/* word4 */
		wqe->xmit_els_rsp.word4 = 0;
		/* word5 iocb=rsvd wqe=did */
		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
		       iocbq->iocb.un.xseq64.xmit_els_remoteID);

		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
			if (iocbq->vport->fc_flag & FC_PT2PT) {
				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
				       iocbq->vport->fc_myDID);
				if (iocbq->vport->fc_myDID == Fabric_DID) {
					bf_set(wqe_els_did,
					       &wqe->xmit_els_rsp.wqe_dest, 0);
				}
			}
		}
		bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
		       iocbq->iocb.unsli3.rcvsli3.ox_id);
		if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
			bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
			       phba->vpi_ids[iocbq->vport->vpi]);
		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
		bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		pcmd = (uint32_t *)(((struct lpfc_dmabuf *)
				iocbq->context2)->virt);
		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
			bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
			       iocbq->vport->fc_myDID);
			bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
			bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
			       phba->vpi_ids[phba->pport->vpi]);
		}
		command_type = OTHER_COMMAND;
		break;
	case CMD_CLOSE_XRI_CN:
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
		/* words 0-2 memcpy should be 0 reserved */
		/* port will send abts */
		abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
		if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
			abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
			fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
		} else
			fip = 0;

		if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
			/*
			 * The link is down, or the command was ELS_FIP
			 * so the fw does not need to send abts
			 * on the wire.
			 */
			bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
		else
			bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
		bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
		/* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
		wqe->abort_cmd.rsrvd5 = 0;
		bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		abort_tag = iocbq->iocb.un.acxri.abortIoTag;
		/*
		 * The abort handler will send us CMD_ABORT_XRI_CN or
		 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
		 */
		bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
		bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		cmnd = CMD_ABORT_XRI_CX;
		command_type = OTHER_COMMAND;
		xritag = 0;
		break;
	case CMD_XMIT_BLS_RSP64_CX:
		ndlp = (struct lpfc_nodelist *)iocbq->context1;
		/* As BLS ABTS RSP WQE is very different from other WQEs,
		 * we re-construct this WQE here based on information in
		 * iocbq from scratch.
		 */
		memset(wqe, 0, sizeof(*wqe));
		/* OX_ID is invariant regardless of who sent ABTS to CT exchange */
		bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
		       bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
		if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
		    LPFC_ABTS_UNSOL_INT) {
			/* ABTS sent by initiator to CT exchange, the
			 * RX_ID field will be filled with the newly
			 * allocated responder XRI.
			 */
			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
			       iocbq->sli4_xritag);
		} else {
			/* ABTS sent by responder to CT exchange, the
			 * RX_ID field will be filled with the responder
			 * RX_ID from ABTS.
			 */
			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
		}
		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);

		/* Use CT=VPI */
		bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
		       ndlp->nlp_DID);
		bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
		       iocbq->iocb.ulpContext);
		bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
		       phba->vpi_ids[phba->pport->vpi]);
		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		/* Overwrite the pre-set command type with OTHER_COMMAND */
		command_type = OTHER_COMMAND;
		if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
			bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
			bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
			bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
		}

		break;
	case CMD_SEND_FRAME:
		bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
		bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
		bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
		bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
		bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
		bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
		bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
		bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
		bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
		bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
		return 0;
	case CMD_XRI_ABORTED_CX:
	case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
	case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
	case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
	case CMD_FCP_TRSP64_CX: /* Target mode rcv */
	case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2014 Invalid command 0x%x\n",
				iocbq->iocb.ulpCommand);
		return IOCB_ERROR;
	}
9932 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9933 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9934 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9935 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9936 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9937 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9938 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9939 LPFC_IO_DIF_INSERT);
9940 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9941 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9942 wqe->generic.wqe_com.abort_tag = abort_tag;
9943 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9944 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9945 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9946 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
9951 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
9952 * @phba: Pointer to HBA context object.
9953 * @ring_number: SLI ring number to issue iocb on.
9954 * @piocb: Pointer to command iocb.
9955 * @flag: Flag indicating if this command can be put into txq.
9957 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
9958 * an iocb command to an HBA with SLI-4 interface spec.
9960 * This function is called with ringlock held. The function will return success
9961 * after it successfully submits the iocb to firmware or after adding it to the
9965 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9966 struct lpfc_iocbq *piocb, uint32_t flag)
9968 struct lpfc_sglq *sglq;
9969 union lpfc_wqe128 wqe;
9970 struct lpfc_queue *wq;
9971 struct lpfc_sli_ring *pring;
9974 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9975 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9976 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
9978 wq = phba->sli4_hba.els_wq;
9981 /* Get corresponding ring */
9985 * The WQE can be either 64 or 128 bytes,
9988 lockdep_assert_held(&pring->ring_lock);
9990 if (piocb->sli4_xritag == NO_XRI) {
9991 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
9992 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
9995 if (!list_empty(&pring->txq)) {
9996 if (!(flag & SLI_IOCB_RET_IOCB)) {
9997 __lpfc_sli_ringtx_put(phba,
9999 return IOCB_SUCCESS;
10004 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
10006 if (!(flag & SLI_IOCB_RET_IOCB)) {
10007 __lpfc_sli_ringtx_put(phba,
10010 return IOCB_SUCCESS;
10016 } else if (piocb->iocb_flag & LPFC_IO_FCP)
10017 /* These IOs already have an XRI and a mapped sgl. */
10021 * This is a continuation of a command (CX), so this
10022 * sglq is on the active list
10024 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
10030 piocb->sli4_lxritag = sglq->sli4_lxritag;
10031 piocb->sli4_xritag = sglq->sli4_xritag;
10032 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
10036 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
10039 if (lpfc_sli4_wq_put(wq, &wqe))
10041 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
10047 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
10049 * This routine wraps the actual lockless iocb issue function, calling it
10050 * through the function pointer stored in the lpfc_hba struct.
10053 * IOCB_ERROR - Error
10054 * IOCB_SUCCESS - Success
10058 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10059 struct lpfc_iocbq *piocb, uint32_t flag)
10061 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10065 * lpfc_sli_api_table_setup - Set up sli api function jump table
10066 * @phba: The hba struct for which this call is being executed.
10067 * @dev_grp: The HBA PCI-Device group number.
10069 * This routine sets up the SLI interface API function jump table in @phba
10071 * Returns: 0 - success, -ENODEV - failure.
10074 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10078 case LPFC_PCI_DEV_LP:
10079 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10080 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10082 case LPFC_PCI_DEV_OC:
10083 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10084 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10087 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10088 "1419 Invalid HBA PCI-device group: 0x%x\n",
10093 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
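
/*
 * Illustrative sketch (not part of the driver): once
 * lpfc_sli_api_table_setup() has populated the jump table, callers never
 * branch on the SLI revision to issue an iocb; they simply dispatch
 * through the function pointer. The helper name below is hypothetical.
 */
static inline int
lpfc_example_issue_via_api_table(struct lpfc_hba *phba, uint32_t ring_number,
				 struct lpfc_iocbq *piocb, uint32_t flag)
{
	/* Resolves to __lpfc_sli_issue_iocb_s3 or _s4 based on dev_grp */
	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
}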
10098 * lpfc_sli4_calc_ring - Calculates which ring to use
10099 * @phba: Pointer to HBA context object.
10100 * @piocb: Pointer to command iocb.
10102 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
10103 * hba_wqidx, thus we need to calculate the corresponding ring.
10104 * Since ABORTS must go on the same WQ as the command they are
10105 * aborting, we use the command's hba_wqidx.
10107 struct lpfc_sli_ring *
10108 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10110 struct lpfc_io_buf *lpfc_cmd;
10112 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10113 if (unlikely(!phba->sli4_hba.hdwq))
10116 * for an abort iocb, hba_wqidx should already
10117 * be set up based on what work queue we used.
10119 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10120 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10121 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10123 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
10125 if (unlikely(!phba->sli4_hba.els_wq))
10127 piocb->hba_wqidx = 0;
10128 return phba->sli4_hba.els_wq->pring;
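
/*
 * Illustrative sketch (not part of the driver): an abort must be queued
 * on the same WQ as the command it aborts, so callers copy hba_wqidx from
 * the original iocb before asking for the ring, mirroring the abort paths
 * later in this file. The helper name is hypothetical.
 */
static inline struct lpfc_sli_ring *
lpfc_example_ring_for_abort(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			    struct lpfc_iocbq *abtsiocb)
{
	abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
	if (cmdiocb->iocb_flag & LPFC_IO_FCP)
		abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
	return lpfc_sli4_calc_ring(phba, abtsiocb);
}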
10133 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
10134 * @phba: Pointer to HBA context object.
10135 * @pring: Pointer to driver SLI ring object.
10136 * @piocb: Pointer to command iocb.
10137 * @flag: Flag indicating if this command can be put into txq.
10139 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
10140 * function. This function gets the hbalock and calls
10141 * __lpfc_sli_issue_iocb function and will return the error returned
10142 * by __lpfc_sli_issue_iocb function. This wrapper is used by
10143 * functions which do not hold hbalock.
10146 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10147 struct lpfc_iocbq *piocb, uint32_t flag)
10149 struct lpfc_sli_ring *pring;
10150 struct lpfc_queue *eq;
10151 unsigned long iflags;
10154 if (phba->sli_rev == LPFC_SLI_REV4) {
10155 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
10157 pring = lpfc_sli4_calc_ring(phba, piocb);
10158 if (unlikely(pring == NULL))
10161 spin_lock_irqsave(&pring->ring_lock, iflags);
10162 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10163 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10165 lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
10167 /* For now, SLI2/3 will still use hbalock */
10168 spin_lock_irqsave(&phba->hbalock, iflags);
10169 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10170 spin_unlock_irqrestore(&phba->hbalock, iflags);
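
/*
 * Illustrative usage sketch (not part of the driver): lpfc_sli_issue_iocb()
 * takes the appropriate lock internally (the per-ring lock on SLI4, hbalock
 * on SLI2/3), so lock-free contexts call it directly. With SLI_IOCB_RET_IOCB
 * the iocb is handed back on failure instead of being queued to the txq.
 * The caller below is hypothetical.
 */
static int lpfc_example_submit_els(struct lpfc_hba *phba,
				   struct lpfc_iocbq *piocb)
{
	int rc;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb,
				 SLI_IOCB_RET_IOCB);
	if (rc != IOCB_SUCCESS)
		return -EIO;	/* iocb handed back; caller owns cleanup */
	return 0;
}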
10176 * lpfc_extra_ring_setup - Extra ring setup function
10177 * @phba: Pointer to HBA context object.
10179 * This function is called while driver attaches with the
10180 * HBA to setup the extra ring. The extra ring is used
10181 * only when the driver needs to support target mode
10182 * or IP over FC functionality.
10184 * This function is called with no lock held. SLI3 only.
10187 lpfc_extra_ring_setup(struct lpfc_hba *phba)
10189 struct lpfc_sli *psli;
10190 struct lpfc_sli_ring *pring;
10194 /* Adjust cmd/rsp ring iocb entries more evenly */
10196 /* Take some away from the FCP ring */
10197 pring = &psli->sli3_ring[LPFC_FCP_RING];
10198 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10199 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10200 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10201 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10203 /* and give them to the extra ring */
10204 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
10206 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10207 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10208 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10209 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10211 /* Setup default profile for this ring */
10212 pring->iotag_max = 4096;
10213 pring->num_mask = 1;
10214 pring->prt[0].profile = 0; /* Mask 0 */
10215 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10216 pring->prt[0].type = phba->cfg_multi_ring_type;
10217 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
10221 /**
 * lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
10222 * @phba: Pointer to HBA context object.
10223 * @iocbq: Pointer to iocb object.
10225 * The async_event handler calls this routine when it receives
10226 * an ASYNC_STATUS_CN event from the port. The port generates
10227 * this event when an Abort Sequence request to an rport fails
10228 * twice in succession. The abort may have been originated by the
10229 * driver or by the port. The ABTS could have been for an ELS
10230 * or FCP IO. The port only generates this event when an ABTS
10231 * fails to complete after one retry.
10234 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10235 struct lpfc_iocbq *iocbq)
10237 struct lpfc_nodelist *ndlp = NULL;
10238 uint16_t rpi = 0, vpi = 0;
10239 struct lpfc_vport *vport = NULL;
10241 /* The rpi in the ulpContext is vport-sensitive. */
10242 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10243 rpi = iocbq->iocb.ulpContext;
10245 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10246 "3092 Port generated ABTS async event "
10247 "on vpi %d rpi %d status 0x%x\n",
10248 vpi, rpi, iocbq->iocb.ulpStatus);
10250 vport = lpfc_find_vport_by_vpid(phba, vpi);
10253 ndlp = lpfc_findnode_rpi(vport, rpi);
10254 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
10257 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10258 lpfc_sli_abts_recover_port(vport, ndlp);
10262 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10263 "3095 Event Context not found, no "
10264 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
10265 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
10269 /**
 * lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
10270 * @phba: pointer to HBA context object.
10271 * @ndlp: nodelist pointer for the impacted rport.
10272 * @axri: pointer to the wcqe containing the failed exchange.
10274 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
10275 * port. The port generates this event when an abort exchange request to an
10276 * rport fails twice in succession with no reply. The abort may have been originated
10277 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
10280 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10281 struct lpfc_nodelist *ndlp,
10282 struct sli4_wcqe_xri_aborted *axri)
10284 struct lpfc_vport *vport;
10285 uint32_t ext_status = 0;
10287 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
10288 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10289 "3115 Node Context not found, driver "
10290 "ignoring abts err event\n");
10294 vport = ndlp->vport;
10295 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10296 "3116 Port generated FCP XRI ABORT event on "
10297 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
10298 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
10299 bf_get(lpfc_wcqe_xa_xri, axri),
10300 bf_get(lpfc_wcqe_xa_status, axri),
10304 * Catch the ABTS protocol failure case. Older OCe FW releases returned
10305 * LOCAL_REJECT and 0 for a failed ABTS exchange, and later OCe and
10306 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
10308 ext_status = axri->parameter & IOERR_PARAM_MASK;
10309 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10310 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
10311 lpfc_sli_abts_recover_port(vport, ndlp);
10315 * lpfc_sli_async_event_handler - ASYNC iocb handler function
10316 * @phba: Pointer to HBA context object.
10317 * @pring: Pointer to driver SLI ring object.
10318 * @iocbq: Pointer to iocb object.
10320 * This function is called by the slow ring event handler
10321 * function when there is an ASYNC event iocb in the ring.
10322 * This function is called with no lock held.
10323 * Currently this function handles only temperature related
10324 * ASYNC events. The function decodes the temperature sensor
10325 * event message and posts events for the management applications.
10328 lpfc_sli_async_event_handler(struct lpfc_hba *phba,
10329 	struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
10333 struct temp_event temp_event_data;
10334 struct Scsi_Host *shost;
10337 icmd = &iocbq->iocb;
10338 evt_code = icmd->un.asyncstat.evt_code;
10340 switch (evt_code) {
10341 case ASYNC_TEMP_WARN:
10342 case ASYNC_TEMP_SAFE:
10343 temp_event_data.data = (uint32_t) icmd->ulpContext;
10344 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
10345 if (evt_code == ASYNC_TEMP_WARN) {
10346 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
10347 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10348 "0347 Adapter is very hot, please take "
10349 "corrective action. temperature : %d Celsius\n",
10350 (uint32_t) icmd->ulpContext);
10352 temp_event_data.event_code = LPFC_NORMAL_TEMP;
10353 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10354 "0340 Adapter temperature is OK now. "
10355 "temperature : %d Celsius\n",
10356 (uint32_t) icmd->ulpContext);
10359 /* Send temperature change event to applications */
10360 shost = lpfc_shost_from_vport(phba->pport);
10361 fc_host_post_vendor_event(shost, fc_get_event_number(),
10362 sizeof(temp_event_data), (char *) &temp_event_data,
10363 LPFC_NL_VENDOR_ID);
10365 case ASYNC_STATUS_CN:
10366 lpfc_sli_abts_err_handler(phba, iocbq);
10369 iocb_w = (uint32_t *) icmd;
10370 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10371 "0346 Ring %d handler: unexpected ASYNC_STATUS"
10373 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
10374 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
10375 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
10376 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
10377 pring->ringno, icmd->un.asyncstat.evt_code,
10378 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
10379 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
10380 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
10381 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
10389 * lpfc_sli4_setup - SLI ring setup function
10390 * @phba: Pointer to HBA context object.
10392 * lpfc_sli4_setup sets up rings of the SLI interface with
10393 * the number of iocbs per ring and the iotags. This function is
10394 * called while the driver attaches to the HBA and before the
10395 * interrupts are enabled. So there is no need for locking.
10397 * This function always returns 0.
10400 lpfc_sli4_setup(struct lpfc_hba *phba)
10402 struct lpfc_sli_ring *pring;
10404 pring = phba->sli4_hba.els_wq->pring;
10405 pring->num_mask = LPFC_MAX_RING_MASK;
10406 pring->prt[0].profile = 0; /* Mask 0 */
10407 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10408 pring->prt[0].type = FC_TYPE_ELS;
10409 pring->prt[0].lpfc_sli_rcv_unsol_event =
10410 lpfc_els_unsol_event;
10411 pring->prt[1].profile = 0; /* Mask 1 */
10412 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10413 pring->prt[1].type = FC_TYPE_ELS;
10414 pring->prt[1].lpfc_sli_rcv_unsol_event =
10415 lpfc_els_unsol_event;
10416 pring->prt[2].profile = 0; /* Mask 2 */
10417 /* NameServer Inquiry */
10418 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10420 pring->prt[2].type = FC_TYPE_CT;
10421 pring->prt[2].lpfc_sli_rcv_unsol_event =
10422 lpfc_ct_unsol_event;
10423 pring->prt[3].profile = 0; /* Mask 3 */
10424 /* NameServer response */
10425 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10427 pring->prt[3].type = FC_TYPE_CT;
10428 pring->prt[3].lpfc_sli_rcv_unsol_event =
10429 lpfc_ct_unsol_event;
10434 * lpfc_sli_setup - SLI ring setup function
10435 * @phba: Pointer to HBA context object.
10437 * lpfc_sli_setup sets up rings of the SLI interface with
10438 * the number of iocbs per ring and the iotags. This function is
10439 * called while the driver attaches to the HBA and before the
10440 * interrupts are enabled. So there is no need for locking.
10442 * This function always returns 0. SLI3 only.
10445 lpfc_sli_setup(struct lpfc_hba *phba)
10447 int i, totiocbsize = 0;
10448 struct lpfc_sli *psli = &phba->sli;
10449 struct lpfc_sli_ring *pring;
10451 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
10452 psli->sli_flag = 0;
10454 psli->iocbq_lookup = NULL;
10455 psli->iocbq_lookup_len = 0;
10456 psli->last_iotag = 0;
10458 for (i = 0; i < psli->num_rings; i++) {
10459 pring = &psli->sli3_ring[i];
10461 case LPFC_FCP_RING: /* ring 0 - FCP */
10462 /* numCiocb and numRiocb are used in config_port */
10463 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10464 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10465 pring->sli.sli3.numCiocb +=
10466 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10467 pring->sli.sli3.numRiocb +=
10468 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10469 pring->sli.sli3.numCiocb +=
10470 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10471 pring->sli.sli3.numRiocb +=
10472 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10473 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10474 SLI3_IOCB_CMD_SIZE :
10475 SLI2_IOCB_CMD_SIZE;
10476 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10477 SLI3_IOCB_RSP_SIZE :
10478 SLI2_IOCB_RSP_SIZE;
10479 pring->iotag_ctr = 0;
10481 (phba->cfg_hba_queue_depth * 2);
10482 pring->fast_iotag = pring->iotag_max;
10483 pring->num_mask = 0;
10485 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
10486 /* numCiocb and numRiocb are used in config_port */
10487 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10488 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10489 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10490 SLI3_IOCB_CMD_SIZE :
10491 SLI2_IOCB_CMD_SIZE;
10492 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10493 SLI3_IOCB_RSP_SIZE :
10494 SLI2_IOCB_RSP_SIZE;
10495 pring->iotag_max = phba->cfg_hba_queue_depth;
10496 pring->num_mask = 0;
10498 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
10499 /* numCiocb and numRiocb are used in config_port */
10500 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10501 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10502 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10503 SLI3_IOCB_CMD_SIZE :
10504 SLI2_IOCB_CMD_SIZE;
10505 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10506 SLI3_IOCB_RSP_SIZE :
10507 SLI2_IOCB_RSP_SIZE;
10508 pring->fast_iotag = 0;
10509 pring->iotag_ctr = 0;
10510 pring->iotag_max = 4096;
10511 pring->lpfc_sli_rcv_async_status =
10512 lpfc_sli_async_event_handler;
10513 pring->num_mask = LPFC_MAX_RING_MASK;
10514 pring->prt[0].profile = 0; /* Mask 0 */
10515 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10516 pring->prt[0].type = FC_TYPE_ELS;
10517 pring->prt[0].lpfc_sli_rcv_unsol_event =
10518 lpfc_els_unsol_event;
10519 pring->prt[1].profile = 0; /* Mask 1 */
10520 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10521 pring->prt[1].type = FC_TYPE_ELS;
10522 pring->prt[1].lpfc_sli_rcv_unsol_event =
10523 lpfc_els_unsol_event;
10524 pring->prt[2].profile = 0; /* Mask 2 */
10525 /* NameServer Inquiry */
10526 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10528 pring->prt[2].type = FC_TYPE_CT;
10529 pring->prt[2].lpfc_sli_rcv_unsol_event =
10530 lpfc_ct_unsol_event;
10531 pring->prt[3].profile = 0; /* Mask 3 */
10532 /* NameServer response */
10533 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10535 pring->prt[3].type = FC_TYPE_CT;
10536 pring->prt[3].lpfc_sli_rcv_unsol_event =
10537 lpfc_ct_unsol_event;
10540 totiocbsize += (pring->sli.sli3.numCiocb *
10541 pring->sli.sli3.sizeCiocb) +
10542 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
10544 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
10545 /* Too many cmd / rsp ring entries in SLI2 SLIM */
10546 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10547 "SLI2 SLIM Data: x%x x%lx\n",
10548 phba->brd_no, totiocbsize,
10549 (unsigned long) MAX_SLIM_IOCB_SIZE);
10551 if (phba->cfg_multi_ring_support == 2)
10552 lpfc_extra_ring_setup(phba);
10558 * lpfc_sli4_queue_init - Queue initialization function
10559 * @phba: Pointer to HBA context object.
10561 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
10562 * ring. This function also initializes ring indices of each ring.
10563 * This function is called during the initialization of the SLI
10564 * interface of an HBA.
10565 * This function is called with no lock held and always returns
10569 lpfc_sli4_queue_init(struct lpfc_hba *phba)
10571 struct lpfc_sli *psli;
10572 struct lpfc_sli_ring *pring;
10576 spin_lock_irq(&phba->hbalock);
10577 INIT_LIST_HEAD(&psli->mboxq);
10578 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10579 /* Initialize list headers for txq and txcmplq as doubly linked lists */
10580 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10581 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
10583 pring->ringno = LPFC_FCP_RING;
10584 pring->txcmplq_cnt = 0;
10585 INIT_LIST_HEAD(&pring->txq);
10586 INIT_LIST_HEAD(&pring->txcmplq);
10587 INIT_LIST_HEAD(&pring->iocb_continueq);
10588 spin_lock_init(&pring->ring_lock);
10590 pring = phba->sli4_hba.els_wq->pring;
10592 pring->ringno = LPFC_ELS_RING;
10593 pring->txcmplq_cnt = 0;
10594 INIT_LIST_HEAD(&pring->txq);
10595 INIT_LIST_HEAD(&pring->txcmplq);
10596 INIT_LIST_HEAD(&pring->iocb_continueq);
10597 spin_lock_init(&pring->ring_lock);
10599 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10600 pring = phba->sli4_hba.nvmels_wq->pring;
10602 pring->ringno = LPFC_ELS_RING;
10603 pring->txcmplq_cnt = 0;
10604 INIT_LIST_HEAD(&pring->txq);
10605 INIT_LIST_HEAD(&pring->txcmplq);
10606 INIT_LIST_HEAD(&pring->iocb_continueq);
10607 spin_lock_init(&pring->ring_lock);
10610 spin_unlock_irq(&phba->hbalock);
10614 * lpfc_sli_queue_init - Queue initialization function
10615 * @phba: Pointer to HBA context object.
10617 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
10618 * ring. This function also initializes ring indices of each ring.
10619 * This function is called during the initialization of the SLI
10620 * interface of an HBA.
10621 * This function is called with no lock held and always returns
10625 lpfc_sli_queue_init(struct lpfc_hba *phba)
10627 struct lpfc_sli *psli;
10628 struct lpfc_sli_ring *pring;
10632 spin_lock_irq(&phba->hbalock);
10633 INIT_LIST_HEAD(&psli->mboxq);
10634 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10635 /* Initialize list headers for txq and txcmplq as doubly linked lists */
10636 for (i = 0; i < psli->num_rings; i++) {
10637 pring = &psli->sli3_ring[i];
10639 pring->sli.sli3.next_cmdidx = 0;
10640 pring->sli.sli3.local_getidx = 0;
10641 pring->sli.sli3.cmdidx = 0;
10642 INIT_LIST_HEAD(&pring->iocb_continueq);
10643 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
10644 INIT_LIST_HEAD(&pring->postbufq);
10646 INIT_LIST_HEAD(&pring->txq);
10647 INIT_LIST_HEAD(&pring->txcmplq);
10648 spin_lock_init(&pring->ring_lock);
10650 spin_unlock_irq(&phba->hbalock);
10654 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
10655 * @phba: Pointer to HBA context object.
10657 * This routine flushes the mailbox command subsystem. It will unconditionally
10658 * flush all the mailbox commands in the three possible stages in the mailbox
10659 * command sub-system: pending mailbox command queue; the outstanding mailbox
10660 * command; and the completed mailbox command queue. It is the caller's responsibility
10661 * to make sure that the driver is in the proper state to flush the mailbox
10662 * command sub-system. Namely, the posting of mailbox commands into the
10663 * pending mailbox command queue from the various clients must be stopped;
10664 * either the HBA is in a state in which it will never work on the outstanding
10665 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
10666 * mailbox command has been completed.
10669 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
10671 LIST_HEAD(completions);
10672 struct lpfc_sli *psli = &phba->sli;
10674 unsigned long iflag;
10676 /* Disable softirqs, including timers from obtaining phba->hbalock */
10677 local_bh_disable();
10679 /* Flush all the mailbox commands in the mbox system */
10680 spin_lock_irqsave(&phba->hbalock, iflag);
10682 /* The pending mailbox command queue */
10683 list_splice_init(&phba->sli.mboxq, &completions);
10684 /* The outstanding active mailbox command */
10685 if (psli->mbox_active) {
10686 list_add_tail(&psli->mbox_active->list, &completions);
10687 psli->mbox_active = NULL;
10688 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10690 /* The completed mailbox command queue */
10691 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
10692 spin_unlock_irqrestore(&phba->hbalock, iflag);
10694 /* Enable softirqs again, done with phba->hbalock */
10697 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
10698 while (!list_empty(&completions)) {
10699 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
10700 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
10701 if (pmb->mbox_cmpl)
10702 pmb->mbox_cmpl(phba, pmb);
10707 * lpfc_sli_host_down - Vport cleanup function
10708 * @vport: Pointer to virtual port object.
10710 * lpfc_sli_host_down is called to clean up the resources
10711 * associated with a vport before destroying virtual
10712 * port data structures.
10713 * This function does the following operations:
10714 * - Free discovery resources associated with this virtual
10716 * - Free iocbs associated with this virtual port in
10718 * - Send abort for all iocb commands associated with this
10719 * vport in txcmplq.
10721 * This function is called with no lock held and always returns 1.
10724 lpfc_sli_host_down(struct lpfc_vport *vport)
10726 LIST_HEAD(completions);
10727 struct lpfc_hba *phba = vport->phba;
10728 struct lpfc_sli *psli = &phba->sli;
10729 struct lpfc_queue *qp = NULL;
10730 struct lpfc_sli_ring *pring;
10731 struct lpfc_iocbq *iocb, *next_iocb;
10733 unsigned long flags = 0;
10734 uint16_t prev_pring_flag;
10736 lpfc_cleanup_discovery_resources(vport);
10738 spin_lock_irqsave(&phba->hbalock, flags);
10741 * Error everything on the txq since these iocbs
10742 * have not been given to the FW yet.
10743 * Also issue ABTS for everything on the txcmplq
10745 if (phba->sli_rev != LPFC_SLI_REV4) {
10746 for (i = 0; i < psli->num_rings; i++) {
10747 pring = &psli->sli3_ring[i];
10748 prev_pring_flag = pring->flag;
10749 /* Only slow rings */
10750 if (pring->ringno == LPFC_ELS_RING) {
10751 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10752 /* Set the lpfc data pending flag */
10753 set_bit(LPFC_DATA_READY, &phba->data_flags);
10755 list_for_each_entry_safe(iocb, next_iocb,
10756 &pring->txq, list) {
10757 if (iocb->vport != vport)
10759 list_move_tail(&iocb->list, &completions);
10761 list_for_each_entry_safe(iocb, next_iocb,
10762 &pring->txcmplq, list) {
10763 if (iocb->vport != vport)
10765 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10767 pring->flag = prev_pring_flag;
10770 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10774 if (pring == phba->sli4_hba.els_wq->pring) {
10775 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10776 /* Set the lpfc data pending flag */
10777 set_bit(LPFC_DATA_READY, &phba->data_flags);
10779 prev_pring_flag = pring->flag;
10780 spin_lock(&pring->ring_lock);
10781 list_for_each_entry_safe(iocb, next_iocb,
10782 &pring->txq, list) {
10783 if (iocb->vport != vport)
10785 list_move_tail(&iocb->list, &completions);
10787 spin_unlock(&pring->ring_lock);
10788 list_for_each_entry_safe(iocb, next_iocb,
10789 &pring->txcmplq, list) {
10790 if (iocb->vport != vport)
10792 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10794 pring->flag = prev_pring_flag;
10797 spin_unlock_irqrestore(&phba->hbalock, flags);
10799 /* Cancel all the IOCBs from the completions list */
10800 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10806 * lpfc_sli_hba_down - Resource cleanup function for the HBA
10807 * @phba: Pointer to HBA context object.
10809 * This function cleans up all iocbs, buffers, and mailbox commands
10810 * while shutting down the HBA. This function is called with no
10811 * lock held and always returns 1.
10812 * This function does the following to cleanup driver resources:
10813 * - Free discovery resources for each virtual port
10814 * - Cleanup any pending fabric iocbs
10815 * - Iterate through the iocb txq and free each entry
10817 * - Free up any buffer posted to the HBA
10818 * - Free mailbox commands in the mailbox queue.
10821 lpfc_sli_hba_down(struct lpfc_hba *phba)
10823 LIST_HEAD(completions);
10824 struct lpfc_sli *psli = &phba->sli;
10825 struct lpfc_queue *qp = NULL;
10826 struct lpfc_sli_ring *pring;
10827 struct lpfc_dmabuf *buf_ptr;
10828 unsigned long flags = 0;
10831 /* Shutdown the mailbox command sub-system */
10832 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
10834 lpfc_hba_down_prep(phba);
10836 /* Disable softirqs, including timers from obtaining phba->hbalock */
10837 local_bh_disable();
10839 lpfc_fabric_abort_hba(phba);
10841 spin_lock_irqsave(&phba->hbalock, flags);
10844 * Error everything on the txq since these iocbs
10845 * have not been given to the FW yet.
10847 if (phba->sli_rev != LPFC_SLI_REV4) {
10848 for (i = 0; i < psli->num_rings; i++) {
10849 pring = &psli->sli3_ring[i];
10850 /* Only slow rings */
10851 if (pring->ringno == LPFC_ELS_RING) {
10852 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10853 /* Set the lpfc data pending flag */
10854 set_bit(LPFC_DATA_READY, &phba->data_flags);
10856 list_splice_init(&pring->txq, &completions);
10859 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10863 spin_lock(&pring->ring_lock);
10864 list_splice_init(&pring->txq, &completions);
10865 spin_unlock(&pring->ring_lock);
10866 if (pring == phba->sli4_hba.els_wq->pring) {
10867 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10868 /* Set the lpfc data pending flag */
10869 set_bit(LPFC_DATA_READY, &phba->data_flags);
10873 spin_unlock_irqrestore(&phba->hbalock, flags);
10875 /* Cancel all the IOCBs from the completions list */
10876 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10879 spin_lock_irqsave(&phba->hbalock, flags);
10880 list_splice_init(&phba->elsbuf, &completions);
10881 phba->elsbuf_cnt = 0;
10882 phba->elsbuf_prev_cnt = 0;
10883 spin_unlock_irqrestore(&phba->hbalock, flags);
10885 while (!list_empty(&completions)) {
10886 list_remove_head(&completions, buf_ptr,
10887 struct lpfc_dmabuf, list);
10888 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
10892 /* Enable softirqs again, done with phba->hbalock */
10895 /* Return any active mbox cmds */
10896 del_timer_sync(&psli->mbox_tmo);
10898 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
10899 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
10900 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
10906 * lpfc_sli_pcimem_bcopy - SLI memory copy function
10907 * @srcp: Source memory pointer.
10908 * @destp: Destination memory pointer.
10909 * @cnt: Number of bytes to be copied (processed one 32-bit word at a time).
10911 * This function is used for copying data between driver memory
10912 * and the SLI memory. This function also changes the endianness
10913 * of each word if native endianness is different from SLI
10914 * endianness. This function can be called with or without
10918 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
10920 uint32_t *src = srcp;
10921 uint32_t *dest = destp;
10925 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
10927 ldata = le32_to_cpu(ldata);
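
/*
 * Illustrative sketch (not part of the driver): the effect of the
 * word-wise copy above, assuming the SLI memory is little-endian.
 * le32_to_cpu() byte-swaps each 32-bit word on big-endian hosts and is
 * a no-op on little-endian hosts. The helper name is hypothetical.
 */
static inline void lpfc_example_word_copy(const __le32 *src, uint32_t *dest,
					  uint32_t byte_cnt)
{
	uint32_t i;

	for (i = 0; i < byte_cnt / sizeof(uint32_t); i++)
		dest[i] = le32_to_cpu(src[i]);
}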
10936 * lpfc_sli_bemem_bcopy - SLI memory copy function
10937 * @srcp: Source memory pointer.
10938 * @destp: Destination memory pointer.
10939 * @cnt: Number of bytes to be copied (processed one 32-bit word at a time).
10941 * This function is used for copying data from a data structure
10942 * with big-endian representation to local endianness.
10943 * This function can be called with or without lock.
10946 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
10948 uint32_t *src = srcp;
10949 uint32_t *dest = destp;
10953 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10955 ldata = be32_to_cpu(ldata);
10963 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
10964 * @phba: Pointer to HBA context object.
10965 * @pring: Pointer to driver SLI ring object.
10966 * @mp: Pointer to driver buffer object.
10968 * This function is called with no lock held.
10969 * It always returns zero after adding the buffer to the postbufq
10973 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10974 struct lpfc_dmabuf *mp)
10976 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
10978 spin_lock_irq(&phba->hbalock);
10979 list_add_tail(&mp->list, &pring->postbufq);
10980 pring->postbufq_cnt++;
10981 spin_unlock_irq(&phba->hbalock);
10986 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
10987 * @phba: Pointer to HBA context object.
10989 * When HBQ is enabled, buffers are searched based on tags. This function
10990 * allocates a tag for a buffer posted using a CMD_QUE_XRI64_CX iocb. The
10991 * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
10992 * does not conflict with tags of buffers posted for unsolicited events.
10993 * The function returns the allocated tag. The function is called with
10997 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10999 spin_lock_irq(&phba->hbalock);
11000 phba->buffer_tag_count++;
11002 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
11003 * a tag assigned by HBQ.
11005 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
11006 spin_unlock_irq(&phba->hbalock);
11007 return phba->buffer_tag_count;
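
/*
 * Illustrative sketch (not part of the driver): the post/lookup pairing
 * for tagged buffers. A buffer posted with CMD_QUE_XRI64_CX carries a
 * driver-allocated tag (QUE_BUFTAG_BIT keeps it from colliding with HBQ
 * tags); the CMD_IOCB_RET_XRI64_CX completion later returns the same
 * tag. The helper name is hypothetical.
 */
static struct lpfc_dmabuf *
lpfc_example_tag_roundtrip(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_dmabuf *mp)
{
	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
	lpfc_sli_ringpostbuf_put(phba, pring, mp);
	/* ... later, when the CMD_IOCB_RET_XRI64_CX iocb completes ... */
	return lpfc_sli_ring_taggedbuf_get(phba, pring, mp->buffer_tag);
}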
11011 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
11012 * @phba: Pointer to HBA context object.
11013 * @pring: Pointer to driver SLI ring object.
11014 * @tag: Buffer tag.
11016 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
11017 * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
11018 * iocb is posted to the response ring with the tag of the buffer.
11019 * This function searches the pring->postbufq list using the tag
11021 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
11022 * iocb. If the buffer is found then the lpfc_dmabuf object of the
11023 * buffer is returned to the caller, else NULL is returned.
11023 * This function is called with no lock held.
11025 struct lpfc_dmabuf *
11026 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11029 struct lpfc_dmabuf *mp, *next_mp;
11030 struct list_head *slp = &pring->postbufq;
11032 /* Search postbufq, from the beginning, looking for a match on tag */
11033 spin_lock_irq(&phba->hbalock);
11034 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11035 if (mp->buffer_tag == tag) {
11036 list_del_init(&mp->list);
11037 pring->postbufq_cnt--;
11038 spin_unlock_irq(&phba->hbalock);
11043 spin_unlock_irq(&phba->hbalock);
11044 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11045 "0402 Cannot find virtual addr for buffer tag on "
11046 "ring %d Data x%lx x%px x%px x%x\n",
11047 pring->ringno, (unsigned long) tag,
11048 slp->next, slp->prev, pring->postbufq_cnt);
11054 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
11055 * @phba: Pointer to HBA context object.
11056 * @pring: Pointer to driver SLI ring object.
11057 * @phys: DMA address of the buffer.
11059 * This function searches the buffer list using the dma_address
11060 * of unsolicited event to find the driver's lpfc_dmabuf object
11061 * corresponding to the dma_address. The function returns the
11062 * lpfc_dmabuf object if a buffer is found, else it returns NULL.
11063 * This function is called by the ct and els unsolicited event
11064 * handlers to get the buffer associated with the unsolicited
11067 * This function is called with no lock held.
11069 struct lpfc_dmabuf *
11070 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11073 struct lpfc_dmabuf *mp, *next_mp;
11074 struct list_head *slp = &pring->postbufq;
11076 /* Search postbufq, from the beginning, looking for a match on phys */
11077 spin_lock_irq(&phba->hbalock);
11078 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11079 if (mp->phys == phys) {
11080 list_del_init(&mp->list);
11081 pring->postbufq_cnt--;
11082 spin_unlock_irq(&phba->hbalock);
11087 spin_unlock_irq(&phba->hbalock);
11088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11089 "0410 Cannot find virtual addr for mapped buf on "
11090 "ring %d Data x%llx x%px x%px x%x\n",
11091 pring->ringno, (unsigned long long)phys,
11092 slp->next, slp->prev, pring->postbufq_cnt);
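
/*
 * Illustrative usage sketch (not part of the driver): an unsolicited-event
 * handler resolving the DMA address reported by the port back to the
 * driver's lpfc_dmabuf. 'phys' would come from the received iocb; the
 * helper name is hypothetical.
 */
static struct lpfc_dmabuf *
lpfc_example_unsol_buf(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		       dma_addr_t phys)
{
	/* Returns NULL if no posted buffer matches the address */
	return lpfc_sli_ringpostbuf_get(phba, pring, phys);
}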
11097 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
11098 * @phba: Pointer to HBA context object.
11099 * @cmdiocb: Pointer to driver command iocb object.
11100 * @rspiocb: Pointer to driver response iocb object.
11102 * This function is the completion handler for the abort iocbs for
11103 * ELS commands. This function is called from the ELS ring event
11104 * handler with no lock held. This function frees memory resources
11105 * associated with the abort iocb.
11108 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11109 struct lpfc_iocbq *rspiocb)
11111 IOCB_t *irsp = &rspiocb->iocb;
11112 uint16_t abort_iotag, abort_context;
11113 struct lpfc_iocbq *abort_iocb = NULL;
11115 if (irsp->ulpStatus) {
11118 * Assume that the port already completed and returned, or
11119 * will return the iocb. Just log the message.
11121 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
11122 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
11124 spin_lock_irq(&phba->hbalock);
11125 if (phba->sli_rev < LPFC_SLI_REV4) {
11126 if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
11127 irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11128 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
11129 spin_unlock_irq(&phba->hbalock);
11132 if (abort_iotag != 0 &&
11133 abort_iotag <= phba->sli.last_iotag)
11135 phba->sli.iocbq_lookup[abort_iotag];
11137 /* For sli4 the abort_tag is the XRI,
11138 * so the abort routine puts the iotag of the iocb
11139 * being aborted in the context field of the abort
11142 abort_iocb = phba->sli.iocbq_lookup[abort_context];
11144 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
11145 "0327 Cannot abort els iocb x%px "
11146 "with tag %x context %x, abort status %x, "
11148 abort_iocb, abort_iotag, abort_context,
11149 irsp->ulpStatus, irsp->un.ulpWord[4]);
11151 spin_unlock_irq(&phba->hbalock);
11154 lpfc_sli_release_iocbq(phba, cmdiocb);
11159 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
11160 * @phba: Pointer to HBA context object.
11161 * @cmdiocb: Pointer to driver command iocb object.
11162 * @rspiocb: Pointer to driver response iocb object.
11164 * The function is called from SLI ring event handler with no
11165 * lock held. This function is the completion handler for ELS commands
11166 * which are aborted. The function frees memory resources used for
11167 * the aborted ELS commands.
11170 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11171 struct lpfc_iocbq *rspiocb)
11173 IOCB_t *irsp = &rspiocb->iocb;
11175 /* ELS cmd tag <ulpIoTag> completes */
11176 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11177 "0139 Ignoring ELS cmd tag x%x completion Data: "
11179 irsp->ulpIoTag, irsp->ulpStatus,
11180 irsp->un.ulpWord[4], irsp->ulpTimeout);
11181 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11182 lpfc_ct_free_iocb(phba, cmdiocb);
11184 lpfc_els_free_iocb(phba, cmdiocb);
11189 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
11190 * @phba: Pointer to HBA context object.
11191 * @pring: Pointer to driver SLI ring object.
11192 * @cmdiocb: Pointer to driver command iocb object.
11194 * This function issues an abort iocb for the provided command iocb down to
11195 * the port. Unless the outstanding command iocb is itself an abort
11196 * request, this function issues the abort unconditionally. This function is
11197 * called with hbalock held. The function returns 0 when it fails due to
11198 * memory allocation failure or when the command iocb is an abort request.
11201 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11202 struct lpfc_iocbq *cmdiocb)
11204 struct lpfc_vport *vport = cmdiocb->vport;
11205 struct lpfc_iocbq *abtsiocbp;
11206 IOCB_t *icmd = NULL;
11207 IOCB_t *iabt = NULL;
11209 unsigned long iflags;
11210 struct lpfc_nodelist *ndlp;
11212 lockdep_assert_held(&phba->hbalock);
11215 * There are certain command types we don't want to abort. And we
11216 * don't want to abort commands that are already in the process of
11219 icmd = &cmdiocb->iocb;
11220 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11221 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11222 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11225 /* issue ABTS for this IOCB based on iotag */
11226 abtsiocbp = __lpfc_sli_get_iocbq(phba);
11227 if (abtsiocbp == NULL)
11230 /* This signals the response to set the correct status
11231 * before calling the completion handler
11233 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11235 iabt = &abtsiocbp->iocb;
11236 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11237 iabt->un.acxri.abortContextTag = icmd->ulpContext;
11238 if (phba->sli_rev == LPFC_SLI_REV4) {
11239 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
11240 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
11242 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
11243 if (pring->ringno == LPFC_ELS_RING) {
11244 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11245 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11249 iabt->ulpClass = icmd->ulpClass;
11251 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11252 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
11253 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
11254 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
11255 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11256 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
11258 if (phba->link_state >= LPFC_LINK_UP)
11259 iabt->ulpCommand = CMD_ABORT_XRI_CN;
11261 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
11263 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
11264 abtsiocbp->vport = vport;
11266 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11267 "0339 Abort xri x%x, original iotag x%x, "
11268 "abort cmd iotag x%x\n",
11269 iabt->un.acxri.abortIoTag,
11270 iabt->un.acxri.abortContextTag,
11273 if (phba->sli_rev == LPFC_SLI_REV4) {
11274 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11275 if (unlikely(pring == NULL))
11277 /* Note: both hbalock and ring_lock need to be set here */
11278 spin_lock_irqsave(&pring->ring_lock, iflags);
11279 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11281 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11283 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11288 __lpfc_sli_release_iocbq(phba, abtsiocbp);
11291 * Caller to this routine should check for IOCB_ERROR
11292 * and handle it properly. This routine no longer removes
11293 * iocb off txcmplq and call compl in case of IOCB_ERROR.
11299 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
11300 * @phba: Pointer to HBA context object.
11301 * @pring: Pointer to driver SLI ring object.
11302 * @cmdiocb: Pointer to driver command iocb object.
11304 * This function issues an abort iocb for the provided command iocb. In case
11305 * of unloading, the abort iocb will not be issued to commands on the ELS
11306 * ring. Instead, the callback function is changed for those commands
11307 * so that nothing happens when they finish. This function is called with
11308 * hbalock held. The function returns 0 when the command iocb is an abort
11312 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11313 struct lpfc_iocbq *cmdiocb)
11315 struct lpfc_vport *vport = cmdiocb->vport;
11316 int retval = IOCB_ERROR;
11317 IOCB_t *icmd = NULL;
11319 lockdep_assert_held(&phba->hbalock);
11322 * There are certain command types we don't want to abort. And we
11323 * don't want to abort commands that are already in the process of
11326 icmd = &cmdiocb->iocb;
11327 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11328 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11329 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11333 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11334 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11336 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11337 goto abort_iotag_exit;
11341 * If we're unloading, don't abort iocb on the ELS ring, but change
11342 * the callback so that nothing happens when it finishes.
11344 if ((vport->load_flag & FC_UNLOADING) &&
11345 (pring->ringno == LPFC_ELS_RING)) {
11346 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11347 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11349 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11350 goto abort_iotag_exit;
11353 /* Now, we try to issue the abort to the cmdiocb out */
11354 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
11358 * Caller to this routine should check for IOCB_ERROR
11359 * and handle it properly. This routine no longer removes
11360 * iocb off txcmplq and call compl in case of IOCB_ERROR.
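
/*
 * Illustrative sketch (not part of the driver): aborting one tracked
 * command while walking a txcmplq with hbalock held, as
 * lpfc_sli_host_down() does earlier in this file. The helper name is
 * hypothetical.
 */
static void lpfc_example_abort_one(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring,
				   struct lpfc_iocbq *iocb)
{
	lockdep_assert_held(&phba->hbalock);
	/*
	 * IOCB_ERROR is not fatal here: the routine no longer removes the
	 * iocb from the txcmplq on failure, so the port (or a later
	 * flush) completes it.
	 */
	(void)lpfc_sli_issue_abort_iotag(phba, pring, iocb);
}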
11366 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
11367 * @phba: pointer to lpfc HBA data structure.
11369 * This routine will abort all pending and outstanding iocbs to an HBA.
11372 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11374 struct lpfc_sli *psli = &phba->sli;
11375 struct lpfc_sli_ring *pring;
11376 struct lpfc_queue *qp = NULL;
11379 if (phba->sli_rev != LPFC_SLI_REV4) {
11380 for (i = 0; i < psli->num_rings; i++) {
11381 pring = &psli->sli3_ring[i];
11382 lpfc_sli_abort_iocb_ring(phba, pring);
11386 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11390 lpfc_sli_abort_iocb_ring(phba, pring);
11395 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
11396 * @iocbq: Pointer to driver iocb object.
11397 * @vport: Pointer to driver virtual port object.
11398 * @tgt_id: SCSI ID of the target.
11399 * @lun_id: LUN ID of the scsi device.
11400 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
11402 * This function acts as an iocb filter for functions which abort or count
11403 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
11404 * 0 if the filtering criteria are met for the given iocb and will return
11405 * 1 if the filtering criteria are not met.
11406 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
11407 * given iocb is for the SCSI device specified by vport, tgt_id and
11408 * lun_id parameter.
11409 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
11410 * given iocb is for the SCSI target specified by vport and tgt_id
11412 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
11413 * given iocb is for the SCSI host associated with the given vport.
11414 * This function is called with no locks held.
11417 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11418 uint16_t tgt_id, uint64_t lun_id,
11419 lpfc_ctx_cmd ctx_cmd)
11421 struct lpfc_io_buf *lpfc_cmd;
11424 if (iocbq->vport != vport)
11427 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11428 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
11431 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11433 if (lpfc_cmd->pCmd == NULL)
11438 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11439 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11440 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
11444 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11445 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
11448 case LPFC_CTX_HOST:
11452 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
11453 __func__, ctx_cmd);
11461 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
11462 * @vport: Pointer to virtual port.
11463 * @tgt_id: SCSI ID of the target.
11464 * @lun_id: LUN ID of the scsi device.
11465 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11467 * This function returns the number of FCP commands pending for the vport.
11468 * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
11469 * commands pending on the vport associated with the SCSI device specified
11470 * by the tgt_id and lun_id parameters.
11471 * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
11472 * commands pending on the vport associated with the SCSI target specified
11473 * by the tgt_id parameter.
11474 * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
11475 * commands pending on the vport.
11476 * This function returns the number of iocbs which satisfy the filter.
11477 * This function is called without any lock held.
11480 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11481 lpfc_ctx_cmd ctx_cmd)
11483 struct lpfc_hba *phba = vport->phba;
11484 struct lpfc_iocbq *iocbq;
11487 spin_lock_irq(&phba->hbalock);
11488 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11489 iocbq = phba->sli.iocbq_lookup[i];
11491 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11495 spin_unlock_irq(&phba->hbalock);
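
/*
 * Illustrative usage sketch (not part of the driver): counting
 * outstanding FCP I/Os before a LUN reset. tgt_id and lun_id would come
 * from the scsi_cmnd being recovered; the helper name is hypothetical.
 */
static int lpfc_example_pending_ios(struct lpfc_vport *vport,
				    uint16_t tgt_id, uint64_t lun_id)
{
	/* All pending FCP iocbs for this specific LUN on the vport */
	return lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
}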
11501 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
11502 * @phba: Pointer to HBA context object
11503 * @cmdiocb: Pointer to command iocb object.
11504 * @rspiocb: Pointer to response iocb object.
11506 * This function is called when an aborted FCP iocb completes. This
11507 * function is called by the ring event handler with no lock held.
11508 * This function frees the iocb.
11511 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11512 struct lpfc_iocbq *rspiocb)
11514 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11515 "3096 ABORT_XRI_CN completing on rpi x%x "
11516 "original iotag x%x, abort cmd iotag x%x "
11517 "status 0x%x, reason 0x%x\n",
11518 cmdiocb->iocb.un.acxri.abortContextTag,
11519 cmdiocb->iocb.un.acxri.abortIoTag,
11520 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11521 rspiocb->iocb.un.ulpWord[4]);
11522 lpfc_sli_release_iocbq(phba, cmdiocb);
11527 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
11528 * @vport: Pointer to virtual port.
11529 * @pring: Pointer to driver SLI ring object.
11530 * @tgt_id: SCSI ID of the target.
11531 * @lun_id: LUN ID of the scsi device.
11532 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11534 * This function sends an abort command for every SCSI command
11535 * associated with the given virtual port pending on the ring
11536 * filtered by the lpfc_sli_validate_fcp_iocb function.
11537 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
11538 * FCP iocbs associated with lun specified by tgt_id and lun_id
11540 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
11541 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11542 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
11543 * FCP iocbs associated with virtual port.
11544 * This function returns the number of iocbs it failed to abort.
11545 * This function is called with no locks held.
11548 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11549 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
11551 struct lpfc_hba *phba = vport->phba;
11552 struct lpfc_iocbq *iocbq;
11553 struct lpfc_iocbq *abtsiocb;
11554 struct lpfc_sli_ring *pring_s4;
11555 IOCB_t *cmd = NULL;
11556 int errcnt = 0, ret_val = 0;
11559 /* all I/Os are in process of being flushed */
11560 if (phba->hba_flag & HBA_IOQ_FLUSH)
11563 for (i = 1; i <= phba->sli.last_iotag; i++) {
11564 iocbq = phba->sli.iocbq_lookup[i];
11566 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11571 * If the iocbq is already being aborted, don't take a second
11572 * action, but do count it.
11574 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11577 /* issue ABTS for this IOCB based on iotag */
11578 abtsiocb = lpfc_sli_get_iocbq(phba);
11579 if (abtsiocb == NULL) {
11584 /* indicate the IO is being aborted by the driver. */
11585 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11587 cmd = &iocbq->iocb;
11588 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11589 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
11590 if (phba->sli_rev == LPFC_SLI_REV4)
11591 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11593 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
11594 abtsiocb->iocb.ulpLe = 1;
11595 abtsiocb->iocb.ulpClass = cmd->ulpClass;
11596 abtsiocb->vport = vport;
11598 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11599 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
11600 if (iocbq->iocb_flag & LPFC_IO_FCP)
11601 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
11602 if (iocbq->iocb_flag & LPFC_IO_FOF)
11603 abtsiocb->iocb_flag |= LPFC_IO_FOF;
11605 if (lpfc_is_link_up(phba))
11606 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11608 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11610 /* Setup callback routine and issue the command. */
11611 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11612 if (phba->sli_rev == LPFC_SLI_REV4) {
11613 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11616 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11619 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11621 if (ret_val == IOCB_ERROR) {
11622 lpfc_sli_release_iocbq(phba, abtsiocb);
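
/*
 * Illustrative usage sketch (not part of the driver): a target-level
 * abort sweep, as a SCSI target reset handler might issue it. The return
 * value is the number of iocbs that could not be aborted. The helper
 * name is hypothetical.
 */
static int lpfc_example_abort_target(struct lpfc_vport *vport,
				     struct lpfc_sli_ring *pring,
				     uint16_t tgt_id)
{
	/* lun_id is ignored for LPFC_CTX_TGT filtering */
	return lpfc_sli_abort_iocb(vport, pring, tgt_id, 0, LPFC_CTX_TGT);
}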
11632 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
11633 * @vport: Pointer to virtual port.
11634 * @pring: Pointer to driver SLI ring object.
11635 * @tgt_id: SCSI ID of the target.
11636 * @lun_id: LUN ID of the scsi device.
11637 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11639 * This function sends an abort command for every SCSI command
11640 * associated with the given virtual port pending on the ring
11641 * filtered by the lpfc_sli_validate_fcp_iocb function.
11642 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
11643 * FCP iocbs associated with lun specified by tgt_id and lun_id
11645 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
11646 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11647 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
11648 * FCP iocbs associated with virtual port.
11649 * This function returns the number of iocbs it aborted.
11650 * This function is called with no locks held right after a taskmgmt
11654 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11655 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
11657 struct lpfc_hba *phba = vport->phba;
11658 struct lpfc_io_buf *lpfc_cmd;
11659 struct lpfc_iocbq *abtsiocbq;
11660 struct lpfc_nodelist *ndlp;
11661 struct lpfc_iocbq *iocbq;
11663 int sum, i, ret_val;
11664 unsigned long iflags;
11665 struct lpfc_sli_ring *pring_s4 = NULL;
11667 spin_lock_irqsave(&phba->hbalock, iflags);
11669 /* all I/Os are in process of being flushed */
11670 if (phba->hba_flag & HBA_IOQ_FLUSH) {
11671 spin_unlock_irqrestore(&phba->hbalock, iflags);
11676 for (i = 1; i <= phba->sli.last_iotag; i++) {
11677 iocbq = phba->sli.iocbq_lookup[i];
11679 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11683 /* Guard against IO completion being called at the same time */
11684 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11685 spin_lock(&lpfc_cmd->buf_lock);
11687 if (!lpfc_cmd->pCmd) {
11688 spin_unlock(&lpfc_cmd->buf_lock);
11692 if (phba->sli_rev == LPFC_SLI_REV4) {
11694 phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
11696 spin_unlock(&lpfc_cmd->buf_lock);
11699 /* Note: both hbalock and ring_lock must be set here */
11700 spin_lock(&pring_s4->ring_lock);
11704 * If the iocbq is already being aborted, don't take a second
11705 * action, but do count it.
11707 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
11708 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
11709 if (phba->sli_rev == LPFC_SLI_REV4)
11710 spin_unlock(&pring_s4->ring_lock);
11711 spin_unlock(&lpfc_cmd->buf_lock);
11715 /* issue ABTS for this IOCB based on iotag */
11716 abtsiocbq = __lpfc_sli_get_iocbq(phba);
11718 if (phba->sli_rev == LPFC_SLI_REV4)
11719 spin_unlock(&pring_s4->ring_lock);
11720 spin_unlock(&lpfc_cmd->buf_lock);
11724 icmd = &iocbq->iocb;
11725 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11726 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
11727 if (phba->sli_rev == LPFC_SLI_REV4)
11728 abtsiocbq->iocb.un.acxri.abortIoTag =
11729 iocbq->sli4_xritag;
11731 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
11732 abtsiocbq->iocb.ulpLe = 1;
11733 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
11734 abtsiocbq->vport = vport;
11736 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11737 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
11738 if (iocbq->iocb_flag & LPFC_IO_FCP)
11739 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
11740 if (iocbq->iocb_flag & LPFC_IO_FOF)
11741 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
11743 ndlp = lpfc_cmd->rdata->pnode;
11745 if (lpfc_is_link_up(phba) &&
11746 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
11747 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11749 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11751 /* Setup callback routine and issue the command. */
11752 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11755 * Indicate the IO is being aborted by the driver and set
11756 * the caller's flag into the aborted IO.
11758 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11760 if (phba->sli_rev == LPFC_SLI_REV4) {
11761 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11763 spin_unlock(&pring_s4->ring_lock);
11765 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11769 spin_unlock(&lpfc_cmd->buf_lock);
11771 if (ret_val == IOCB_ERROR)
11772 __lpfc_sli_release_iocbq(phba, abtsiocbq);
11776 spin_unlock_irqrestore(&phba->hbalock, iflags);
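/*
 * Illustrative sketch only, not part of the driver: a task-management
 * handler might use lpfc_sli_abort_taskmgmt() after sending a LUN reset
 * to ABTS every outstanding FCP I/O on that LUN. The wrapper function
 * below is hypothetical; only the lpfc_sli_abort_taskmgmt() call and its
 * arguments come from the function above.
 *
 *	static void example_abort_lun_ios(struct lpfc_vport *vport,
 *					  struct lpfc_sli_ring *pring,
 *					  uint16_t tgt_id, uint64_t lun_id)
 *	{
 *		int cnt;
 *
 *		cnt = lpfc_sli_abort_taskmgmt(vport, pring, tgt_id, lun_id,
 *					      LPFC_CTX_LUN);
 *		// cnt is the number of iocbs an ABTS was issued for; the
 *		// aborts themselves complete asynchronously.
 *	}
 */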
11781 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
11782 * @phba: Pointer to HBA context object.
11783 * @cmdiocbq: Pointer to command iocb.
11784 * @rspiocbq: Pointer to response iocb.
11786 * This function is the completion handler for iocbs issued using
11787 * the lpfc_sli_issue_iocb_wait function. This function is called by the
11788 * ring event handler function without any lock held. This function
11789 * can be called from both worker thread context and interrupt
11790 * context. This function also can be called from another thread which
11791 * cleans up the SLI layer objects.
11792 * This function copies the contents of the response iocb to the
11793 * response iocb memory object provided by the caller of
11794 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
11795 * sleeps waiting for the iocb completion.
11798 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11799 struct lpfc_iocbq *cmdiocbq,
11800 struct lpfc_iocbq *rspiocbq)
11802 wait_queue_head_t *pdone_q;
11803 unsigned long iflags;
11804 struct lpfc_io_buf *lpfc_cmd;
11806 spin_lock_irqsave(&phba->hbalock, iflags);
11807 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
11810 * A time out has occurred for the iocb. If a time out
11811 * completion handler has been supplied, call it. Otherwise,
11812 * just free the iocbq.
11815 spin_unlock_irqrestore(&phba->hbalock, iflags);
11816 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
11817 cmdiocbq->wait_iocb_cmpl = NULL;
11818 if (cmdiocbq->iocb_cmpl)
11819 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
11821 lpfc_sli_release_iocbq(phba, cmdiocbq);
11825 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
11826 if (cmdiocbq->context2 && rspiocbq)
11827 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
11828 &rspiocbq->iocb, sizeof(IOCB_t));
11830 /* Set the exchange busy flag for task management commands */
11831 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
11832 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
11833 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
11835 if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
11836 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
11838 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
11841 pdone_q = cmdiocbq->context_un.wait_queue;
11844 spin_unlock_irqrestore(&phba->hbalock, iflags);
11849 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
11850 * @phba: Pointer to HBA context object..
11851 * @piocbq: Pointer to command iocb.
11852 * @flag: Flag to test.
11854 * This routine grabs the hbalock and then tests the iocb_flag to
11855 * see if the passed in flag is set.
11857 * 1 if flag is set.
11858 * 0 if flag is not set.
11861 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11862 struct lpfc_iocbq *piocbq, uint32_t flag)
11864 unsigned long iflags;
11867 spin_lock_irqsave(&phba->hbalock, iflags);
11868 ret = piocbq->iocb_flag & flag;
11869 spin_unlock_irqrestore(&phba->hbalock, iflags);
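/*
 * Context note with a minimal sketch (assumed usage, mirroring the wait
 * path below): this helper exists so that wait_event_timeout() can
 * re-evaluate the flag under hbalock on every wakeup, closing the race
 * with the completion handler that sets LPFC_IO_WAKE.
 *
 *	wait_event_timeout(done_q,
 *			   lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
 *			   timeout_req);
 */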
11875 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
11876 * @phba: Pointer to HBA context object..
11877 * @pring: Pointer to sli ring.
11878 * @piocb: Pointer to command iocb.
11879 * @prspiocbq: Pointer to response iocb.
11880 * @timeout: Timeout in number of seconds.
11882 * This function issues the iocb to firmware and waits for the
11883 * iocb to complete. The iocb_cmpl field of the iocb shall be used
11884 * to handle iocbs which time out. If the field is NULL, the
11885 * function shall free the iocbq structure. If more clean up is
11886 * needed, the caller is expected to provide a completion function
11887 * that will provide the needed clean up. If the iocb command is
11888 * not completed within timeout seconds, the function will either
11889 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
11890 * completion function set in the iocb_cmpl field and then return
11891 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
11892 * resources if this function returns IOCB_TIMEDOUT.
11893 * The function waits for the iocb completion using a
11894 * non-interruptible wait.
11895 * This function will sleep while waiting for iocb completion.
11896 * So, this function should not be called from any context which
11897 * does not allow sleeping. Due to the same reason, this function
11898 * cannot be called with interrupts disabled.
11899 * This function assumes that the iocb completions occur while
11900 * this function sleeps. So, this function cannot be called from
11901 * the thread which processes iocb completions for this ring.
11902 * This function clears the iocb_flag of the iocb object before
11903 * issuing the iocb and the iocb completion handler sets this
11904 * flag and wakes this thread when the iocb completes.
11905 * The contents of the response iocb will be copied to prspiocbq
11906 * by the completion handler when the command completes.
11907 * This function returns IOCB_SUCCESS when success.
11908 * This function is called with no lock held.
11911 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
11912 uint32_t ring_number,
11913 struct lpfc_iocbq *piocb,
11914 struct lpfc_iocbq *prspiocbq,
11917 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
11918 long timeleft, timeout_req = 0;
11919 int retval = IOCB_SUCCESS;
11921 struct lpfc_iocbq *iocb;
11923 int txcmplq_cnt = 0;
11924 struct lpfc_sli_ring *pring;
11925 unsigned long iflags;
11926 bool iocb_completed = true;
11928 if (phba->sli_rev >= LPFC_SLI_REV4)
11929 pring = lpfc_sli4_calc_ring(phba, piocb);
11931 pring = &phba->sli.sli3_ring[ring_number];
11933 * If the caller has provided a response iocbq buffer, then context2
11934 * must be NULL or it is an error.
11937 if (piocb->context2)
11939 piocb->context2 = prspiocbq;
11942 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
11943 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
11944 piocb->context_un.wait_queue = &done_q;
11945 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
11947 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
11948 if (lpfc_readl(phba->HCregaddr, &creg_val))
11950 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
11951 writel(creg_val, phba->HCregaddr);
11952 readl(phba->HCregaddr); /* flush */
11955 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
11956 SLI_IOCB_RET_IOCB);
11957 if (retval == IOCB_SUCCESS) {
11958 timeout_req = msecs_to_jiffies(timeout * 1000);
11959 timeleft = wait_event_timeout(done_q,
11960 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
11962 spin_lock_irqsave(&phba->hbalock, iflags);
11963 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
11966 * IOCB timed out. Inform the wake iocb wait
11967 * completion function and set local status
11970 iocb_completed = false;
11971 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
11973 spin_unlock_irqrestore(&phba->hbalock, iflags);
11974 if (iocb_completed) {
11975 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11976 "0331 IOCB wake signaled\n");
11977 /* Note: we are not indicating if the IOCB has a success
11978 * status or not - that's for the caller to check.
11979 * IOCB_SUCCESS means just that the command was sent and
11980 * completed. Not that it completed successfully.
11982 } else if (timeleft == 0) {
11983 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11984 "0338 IOCB wait timeout error - no "
11985 "wake response Data x%x\n", timeout);
11986 retval = IOCB_TIMEDOUT;
11988 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11989 "0330 IOCB wake NOT set, "
11991 timeout, (timeleft / jiffies));
11992 retval = IOCB_TIMEDOUT;
11994 } else if (retval == IOCB_BUSY) {
11995 if (phba->cfg_log_verbose & LOG_SLI) {
11996 list_for_each_entry(iocb, &pring->txq, list) {
11999 list_for_each_entry(iocb, &pring->txcmplq, list) {
12002 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12003 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
12004 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
12008 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12009 "0332 IOCB wait issue failed, Data x%x\n",
12011 retval = IOCB_ERROR;
12014 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12015 if (lpfc_readl(phba->HCregaddr, &creg_val))
12017 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
12018 writel(creg_val, phba->HCregaddr);
12019 readl(phba->HCregaddr); /* flush */
12023 piocb->context2 = NULL;
12025 piocb->context_un.wait_queue = NULL;
12026 piocb->iocb_cmpl = NULL;
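/*
 * Illustrative sketch only, not part of the driver: a typical synchronous
 * caller allocates a command and a response iocbq, issues with a timeout
 * in seconds, and inspects the response copy. Building the command
 * payload is elided; the error handling shown is an assumption based on
 * the contract documented above.
 *
 *	struct lpfc_iocbq *cmd = lpfc_sli_get_iocbq(phba);
 *	struct lpfc_iocbq *rsp = lpfc_sli_get_iocbq(phba);
 *	int rc;
 *
 *	// ... fill in cmd->iocb for the desired operation (not shown) ...
 *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmd, rsp, 30);
 *	if (rc == IOCB_SUCCESS) {
 *		// rsp->iocb now holds the completion; IOCB_SUCCESS only
 *		// means the exchange completed, not that it succeeded.
 *	} else if (rc == IOCB_TIMEDOUT) {
 *		// Do not free cmd here; the wait machinery owns cleanup.
 *	}
 */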
12031 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
12032 * @phba: Pointer to HBA context object.
12033 * @pmboxq: Pointer to driver mailbox object.
12034 * @timeout: Timeout in number of seconds.
12036 * This function issues the mailbox to firmware and waits for the
12037 * mailbox command to complete. If the mailbox command is not
12038 * completed within timeout seconds, it returns MBX_TIMEOUT.
12039 * The function waits for the mailbox completion using a
12040 * non-interruptible, timed wait. The caller must not free the
12041 * mailbox resources if this function returns MBX_TIMEOUT, since
12042 * the command may still complete and be cleaned up later.
12044 * This function will sleep while waiting for mailbox completion.
12045 * So, this function should not be called from any context which
12046 * does not allow sleeping. Due to the same reason, this function
12047 * cannot be called with interrupts disabled.
12048 * This function assumes that the mailbox completion occurs while
12049 * this function sleeps. So, this function cannot be called from
12050 * the worker thread which processes mailbox completions.
12051 * This function is called in the context of HBA management
12053 * This function returns MBX_SUCCESS when successful.
12054 * This function is called with no lock held.
12057 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
12060 struct completion mbox_done;
12062 unsigned long flag;
12064 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
12065 /* setup wake call as mailbox callback */
12066 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
12068 /* setup context3 field to pass completion pointer to wake function */
12069 init_completion(&mbox_done);
12070 pmboxq->context3 = &mbox_done;
12071 /* now issue the command */
12072 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
12073 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
12074 wait_for_completion_timeout(&mbox_done,
12075 msecs_to_jiffies(timeout * 1000));
12077 spin_lock_irqsave(&phba->hbalock, flag);
12078 pmboxq->context3 = NULL;
12080 * if LPFC_MBX_WAKE flag is set the mailbox is completed
12081 * else do not free the resources.
12083 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
12084 retval = MBX_SUCCESS;
12086 retval = MBX_TIMEOUT;
12087 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12089 spin_unlock_irqrestore(&phba->hbalock, flag);
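/*
 * Illustrative sketch only, not part of the driver: a typical caller
 * allocates the mailbox from the driver's mailbox mempool, issues it
 * synchronously, and must not free it on MBX_TIMEOUT because the command
 * can still complete later (lpfc_sli_def_mbox_cmpl will clean it up).
 * The command setup itself is elided.
 *
 *	LPFC_MBOXQ_t *mbox;
 *	int rc;
 *
 *	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mbox)
 *		return -ENOMEM;
 *	// ... build the mailbox command in mbox->u.mb (not shown) ...
 *	rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(mbox, phba->mbox_mem_pool);
 */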
12095 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
12096 * @phba: Pointer to HBA context.
12098 * This function is called to shutdown the driver's mailbox sub-system.
12099 * It first marks the mailbox sub-system as blocked to prevent any
12100 * asynchronous mailbox command from being issued off the pending mailbox
12101 * command queue. If the mailbox command sub-system shutdown is due to
12102 * HBA error conditions such as EEH or ERATT, this routine shall invoke
12103 * the mailbox sub-system flush routine to forcefully bring down the
12104 * mailbox sub-system. Otherwise, if it is due to a normal condition (such
12105 * as with offline or HBA function reset), this routine will wait for the
12106 * outstanding mailbox command to complete before invoking the mailbox
12107 * sub-system flush routine to gracefully bring down mailbox sub-system.
12110 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
12112 struct lpfc_sli *psli = &phba->sli;
12113 unsigned long timeout;
12115 if (mbx_action == LPFC_MBX_NO_WAIT) {
12116 /* delay 100ms for port state */
12118 lpfc_sli_mbox_sys_flush(phba);
12121 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
12123 /* Disable softirqs, including timers from obtaining phba->hbalock */
12124 local_bh_disable();
12126 spin_lock_irq(&phba->hbalock);
12127 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
12129 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
12130 /* Determine how long we might wait for the active mailbox
12131 * command to be gracefully completed by firmware.
12133 if (phba->sli.mbox_active)
12134 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
12135 phba->sli.mbox_active) *
12137 spin_unlock_irq(&phba->hbalock);
12139 /* Enable softirqs again, done with phba->hbalock */
12142 while (phba->sli.mbox_active) {
12143 /* Check active mailbox complete status every 2ms */
12145 if (time_after(jiffies, timeout))
12146 /* Timeout, let the mailbox flush routine
12147 * forcefully release the active mailbox command
12152 spin_unlock_irq(&phba->hbalock);
12154 /* Enable softirqs again, done with phba->hbalock */
12158 lpfc_sli_mbox_sys_flush(phba);
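/*
 * Usage note with a minimal sketch (assumed call sites): callers choose
 * the shutdown mode based on why the mailbox sub-system is going down.
 *
 *	// EEH/ERATT style error: flush immediately, nothing to wait for.
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_NO_WAIT);
 *
 *	// Normal offline or function reset: let the active mailbox finish.
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
 */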
12162 * lpfc_sli_eratt_read - read sli-3 error attention events
12163 * @phba: Pointer to HBA context.
12165 * This function is called to read the SLI3 device error attention registers
12166 * for possible error attention events. The caller must hold the hostlock
12167 * with spin_lock_irq().
12169 * This function returns 1 when there is Error Attention in the Host Attention
12170 * Register and returns 0 otherwise.
12173 lpfc_sli_eratt_read(struct lpfc_hba *phba)
12177 /* Read chip Host Attention (HA) register */
12178 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12181 if (ha_copy & HA_ERATT) {
12182 /* Read host status register to retrieve error event */
12183 if (lpfc_sli_read_hs(phba))
12186 /* Check if a deferred error condition is active */
12187 if ((HS_FFER1 & phba->work_hs) &&
12188 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12189 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
12190 phba->hba_flag |= DEFER_ERATT;
12191 /* Clear all interrupt enable conditions */
12192 writel(0, phba->HCregaddr);
12193 readl(phba->HCregaddr);
12196 /* Set the driver HA work bitmap */
12197 phba->work_ha |= HA_ERATT;
12198 /* Indicate polling handles this ERATT */
12199 phba->hba_flag |= HBA_ERATT_HANDLED;
12205 /* Set the driver HS work bitmap */
12206 phba->work_hs |= UNPLUG_ERR;
12207 /* Set the driver HA work bitmap */
12208 phba->work_ha |= HA_ERATT;
12209 /* Indicate polling handles this ERATT */
12210 phba->hba_flag |= HBA_ERATT_HANDLED;
12215 * lpfc_sli4_eratt_read - read sli-4 error attention events
12216 * @phba: Pointer to HBA context.
12218 * This function is called to read the SLI4 device error attention registers
12219 * for possible error attention events. The caller must hold the hostlock
12220 * with spin_lock_irq().
12222 * This function returns 1 when there is Error Attention in the Host Attention
12223 * Register and returns 0 otherwise.
12226 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
12228 uint32_t uerr_sta_hi, uerr_sta_lo;
12229 uint32_t if_type, portsmphr;
12230 struct lpfc_register portstat_reg;
12233 * For now, use the SLI4 device internal unrecoverable error
12234 * registers for error attention. This can be changed later.
12236 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12238 case LPFC_SLI_INTF_IF_TYPE_0:
12239 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
12241 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
12243 phba->work_hs |= UNPLUG_ERR;
12244 phba->work_ha |= HA_ERATT;
12245 phba->hba_flag |= HBA_ERATT_HANDLED;
12248 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
12249 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
12250 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12251 "1423 HBA Unrecoverable error: "
12252 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
12253 "ue_mask_lo_reg=0x%x, "
12254 "ue_mask_hi_reg=0x%x\n",
12255 uerr_sta_lo, uerr_sta_hi,
12256 phba->sli4_hba.ue_mask_lo,
12257 phba->sli4_hba.ue_mask_hi);
12258 phba->work_status[0] = uerr_sta_lo;
12259 phba->work_status[1] = uerr_sta_hi;
12260 phba->work_ha |= HA_ERATT;
12261 phba->hba_flag |= HBA_ERATT_HANDLED;
12265 case LPFC_SLI_INTF_IF_TYPE_2:
12266 case LPFC_SLI_INTF_IF_TYPE_6:
12267 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
12268 &portstat_reg.word0) ||
12269 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
12271 phba->work_hs |= UNPLUG_ERR;
12272 phba->work_ha |= HA_ERATT;
12273 phba->hba_flag |= HBA_ERATT_HANDLED;
12276 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
12277 phba->work_status[0] =
12278 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
12279 phba->work_status[1] =
12280 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
12281 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12282 "2885 Port Status Event: "
12283 "port status reg 0x%x, "
12284 "port smphr reg 0x%x, "
12285 "error 1=0x%x, error 2=0x%x\n",
12286 portstat_reg.word0,
12288 phba->work_status[0],
12289 phba->work_status[1]);
12290 phba->work_ha |= HA_ERATT;
12291 phba->hba_flag |= HBA_ERATT_HANDLED;
12295 case LPFC_SLI_INTF_IF_TYPE_1:
12297 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12298 "2886 HBA Error Attention on unsupported "
12299 "if type %d.", if_type);
12307 * lpfc_sli_check_eratt - check error attention events
12308 * @phba: Pointer to HBA context.
12310 * This function is called from timer soft interrupt context to check HBA's
12311 * error attention register bit for error attention events.
12313 * This function returns 1 when there is Error Attention in the Host Attention
12314 * Register and returns 0 otherwise.
12317 lpfc_sli_check_eratt(struct lpfc_hba *phba)
12321 /* If somebody is waiting to handle an eratt, don't process it
12322 * here. The brdkill function will do this.
12324 if (phba->link_flag & LS_IGNORE_ERATT)
12327 /* Check if interrupt handler handles this ERATT */
12328 spin_lock_irq(&phba->hbalock);
12329 if (phba->hba_flag & HBA_ERATT_HANDLED) {
12330 /* Interrupt handler has handled ERATT */
12331 spin_unlock_irq(&phba->hbalock);
12336 * If there is deferred error attention, do not check for error
12339 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12340 spin_unlock_irq(&phba->hbalock);
12344 /* If PCI channel is offline, don't process it */
12345 if (unlikely(pci_channel_offline(phba->pcidev))) {
12346 spin_unlock_irq(&phba->hbalock);
12350 switch (phba->sli_rev) {
12351 case LPFC_SLI_REV2:
12352 case LPFC_SLI_REV3:
12353 /* Read chip Host Attention (HA) register */
12354 ha_copy = lpfc_sli_eratt_read(phba);
12356 case LPFC_SLI_REV4:
12357 /* Read device Unrecoverable Error (UERR) registers */
12358 ha_copy = lpfc_sli4_eratt_read(phba);
12361 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12362 "0299 Invalid SLI revision (%d)\n",
12367 spin_unlock_irq(&phba->hbalock);
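/*
 * Illustrative sketch only, not part of the driver: the error-attention
 * poll timer (handler name and timer field usage here are assumptions)
 * calls lpfc_sli_check_eratt() and wakes the worker thread only when an
 * error attention was latched.
 *
 *	static void example_poll_eratt(struct timer_list *t)
 *	{
 *		struct lpfc_hba *phba = from_timer(phba, t, eratt_poll);
 *
 *		if (lpfc_sli_check_eratt(phba))
 *			// phba->work_ha now carries HA_ERATT; recovery is
 *			// left to the worker thread.
 *			lpfc_worker_wake_up(phba);
 *	}
 */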
12373 * lpfc_intr_state_check - Check device state for interrupt handling
12374 * @phba: Pointer to HBA context.
12376 * This inline routine checks whether a device or its PCI slot is in a state
12377 * in which the interrupt should be handled.
12379 * This function returns 0 if the device or the PCI slot is in a state in which
12380 * the interrupt should be handled, otherwise -EIO.
12383 lpfc_intr_state_check(struct lpfc_hba *phba)
12385 /* If the pci channel is offline, ignore all the interrupts */
12386 if (unlikely(pci_channel_offline(phba->pcidev)))
12389 /* Update device level interrupt statistics */
12390 phba->sli.slistat.sli_intr++;
12392 /* Ignore all interrupts during initialization. */
12393 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12400 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
12401 * @irq: Interrupt number.
12402 * @dev_id: The device context pointer.
12404 * This function is directly called from the PCI layer as an interrupt
12405 * service routine when device with SLI-3 interface spec is enabled with
12406 * MSI-X multi-message interrupt mode and there are slow-path events in
12407 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
12408 * interrupt mode, this function is called as part of the device-level
12409 * interrupt handler. When the PCI slot is in error recovery or the HBA
12410 * is undergoing initialization, the interrupt handler will not process
12411 * the interrupt. The link attention and ELS ring attention events are
12412 * handled by the worker thread. The interrupt handler signals the worker
12413 * thread and returns for these events. This function is called without
12414 * any lock held. It gets the hbalock to access and update SLI data
12417 * This function returns IRQ_HANDLED when interrupt is handled else it
12418 * returns IRQ_NONE.
12421 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
12423 struct lpfc_hba *phba;
12424 uint32_t ha_copy, hc_copy;
12425 uint32_t work_ha_copy;
12426 unsigned long status;
12427 unsigned long iflag;
12430 MAILBOX_t *mbox, *pmbox;
12431 struct lpfc_vport *vport;
12432 struct lpfc_nodelist *ndlp;
12433 struct lpfc_dmabuf *mp;
12438 * Get the driver's phba structure from the dev_id and
12439 * assume the HBA is not interrupting.
12441 phba = (struct lpfc_hba *)dev_id;
12443 if (unlikely(!phba))
12447 * Stuff needs to be attended to when this function is invoked as an
12448 * individual interrupt handler in MSI-X multi-message interrupt mode
12450 if (phba->intr_type == MSIX) {
12451 /* Check device state for handling interrupt */
12452 if (lpfc_intr_state_check(phba))
12454 /* Need to read HA REG for slow-path events */
12455 spin_lock_irqsave(&phba->hbalock, iflag);
12456 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12458 /* If somebody is waiting to handle an eratt don't process it
12459 * here. The brdkill function will do this.
12461 if (phba->link_flag & LS_IGNORE_ERATT)
12462 ha_copy &= ~HA_ERATT;
12463 /* Check the need for handling ERATT in interrupt handler */
12464 if (ha_copy & HA_ERATT) {
12465 if (phba->hba_flag & HBA_ERATT_HANDLED)
12466 /* ERATT polling has handled ERATT */
12467 ha_copy &= ~HA_ERATT;
12469 /* Indicate interrupt handler handles ERATT */
12470 phba->hba_flag |= HBA_ERATT_HANDLED;
12474 * If there is deferred error attention, do not check for any
12477 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12478 spin_unlock_irqrestore(&phba->hbalock, iflag);
12482 /* Clear up only attention source related to slow-path */
12483 if (lpfc_readl(phba->HCregaddr, &hc_copy))
12486 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12487 HC_LAINT_ENA | HC_ERINT_ENA),
12489 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12491 writel(hc_copy, phba->HCregaddr);
12492 readl(phba->HAregaddr); /* flush */
12493 spin_unlock_irqrestore(&phba->hbalock, iflag);
12495 ha_copy = phba->ha_copy;
12497 work_ha_copy = ha_copy & phba->work_ha_mask;
12499 if (work_ha_copy) {
12500 if (work_ha_copy & HA_LATT) {
12501 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
12503 * Turn off Link Attention interrupts
12504 * until CLEAR_LA done
12506 spin_lock_irqsave(&phba->hbalock, iflag);
12507 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
12508 if (lpfc_readl(phba->HCregaddr, &control))
12510 control &= ~HC_LAINT_ENA;
12511 writel(control, phba->HCregaddr);
12512 readl(phba->HCregaddr); /* flush */
12513 spin_unlock_irqrestore(&phba->hbalock, iflag);
12516 work_ha_copy &= ~HA_LATT;
12519 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
12521 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
12522 * the only slow ring.
12524 status = (work_ha_copy &
12525 (HA_RXMASK << (4*LPFC_ELS_RING)));
12526 status >>= (4*LPFC_ELS_RING);
12527 if (status & HA_RXMASK) {
12528 spin_lock_irqsave(&phba->hbalock, iflag);
12529 if (lpfc_readl(phba->HCregaddr, &control))
12532 lpfc_debugfs_slow_ring_trc(phba,
12533 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
12535 (uint32_t)phba->sli.slistat.sli_intr);
12537 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
12538 lpfc_debugfs_slow_ring_trc(phba,
12539 "ISR Disable ring:"
12540 "pwork:x%x hawork:x%x wait:x%x",
12541 phba->work_ha, work_ha_copy,
12542 (uint32_t)((unsigned long)
12543 &phba->work_waitq));
12546 ~(HC_R0INT_ENA << LPFC_ELS_RING);
12547 writel(control, phba->HCregaddr);
12548 readl(phba->HCregaddr); /* flush */
12551 lpfc_debugfs_slow_ring_trc(phba,
12552 "ISR slow ring: pwork:"
12553 "x%x hawork:x%x wait:x%x",
12554 phba->work_ha, work_ha_copy,
12555 (uint32_t)((unsigned long)
12556 &phba->work_waitq));
12558 spin_unlock_irqrestore(&phba->hbalock, iflag);
12561 spin_lock_irqsave(&phba->hbalock, iflag);
12562 if (work_ha_copy & HA_ERATT) {
12563 if (lpfc_sli_read_hs(phba))
12566 * Check if there is a deferred error condition
12569 if ((HS_FFER1 & phba->work_hs) &&
12570 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12571 HS_FFER6 | HS_FFER7 | HS_FFER8) &
12573 phba->hba_flag |= DEFER_ERATT;
12574 /* Clear all interrupt enable conditions */
12575 writel(0, phba->HCregaddr);
12576 readl(phba->HCregaddr);
12580 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
12581 pmb = phba->sli.mbox_active;
12582 pmbox = &pmb->u.mb;
12584 vport = pmb->vport;
12586 /* First check out the status word */
12587 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12588 if (pmbox->mbxOwner != OWN_HOST) {
12589 spin_unlock_irqrestore(&phba->hbalock, iflag);
12591 * Stray Mailbox Interrupt, mbxCommand <cmd>
12592 * mbxStatus <status>
12594 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12596 "(%d):0304 Stray Mailbox "
12597 "Interrupt mbxCommand x%x "
12599 (vport ? vport->vpi : 0),
12602 /* clear mailbox attention bit */
12603 work_ha_copy &= ~HA_MBATT;
12605 phba->sli.mbox_active = NULL;
12606 spin_unlock_irqrestore(&phba->hbalock, iflag);
12607 phba->last_completion_time = jiffies;
12608 del_timer(&phba->sli.mbox_tmo);
12609 if (pmb->mbox_cmpl) {
12610 lpfc_sli_pcimem_bcopy(mbox, pmbox,
12612 if (pmb->out_ext_byte_len &&
12614 lpfc_sli_pcimem_bcopy(
12617 pmb->out_ext_byte_len);
12619 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12620 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12622 lpfc_debugfs_disc_trc(vport,
12623 LPFC_DISC_TRC_MBOX_VPORT,
12624 "MBOX dflt rpi: : "
12625 "status:x%x rpi:x%x",
12626 (uint32_t)pmbox->mbxStatus,
12627 pmbox->un.varWords[0], 0);
12629 if (!pmbox->mbxStatus) {
12630 mp = (struct lpfc_dmabuf *)
12632 ndlp = (struct lpfc_nodelist *)
12635 /* Reg_LOGIN of dflt RPI was
12636 * successful. Now let's get
12637 * rid of the RPI using the
12638 * same mbox buffer.
12640 lpfc_unreg_login(phba,
12642 pmbox->un.varWords[0],
12645 lpfc_mbx_cmpl_dflt_rpi;
12647 pmb->ctx_ndlp = ndlp;
12648 pmb->vport = vport;
12649 rc = lpfc_sli_issue_mbox(phba,
12652 if (rc != MBX_BUSY)
12653 lpfc_printf_log(phba,
12655 LOG_MBOX | LOG_SLI,
12656 "0350 rc should have"
12657 "been MBX_BUSY\n");
12658 if (rc != MBX_NOT_FINISHED)
12659 goto send_current_mbox;
12663 &phba->pport->work_port_lock,
12665 phba->pport->work_port_events &=
12667 spin_unlock_irqrestore(
12668 &phba->pport->work_port_lock,
12670 lpfc_mbox_cmpl_put(phba, pmb);
12673 spin_unlock_irqrestore(&phba->hbalock, iflag);
12675 if ((work_ha_copy & HA_MBATT) &&
12676 (phba->sli.mbox_active == NULL)) {
12678 /* Process next mailbox command if there is one */
12680 rc = lpfc_sli_issue_mbox(phba, NULL,
12682 } while (rc == MBX_NOT_FINISHED);
12683 if (rc != MBX_SUCCESS)
12684 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12685 LOG_SLI, "0349 rc should be "
12689 spin_lock_irqsave(&phba->hbalock, iflag);
12690 phba->work_ha |= work_ha_copy;
12691 spin_unlock_irqrestore(&phba->hbalock, iflag);
12692 lpfc_worker_wake_up(phba);
12694 return IRQ_HANDLED;
12696 spin_unlock_irqrestore(&phba->hbalock, iflag);
12697 return IRQ_HANDLED;
12699 } /* lpfc_sli_sp_intr_handler */
12702 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
12703 * @irq: Interrupt number.
12704 * @dev_id: The device context pointer.
12706 * This function is directly called from the PCI layer as an interrupt
12707 * service routine when device with SLI-3 interface spec is enabled with
12708 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
12709 * ring event in the HBA. However, when the device is enabled with either
12710 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12711 * device-level interrupt handler. When the PCI slot is in error recovery
12712 * or the HBA is undergoing initialization, the interrupt handler will not
12713 * process the interrupt. The SCSI FCP fast-path ring events are handled in
12714 * the interrupt context. This function is called without any lock held.
12715 * It gets the hbalock to access and update SLI data structures.
12717 * This function returns IRQ_HANDLED when interrupt is handled else it
12718 * returns IRQ_NONE.
12721 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
12723 struct lpfc_hba *phba;
12725 unsigned long status;
12726 unsigned long iflag;
12727 struct lpfc_sli_ring *pring;
12729 /* Get the driver's phba structure from the dev_id and
12730 * assume the HBA is not interrupting.
12732 phba = (struct lpfc_hba *) dev_id;
12734 if (unlikely(!phba))
12738 * Stuff needs to be attended to when this function is invoked as an
12739 * individual interrupt handler in MSI-X multi-message interrupt mode
12741 if (phba->intr_type == MSIX) {
12742 /* Check device state for handling interrupt */
12743 if (lpfc_intr_state_check(phba))
12745 /* Need to read HA REG for FCP ring and other ring events */
12746 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12747 return IRQ_HANDLED;
12748 /* Clear up only attention source related to fast-path */
12749 spin_lock_irqsave(&phba->hbalock, iflag);
12751 * If there is deferred error attention, do not check for
12754 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12755 spin_unlock_irqrestore(&phba->hbalock, iflag);
12758 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
12760 readl(phba->HAregaddr); /* flush */
12761 spin_unlock_irqrestore(&phba->hbalock, iflag);
12763 ha_copy = phba->ha_copy;
12766 * Process all events on FCP ring. Take the optimized path for FCP IO.
12768 ha_copy &= ~(phba->work_ha_mask);
12770 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12771 status >>= (4*LPFC_FCP_RING);
12772 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12773 if (status & HA_RXMASK)
12774 lpfc_sli_handle_fast_ring_event(phba, pring, status);
12776 if (phba->cfg_multi_ring_support == 2) {
12778 * Process all events on extra ring. Take the optimized path
12779 * for extra ring IO.
12781 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12782 status >>= (4*LPFC_EXTRA_RING);
12783 if (status & HA_RXMASK) {
12784 lpfc_sli_handle_fast_ring_event(phba,
12785 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
12789 return IRQ_HANDLED;
12790 } /* lpfc_sli_fp_intr_handler */
12793 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
12794 * @irq: Interrupt number.
12795 * @dev_id: The device context pointer.
12797 * This function is the HBA device-level interrupt handler to device with
12798 * SLI-3 interface spec, called from the PCI layer when either MSI or
12799 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
12800 * requires driver attention. This function invokes the slow-path interrupt
12801 * attention handling function and fast-path interrupt attention handling
12802 * function in turn to process the relevant HBA attention events. This
12803 * function is called without any lock held. It gets the hbalock to access
12804 * and update SLI data structures.
12806 * This function returns IRQ_HANDLED when interrupt is handled, else it
12807 * returns IRQ_NONE.
12810 lpfc_sli_intr_handler(int irq, void *dev_id)
12812 struct lpfc_hba *phba;
12813 irqreturn_t sp_irq_rc, fp_irq_rc;
12814 unsigned long status1, status2;
12818 * Get the driver's phba structure from the dev_id and
12819 * assume the HBA is not interrupting.
12821 phba = (struct lpfc_hba *) dev_id;
12823 if (unlikely(!phba))
12826 /* Check device state for handling interrupt */
12827 if (lpfc_intr_state_check(phba))
12830 spin_lock(&phba->hbalock);
12831 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
12832 spin_unlock(&phba->hbalock);
12833 return IRQ_HANDLED;
12836 if (unlikely(!phba->ha_copy)) {
12837 spin_unlock(&phba->hbalock);
12839 } else if (phba->ha_copy & HA_ERATT) {
12840 if (phba->hba_flag & HBA_ERATT_HANDLED)
12841 /* ERATT polling has handled ERATT */
12842 phba->ha_copy &= ~HA_ERATT;
12844 /* Indicate interrupt handler handles ERATT */
12845 phba->hba_flag |= HBA_ERATT_HANDLED;
12849 * If there is a deferred error attention condition, do not check for any interrupt.
12851 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12852 spin_unlock(&phba->hbalock);
12856 /* Clear attention sources except link and error attentions */
12857 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
12858 spin_unlock(&phba->hbalock);
12859 return IRQ_HANDLED;
12861 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
12862 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
12864 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
12865 writel(hc_copy, phba->HCregaddr);
12866 readl(phba->HAregaddr); /* flush */
12867 spin_unlock(&phba->hbalock);
12870 * Invoke slow-path host attention interrupt handling as appropriate.
12873 /* status of events with mailbox and link attention */
12874 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
12876 /* status of events with ELS ring */
12877 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
12878 status2 >>= (4*LPFC_ELS_RING);
12880 if (status1 || (status2 & HA_RXMASK))
12881 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
12883 sp_irq_rc = IRQ_NONE;
12886 * Invoke fast-path host attention interrupt handling as appropriate.
12889 /* status of events with FCP ring */
12890 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12891 status1 >>= (4*LPFC_FCP_RING);
12893 /* status of events with extra ring */
12894 if (phba->cfg_multi_ring_support == 2) {
12895 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12896 status2 >>= (4*LPFC_EXTRA_RING);
12900 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
12901 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
12903 fp_irq_rc = IRQ_NONE;
12905 /* Return device-level interrupt handling status */
12906 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
12907 } /* lpfc_sli_intr_handler */
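/*
 * Illustrative sketch only, not part of the driver: how the three SLI-3
 * handlers above would typically be wired up at probe time. The vector
 * variables are hypothetical; request_irq() is the standard kernel API.
 *
 *	// MSI or pin-IRQ: one device-level handler does the dispatching.
 *	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
 *			 IRQF_SHARED, "lpfc", phba);
 *
 *	// MSI-X: slow path and fast path each get their own vector.
 *	rc = request_irq(sp_vector, lpfc_sli_sp_intr_handler, 0,
 *			 "lpfc:sp", phba);
 *	rc = request_irq(fp_vector, lpfc_sli_fp_intr_handler, 0,
 *			 "lpfc:fp", phba);
 */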
12910 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
12911 * @phba: pointer to lpfc hba data structure.
12913 * This routine is invoked by the worker thread to process all the pending
12914 * SLI4 els abort xri events.
12916 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
12918 struct lpfc_cq_event *cq_event;
12920 /* First, declare the els xri abort event has been handled */
12921 spin_lock_irq(&phba->hbalock);
12922 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
12923 spin_unlock_irq(&phba->hbalock);
12924 /* Now, handle all the els xri abort events */
12925 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
12926 /* Get the first event from the head of the event queue */
12927 spin_lock_irq(&phba->hbalock);
12928 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
12929 cq_event, struct lpfc_cq_event, list);
12930 spin_unlock_irq(&phba->hbalock);
12931 /* Notify aborted XRI for ELS work queue */
12932 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12933 /* Free the event processed back to the free pool */
12934 lpfc_sli4_cq_event_release(phba, cq_event);
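/*
 * Pairing note with a minimal sketch (assumed worker-loop excerpt): the
 * CQ handler queues the aborted-XRI event and sets ELS_XRI_ABORT_EVENT
 * (see lpfc_sli4_sp_handle_abort_xri_wcqe below); the worker thread's
 * main loop then drains it roughly like this.
 *
 *	if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
 *		lpfc_sli4_els_xri_abort_event_proc(phba);
 */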
12939 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
12940 * @phba: pointer to lpfc hba data structure
12941 * @pIocbIn: pointer to the rspiocbq
12942 * @pIocbOut: pointer to the cmdiocbq
12943 * @wcqe: pointer to the complete wcqe
12945 * This routine transfers the fields of a command iocbq to a response iocbq
12946 * by copying all the IOCB fields from command iocbq and transferring the
12947 * completion status information from the complete wcqe.
12950 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
12951 struct lpfc_iocbq *pIocbIn,
12952 struct lpfc_iocbq *pIocbOut,
12953 struct lpfc_wcqe_complete *wcqe)
12956 unsigned long iflags;
12957 uint32_t status, max_response;
12958 struct lpfc_dmabuf *dmabuf;
12959 struct ulp_bde64 *bpl, bde;
12960 size_t offset = offsetof(struct lpfc_iocbq, iocb);
12962 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
12963 sizeof(struct lpfc_iocbq) - offset);
12964 /* Map WCQE parameters into irspiocb parameters */
12965 status = bf_get(lpfc_wcqe_c_status, wcqe);
12966 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
12967 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
12968 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
12969 pIocbIn->iocb.un.fcpi.fcpi_parm =
12970 pIocbOut->iocb.un.fcpi.fcpi_parm -
12971 wcqe->total_data_placed;
12973 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12975 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12976 switch (pIocbOut->iocb.ulpCommand) {
12977 case CMD_ELS_REQUEST64_CR:
12978 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12979 bpl = (struct ulp_bde64 *)dmabuf->virt;
12980 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
12981 max_response = bde.tus.f.bdeSize;
12983 case CMD_GEN_REQUEST64_CR:
12985 if (!pIocbOut->context3)
12987 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
12988 sizeof(struct ulp_bde64);
12989 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12990 bpl = (struct ulp_bde64 *)dmabuf->virt;
12991 for (i = 0; i < numBdes; i++) {
12992 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
12993 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
12994 max_response += bde.tus.f.bdeSize;
12998 max_response = wcqe->total_data_placed;
13001 if (max_response < wcqe->total_data_placed)
13002 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
13004 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
13005 wcqe->total_data_placed;
13008 /* Convert BG errors for completion status */
13009 if (status == CQE_STATUS_DI_ERROR) {
13010 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
13012 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
13013 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
13015 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
13017 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
13018 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
13019 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13020 BGS_GUARD_ERR_MASK;
13021 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
13022 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13023 BGS_APPTAG_ERR_MASK;
13024 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
13025 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13026 BGS_REFTAG_ERR_MASK;
13028 /* Check to see if there was any good data before the error */
13029 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
13030 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13031 BGS_HI_WATER_MARK_PRESENT_MASK;
13032 pIocbIn->iocb.unsli3.sli3_bg.bghm =
13033 wcqe->total_data_placed;
13037 * Set ALL the error bits to indicate we don't know what
13038 * type of error it is.
13040 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
13041 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13042 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
13043 BGS_GUARD_ERR_MASK);
13046 /* Pick up HBA exchange busy condition */
13047 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
13048 spin_lock_irqsave(&phba->hbalock, iflags);
13049 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
13050 spin_unlock_irqrestore(&phba->hbalock, iflags);
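/*
 * Worked example (illustrative): for an FCP read that requested 0x1000
 * bytes (fcpi_parm == 0x1000) and completed with IOSTAT_FCP_RSP_ERROR
 * while the WCQE reports total_data_placed == 0x400, the transfer above
 * leaves the residual in the pseudo response:
 *
 *	fcpi_parm = 0x1000 - 0x400 = 0xc00
 *
 * i.e. the under-run byte count the SCSI layer expects to see.
 */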
13055 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
13056 * @phba: Pointer to HBA context object.
13057 * @wcqe: Pointer to work-queue completion queue entry.
13059 * This routine handles an ELS work-queue completion event and constructs
13060 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
13061 * discovery engine to handle.
13063 * Return: Pointer to the receive IOCBQ, NULL otherwise.
13065 static struct lpfc_iocbq *
13066 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
13067 struct lpfc_iocbq *irspiocbq)
13069 struct lpfc_sli_ring *pring;
13070 struct lpfc_iocbq *cmdiocbq;
13071 struct lpfc_wcqe_complete *wcqe;
13072 unsigned long iflags;
13074 pring = lpfc_phba_elsring(phba);
13075 if (unlikely(!pring))
13078 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
13079 pring->stats.iocb_event++;
13080 /* Look up the ELS command IOCB and create pseudo response IOCB */
13081 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13082 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13083 if (unlikely(!cmdiocbq)) {
13084 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13085 "0386 ELS complete with no corresponding "
13086 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13087 wcqe->word0, wcqe->total_data_placed,
13088 wcqe->parameter, wcqe->word3);
13089 lpfc_sli_release_iocbq(phba, irspiocbq);
13093 spin_lock_irqsave(&pring->ring_lock, iflags);
13094 /* Put the iocb back on the txcmplq */
13095 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13096 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13098 /* Fake the irspiocbq and copy necessary response information */
13099 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
13104 inline struct lpfc_cq_event *
13105 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13107 struct lpfc_cq_event *cq_event;
13109 /* Allocate a new internal CQ_EVENT entry */
13110 cq_event = lpfc_sli4_cq_event_alloc(phba);
13112 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13113 "0602 Failed to alloc CQ_EVENT entry\n");
13117 /* Move the CQE into the event */
13118 memcpy(&cq_event->cqe, entry, size);
13123 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
13124 * @phba: Pointer to HBA context object.
13125 * @cqe: Pointer to mailbox completion queue entry.
13127 * This routine processes a mailbox completion queue entry with an asynchronous
13130 * Return: true if work posted to worker thread, otherwise false.
13133 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13135 struct lpfc_cq_event *cq_event;
13136 unsigned long iflags;
13138 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13139 "0392 Async Event: word0:x%x, word1:x%x, "
13140 "word2:x%x, word3:x%x\n", mcqe->word0,
13141 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13143 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13146 spin_lock_irqsave(&phba->hbalock, iflags);
13147 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13148 /* Set the async event flag */
13149 phba->hba_flag |= ASYNC_EVENT;
13150 spin_unlock_irqrestore(&phba->hbalock, iflags);
13156 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
13157 * @phba: Pointer to HBA context object.
13158 * @cqe: Pointer to mailbox completion queue entry.
13160 * This routine processes a mailbox completion queue entry with a mailbox
13161 * completion event.
13163 * Return: true if work posted to worker thread, otherwise false.
13166 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13168 uint32_t mcqe_status;
13169 MAILBOX_t *mbox, *pmbox;
13170 struct lpfc_mqe *mqe;
13171 struct lpfc_vport *vport;
13172 struct lpfc_nodelist *ndlp;
13173 struct lpfc_dmabuf *mp;
13174 unsigned long iflags;
13176 bool workposted = false;
13179 /* If not a mailbox complete MCQE, bail out after checking mailbox consume */
13180 if (!bf_get(lpfc_trailer_completed, mcqe))
13181 goto out_no_mqe_complete;
13183 /* Get the reference to the active mbox command */
13184 spin_lock_irqsave(&phba->hbalock, iflags);
13185 pmb = phba->sli.mbox_active;
13186 if (unlikely(!pmb)) {
13187 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13188 "1832 No pending MBOX command to handle\n");
13189 spin_unlock_irqrestore(&phba->hbalock, iflags);
13190 goto out_no_mqe_complete;
13192 spin_unlock_irqrestore(&phba->hbalock, iflags);
13194 pmbox = (MAILBOX_t *)&pmb->u.mqe;
13196 vport = pmb->vport;
13198 /* Reset heartbeat timer */
13199 phba->last_completion_time = jiffies;
13200 del_timer(&phba->sli.mbox_tmo);
13202 /* Move mbox data to caller's mailbox region, do endian swapping */
13203 if (pmb->mbox_cmpl && mbox)
13204 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
13207 * For mcqe errors, conditionally move a modified error code to
13208 * the mbox so that the error will not be missed.
13210 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13211 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13212 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13213 bf_set(lpfc_mqe_status, mqe,
13214 (LPFC_MBX_ERROR_RANGE | mcqe_status));
13216 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13217 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13218 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13219 "MBOX dflt rpi: status:x%x rpi:x%x",
13221 pmbox->un.varWords[0], 0);
13222 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
13223 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13224 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
13225 /* Reg_LOGIN of dflt RPI was successful. Now let's get
13226 * rid of the RPI using the same mbox buffer.
13228 lpfc_unreg_login(phba, vport->vpi,
13229 pmbox->un.varWords[0], pmb);
13230 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
13232 pmb->ctx_ndlp = ndlp;
13233 pmb->vport = vport;
13234 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
13235 if (rc != MBX_BUSY)
13236 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
13237 LOG_SLI, "0385 rc should "
13238 "have been MBX_BUSY\n");
13239 if (rc != MBX_NOT_FINISHED)
13240 goto send_current_mbox;
13243 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
13244 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
13245 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
13247 /* There is mailbox completion work to do */
13248 spin_lock_irqsave(&phba->hbalock, iflags);
13249 __lpfc_mbox_cmpl_put(phba, pmb);
13250 phba->work_ha |= HA_MBATT;
13251 spin_unlock_irqrestore(&phba->hbalock, iflags);
13255 spin_lock_irqsave(&phba->hbalock, iflags);
13256 /* Release the mailbox command posting token */
13257 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13258 /* Setting the active mailbox pointer needs to be in sync with the flag clear */
13259 phba->sli.mbox_active = NULL;
13260 if (bf_get(lpfc_trailer_consumed, mcqe))
13261 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13262 spin_unlock_irqrestore(&phba->hbalock, iflags);
13263 /* Wake up worker thread to post the next pending mailbox command */
13264 lpfc_worker_wake_up(phba);
13267 out_no_mqe_complete:
13268 spin_lock_irqsave(&phba->hbalock, iflags);
13269 if (bf_get(lpfc_trailer_consumed, mcqe))
13270 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13271 spin_unlock_irqrestore(&phba->hbalock, iflags);
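/*
 * Worked example (illustrative): if the MCQE carries an error status,
 * say mcqe_status == 0x2, while the MQE status still reads MBX_SUCCESS,
 * the code above rewrites the MQE status to
 *
 *	(LPFC_MBX_ERROR_RANGE | 0x2)
 *
 * so that completion handlers which only look at the MQE status cannot
 * mistake the failure for success.
 */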
13276 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
13277 * @phba: Pointer to HBA context object.
13278 * @cqe: Pointer to mailbox completion queue entry.
13280 * This routine processes a mailbox completion queue entry and invokes the
13281 * proper mailbox completion handling or asynchronous event handling routine
13282 * according to the MCQE's async bit.
13284 * Return: true if work posted to worker thread, otherwise false.
13287 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13288 struct lpfc_cqe *cqe)
13290 struct lpfc_mcqe mcqe;
13295 /* Copy the mailbox MCQE and convert endian order as needed */
13296 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
13298 /* Invoke the proper event handling routine */
13299 if (!bf_get(lpfc_trailer_async, &mcqe))
13300 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
13302 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
13307 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
13308 * @phba: Pointer to HBA context object.
13309 * @cq: Pointer to associated CQ
13310 * @wcqe: Pointer to work-queue completion queue entry.
13312 * This routine handles an ELS work-queue completion event.
13314 * Return: true if work posted to worker thread, otherwise false.
13317 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13318 struct lpfc_wcqe_complete *wcqe)
13320 struct lpfc_iocbq *irspiocbq;
13321 unsigned long iflags;
13322 struct lpfc_sli_ring *pring = cq->pring;
13324 int txcmplq_cnt = 0;
13326 /* Check for response status */
13327 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13328 /* Log the error status */
13329 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13330 "0357 ELS CQE error: status=x%x: "
13331 "CQE: %08x %08x %08x %08x\n",
13332 bf_get(lpfc_wcqe_c_status, wcqe),
13333 wcqe->word0, wcqe->total_data_placed,
13334 wcqe->parameter, wcqe->word3);
13337 /* Get an irspiocbq for later ELS response processing use */
13338 irspiocbq = lpfc_sli_get_iocbq(phba);
13340 if (!list_empty(&pring->txq))
13342 if (!list_empty(&pring->txcmplq))
13344 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13345 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
13346 "els_txcmplq_cnt=%d\n",
13347 txq_cnt, phba->iocb_cnt,
13352 /* Save off the slow-path queue event for work thread to process */
13353 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
13354 spin_lock_irqsave(&phba->hbalock, iflags);
13355 list_add_tail(&irspiocbq->cq_event.list,
13356 &phba->sli4_hba.sp_queue_event);
13357 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13358 spin_unlock_irqrestore(&phba->hbalock, iflags);
13364 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
13365 * @phba: Pointer to HBA context object.
13366 * @wcqe: Pointer to work-queue completion queue entry.
13368 * This routine handles a slow-path WQ entry consumed event by invoking the
13369 * proper WQ release routine for the slow-path WQ.
13372 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13373 struct lpfc_wcqe_release *wcqe)
13375 /* sanity check on queue memory */
13376 if (unlikely(!phba->sli4_hba.els_wq))
13378 /* Check for the slow-path ELS work queue */
13379 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13380 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13381 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13383 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13384 "2579 Slow-path wqe consume event carries "
13385 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13386 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
13387 phba->sli4_hba.els_wq->queue_id);
13391 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
13392 * @phba: Pointer to HBA context object.
13393 * @cq: Pointer to a WQ completion queue.
13394 * @wcqe: Pointer to work-queue completion queue entry.
13396 * This routine handles an XRI abort event.
13398 * Return: true if work posted to worker thread, otherwise false.
13401 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13402 struct lpfc_queue *cq,
13403 struct sli4_wcqe_xri_aborted *wcqe)
13405 bool workposted = false;
13406 struct lpfc_cq_event *cq_event;
13407 unsigned long iflags;
13409 switch (cq->subtype) {
13411 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
13412 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13413 /* Notify aborted XRI for NVME work queue */
13414 if (phba->nvmet_support)
13415 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13417 workposted = false;
13419 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
13421 cq_event = lpfc_cq_event_setup(
13422 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
13425 cq_event->hdwq = cq->hdwq;
13426 spin_lock_irqsave(&phba->hbalock, iflags);
13427 list_add_tail(&cq_event->list,
13428 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13429 /* Set the els xri abort event flag */
13430 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13431 spin_unlock_irqrestore(&phba->hbalock, iflags);
13435 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13436 "0603 Invalid CQ subtype %d: "
13437 "%08x %08x %08x %08x\n",
13438 cq->subtype, wcqe->word0, wcqe->parameter,
13439 wcqe->word2, wcqe->word3);
13440 workposted = false;
13446 #define FC_RCTL_MDS_DIAGS 0xF4
13449 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
13450 * @phba: Pointer to HBA context object.
13451 * @rcqe: Pointer to receive-queue completion queue entry.
13453 * This routine processes a receive-queue completion queue entry.
13455 * Return: true if work posted to worker thread, otherwise false.
13458 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13460 bool workposted = false;
13461 struct fc_frame_header *fc_hdr;
13462 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13463 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
13464 struct lpfc_nvmet_tgtport *tgtp;
13465 struct hbq_dmabuf *dma_buf;
13466 uint32_t status, rq_id;
13467 unsigned long iflags;
13469 /* sanity check on queue memory */
13470 if (unlikely(!hrq) || unlikely(!drq))
13473 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13474 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13476 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13477 if (rq_id != hrq->queue_id)
13480 status = bf_get(lpfc_rcqe_status, rcqe);
13482 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13483 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13484 "2537 Receive Frame Truncated!!\n");
13486 case FC_STATUS_RQ_SUCCESS:
13487 spin_lock_irqsave(&phba->hbalock, iflags);
13488 lpfc_sli4_rq_release(hrq, drq);
13489 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13491 hrq->RQ_no_buf_found++;
13492 spin_unlock_irqrestore(&phba->hbalock, iflags);
13496 hrq->RQ_buf_posted--;
13497 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
13499 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13501 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
13502 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
13503 spin_unlock_irqrestore(&phba->hbalock, iflags);
13504 /* Handle MDS Loopback frames */
13505 lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
13509 /* save off the frame for the work thread to process */
13510 list_add_tail(&dma_buf->cq_event.list,
13511 &phba->sli4_hba.sp_queue_event);
13512 /* Frame received */
13513 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13514 spin_unlock_irqrestore(&phba->hbalock, iflags);
13517 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13518 if (phba->nvmet_support) {
13519 tgtp = phba->targetport->private;
13520 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13521 "6402 RQE Error x%x, posted %d err_cnt "
13523 status, hrq->RQ_buf_posted,
13524 hrq->RQ_no_posted_buf,
13525 atomic_read(&tgtp->rcv_fcp_cmd_in),
13526 atomic_read(&tgtp->rcv_fcp_cmd_out),
13527 atomic_read(&tgtp->xmt_fcp_release));
13531 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13532 hrq->RQ_no_posted_buf++;
13533 /* Post more buffers if possible */
13534 spin_lock_irqsave(&phba->hbalock, iflags);
13535 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13536 spin_unlock_irqrestore(&phba->hbalock, iflags);
13545 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
13546 * @phba: Pointer to HBA context object.
13547 * @cq: Pointer to the completion queue.
13548 * @cqe: Pointer to a completion queue entry.
13550 * This routine processes a slow-path work-queue or receive-queue completion queue
13553 * Return: true if work posted to worker thread, otherwise false.
13556 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13557 struct lpfc_cqe *cqe)
13559 struct lpfc_cqe cqevt;
13560 bool workposted = false;
13562 /* Copy the work queue CQE and convert endian order if needed */
13563 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
13565 /* Check and process for different type of WCQE and dispatch */
13566 switch (bf_get(lpfc_cqe_code, &cqevt)) {
13567 case CQE_CODE_COMPL_WQE:
13568 /* Process the WQ/RQ complete event */
13569 phba->last_completion_time = jiffies;
13570 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
13571 (struct lpfc_wcqe_complete *)&cqevt);
13573 case CQE_CODE_RELEASE_WQE:
13574 /* Process the WQ release event */
13575 lpfc_sli4_sp_handle_rel_wcqe(phba,
13576 (struct lpfc_wcqe_release *)&cqevt);
13578 case CQE_CODE_XRI_ABORTED:
13579 /* Process the WQ XRI abort event */
13580 phba->last_completion_time = jiffies;
13581 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13582 (struct sli4_wcqe_xri_aborted *)&cqevt);
13584 case CQE_CODE_RECEIVE:
13585 case CQE_CODE_RECEIVE_V1:
13586 /* Process the RQ event */
13587 phba->last_completion_time = jiffies;
13588 workposted = lpfc_sli4_sp_handle_rcqe(phba,
13589 (struct lpfc_rcqe *)&cqevt);
13592 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13593 "0388 Not a valid WCQE code: x%x\n",
13594 bf_get(lpfc_cqe_code, &cqevt));
13601 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
13602 * @phba: Pointer to HBA context object.
13603 * @eqe: Pointer to slow-path event queue entry.
13605 * This routine processes an event queue entry from the slow-path event queue.
13606 * It checks the MajorCode and MinorCode to determine whether this is a
13607 * completion event on a completion queue; if not, an error is logged and the
13608 * routine returns. Otherwise, it looks up the corresponding completion queue,
13609 * processes all the entries on that completion queue, rearms the
13610 * completion queue, and returns.
13614 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13615 struct lpfc_queue *speq)
13617 struct lpfc_queue *cq = NULL, *childq;
13620 /* Get the reference to the corresponding CQ */
13621 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13623 list_for_each_entry(childq, &speq->child_list, list) {
13624 if (childq->queue_id == cqid) {
13629 if (unlikely(!cq)) {
13630 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13631 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13632 "0365 Slow-path CQ identifier "
13633 "(%d) does not exist\n", cqid);
13637 /* Save EQ associated with this CQ */
13638 cq->assoc_qp = speq;
13640 if (!queue_work_on(cq->chann, phba->wq, &cq->spwork))
13641 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13642 "0390 Cannot schedule soft IRQ "
13643 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13644 cqid, cq->queue_id, raw_smp_processor_id());
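/* Editor's sketch (hypothetical names): the slow path resolves a CQ id
 * from the EQE by a linear walk of the EQ's child list, exactly the
 * shape below; compare the O(1) cq_lookup[] table used on the fast
 * path later in this file.
 */
struct ex_cq {
	unsigned int queue_id;
	struct ex_cq *next_child;	/* stand-in for the kernel list_head */
};

static struct ex_cq *ex_find_child_cq(struct ex_cq *children,
				      unsigned int cqid)
{
	struct ex_cq *q;

	for (q = children; q; q = q->next_child)
		if (q->queue_id == cqid)
			return q;
	return NULL;			/* caller logs "does not exist" */
}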
13648 * __lpfc_sli4_process_cq - Process elements of a CQ
13649 * @phba: Pointer to HBA context object.
13650 * @cq: Pointer to CQ to be processed
13651 * @handler: Routine to process each cqe
13652 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
13654 * This routine processes completion queue entries in a CQ. While a valid
13655 * queue element is found, the handler is called. During processing checks
13656 * are made for periodic doorbell writes to let the hardware know of
13657 * element consumption.
13659 * If the max limit on cqes to process is hit, or there are no more valid
13660 * entries, the loop stops. If we processed a sufficient number of elements,
13661 * meaning there is sufficient load, rather than rearming and generating
13662 * another interrupt, a cq rescheduling delay will be set. A delay of 0
13663 * indicates no rescheduling.
13665 * Returns true if work was scheduled, false otherwise.
13668 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
13669 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
13670 struct lpfc_cqe *), unsigned long *delay)
13672 struct lpfc_cqe *cqe;
13673 bool workposted = false;
13674 int count = 0, consumed = 0;
13677 /* default - no reschedule */
13680 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
13681 goto rearm_and_exit;
13683 /* Process all the entries to the CQ */
13685 cqe = lpfc_sli4_cq_get(cq);
13687 workposted |= handler(phba, cq, cqe);
13688 __lpfc_sli4_consume_cqe(phba, cq, cqe);
13691 if (!(++count % cq->max_proc_limit))
13694 if (!(count % cq->notify_interval)) {
13695 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13698 cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
13701 if (count == LPFC_NVMET_CQ_NOTIFY)
13702 cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
13704 cqe = lpfc_sli4_cq_get(cq);
13706 if (count >= phba->cfg_cq_poll_threshold) {
13711 /* Track the max number of CQEs processed in 1 EQ */
13712 if (count > cq->CQ_max_cqe)
13713 cq->CQ_max_cqe = count;
13715 cq->assoc_qp->EQ_cqe_cnt += count;
13717 /* Catch the no cq entry condition */
13718 if (unlikely(count == 0))
13719 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13720 "0369 No entry from completion queue "
13721 "qid=%d\n", cq->queue_id);
13723 cq->queue_claimed = 0;
13726 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13727 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
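/* Editor's sketch: the core of __lpfc_sli4_process_cq is "claim the CQ
 * with cmpxchg so only one context consumes it, ring the doorbell every
 * notify_interval entries, and stop at the processing limit, returning
 * a reschedule delay instead of rearming". A userspace C11 analogue
 * with hypothetical names and stubbed hardware accessors:
 */
#include <stdatomic.h>
#include <stdbool.h>

#define EX_NOTIFY_INTERVAL	16
#define EX_MAX_PROC_LIMIT	64

struct ex_ccq { atomic_int claimed; };

static int ex_next_cqe(struct ex_ccq *cq) { (void)cq; return -1; }   /* stub */
static void ex_cq_doorbell(int consumed, bool rearm)
{ (void)consumed; (void)rearm; }				     /* stub */

static bool ex_process_cq(struct ex_ccq *cq, unsigned long *delay)
{
	int expected = 0, count = 0, consumed = 0;

	*delay = 0;				/* default: no reschedule */
	if (!atomic_compare_exchange_strong(&cq->claimed, &expected, 1))
		return false;			/* another context owns the CQ */

	while (ex_next_cqe(cq) >= 0) {
		consumed++;
		if (!(++count % EX_NOTIFY_INTERVAL)) {
			ex_cq_doorbell(consumed, false); /* NOARM progress update */
			consumed = 0;
		}
		if (count >= EX_MAX_PROC_LIMIT) {
			*delay = 1;		/* heavy load: poll again later */
			break;
		}
	}
	atomic_store(&cq->claimed, 0);
	ex_cq_doorbell(consumed, *delay == 0);	/* REARM only if not rescheduling */
	return count > 0;
}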
13733 * __lpfc_sli4_sp_process_cq - Process a slow-path completion queue
13734 * @cq: pointer to CQ to process
13736 * This routine calls the cq processing routine with a handler specific
13737 * to the type of queue bound to it.
13739 * The CQ routine returns two values: the first is the calling status,
13740 * which indicates whether work was queued to the background discovery
13741 * thread. If true, the routine should wake up the discovery thread;
13742 * the second is the delay parameter. If non-zero, rather than rearming
13743 * the CQ and taking yet another interrupt, the CQ handler should be queued so
13744 * that it is processed in a subsequent polling action. The value of
13745 * the delay indicates when to reschedule it.
13748 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
13750 struct lpfc_hba *phba = cq->phba;
13751 unsigned long delay;
13752 bool workposted = false;
13754 /* Process and rearm the CQ */
13755 switch (cq->type) {
13757 workposted |= __lpfc_sli4_process_cq(phba, cq,
13758 lpfc_sli4_sp_handle_mcqe,
13762 if (cq->subtype == LPFC_IO)
13763 workposted |= __lpfc_sli4_process_cq(phba, cq,
13764 lpfc_sli4_fp_handle_cqe,
13767 workposted |= __lpfc_sli4_process_cq(phba, cq,
13768 lpfc_sli4_sp_handle_cqe,
13772 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13773 "0370 Invalid completion queue type (%d)\n",
13779 if (!queue_delayed_work_on(cq->chann, phba->wq,
13780 &cq->sched_spwork, delay))
13781 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13782 "0394 Cannot schedule soft IRQ "
13783 "for cqid=%d on CPU %d\n",
13784 cq->queue_id, cq->chann);
13787 /* wake up worker thread if there are works to be done */
13789 lpfc_worker_wake_up(phba);
13793 * lpfc_sli4_sp_process_cq - slow-path work handler when started by
13794 * workqueue
13795 * @work: pointer to work element
13797 * translates from the work handler and calls the slow-path handler.
13800 lpfc_sli4_sp_process_cq(struct work_struct *work)
13802 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
13804 __lpfc_sli4_sp_process_cq(cq);
13808 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
13809 * @work: pointer to work element
13811 * translates from the work handler and calls the slow-path handler.
13814 lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
13816 struct lpfc_queue *cq = container_of(to_delayed_work(work),
13817 struct lpfc_queue, sched_spwork);
13819 __lpfc_sli4_sp_process_cq(cq);
13823 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
13824 * @phba: Pointer to HBA context object.
13825 * @cq: Pointer to associated CQ
13826 * @wcqe: Pointer to work-queue completion queue entry.
13828 * This routine processes a fast-path work-queue completion entry from the
13829 * fast-path event queue for FCP command response completion.
13832 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13833 struct lpfc_wcqe_complete *wcqe)
13835 struct lpfc_sli_ring *pring = cq->pring;
13836 struct lpfc_iocbq *cmdiocbq;
13837 struct lpfc_iocbq irspiocbq;
13838 unsigned long iflags;
13840 /* Check for response status */
13841 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13842 /* If resource errors reported from HBA, reduce queue
13843 * depth of the SCSI device.
13845 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
13846 IOSTAT_LOCAL_REJECT)) &&
13847 ((wcqe->parameter & IOERR_PARAM_MASK) ==
13848 IOERR_NO_RESOURCES))
13849 phba->lpfc_rampdown_queue_depth(phba);
13851 /* Log the error status */
13852 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13853 "0373 FCP CQE error: status=x%x: "
13854 "CQE: %08x %08x %08x %08x\n",
13855 bf_get(lpfc_wcqe_c_status, wcqe),
13856 wcqe->word0, wcqe->total_data_placed,
13857 wcqe->parameter, wcqe->word3);
13860 /* Look up the FCP command IOCB and create pseudo response IOCB */
13861 spin_lock_irqsave(&pring->ring_lock, iflags);
13862 pring->stats.iocb_event++;
13863 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13864 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13865 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13866 if (unlikely(!cmdiocbq)) {
13867 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13868 "0374 FCP complete with no corresponding "
13869 "cmdiocb: iotag (%d)\n",
13870 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13873 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13874 cmdiocbq->isr_timestamp = cq->isr_timestamp;
13876 if (cmdiocbq->iocb_cmpl == NULL) {
13877 if (cmdiocbq->wqe_cmpl) {
13878 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13879 spin_lock_irqsave(&phba->hbalock, iflags);
13880 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13881 spin_unlock_irqrestore(&phba->hbalock, iflags);
13884 /* Pass the cmd_iocb and the wcqe to the upper layer */
13885 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
13888 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13889 "0375 FCP cmdiocb not callback function "
13890 "registered: iotag (%d)\n",
13891 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13895 /* Fake the irspiocb and copy necessary response information */
13896 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
13898 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13899 spin_lock_irqsave(&phba->hbalock, iflags);
13900 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13901 spin_unlock_irqrestore(&phba->hbalock, iflags);
13904 /* Pass the cmd_iocb and the rsp state to the upper layer */
13905 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
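/* Editor's sketch (hypothetical types): completion delivery above is a
 * double dispatch through function pointers stored on the command --
 * wqe_cmpl consumers take the raw WCQE, while legacy iocb_cmpl
 * consumers get a faked response IOCB built from it.
 */
struct ex_wcqe { unsigned int status; };
struct ex_rsp  { unsigned int status; };
struct ex_cmd {
	void (*wqe_cmpl)(struct ex_cmd *cmd, struct ex_wcqe *wcqe);
	void (*iocb_cmpl)(struct ex_cmd *cmd, struct ex_rsp *rsp);
};

static void ex_complete_cmd(struct ex_cmd *cmd, struct ex_wcqe *wcqe)
{
	if (!cmd->iocb_cmpl && cmd->wqe_cmpl) {
		cmd->wqe_cmpl(cmd, wcqe);	/* new style: raw WCQE */
	} else if (cmd->iocb_cmpl) {
		struct ex_rsp rsp = { .status = wcqe->status };

		cmd->iocb_cmpl(cmd, &rsp);	/* legacy: faked response */
	}
	/* else: warn "no callback function registered", as above */
}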
13909 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
13910 * @phba: Pointer to HBA context object.
13911 * @cq: Pointer to completion queue.
13912 * @wcqe: Pointer to work-queue completion queue entry.
13914 * This routine handles a fast-path WQ entry consumed event by invoking the
13915 * proper WQ release routine to the slow-path WQ.
13918 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13919 struct lpfc_wcqe_release *wcqe)
13921 struct lpfc_queue *childwq;
13922 bool wqid_matched = false;
13925 /* Check for fast-path FCP work queue release */
13926 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
13927 list_for_each_entry(childwq, &cq->child_list, list) {
13928 if (childwq->queue_id == hba_wqid) {
13929 lpfc_sli4_wq_release(childwq,
13930 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13931 if (childwq->q_flag & HBA_NVMET_WQFULL)
13932 lpfc_nvmet_wqfull_process(phba, childwq);
13933 wqid_matched = true;
13937 /* Report warning log message if no match found */
13938 if (wqid_matched != true)
13939 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13940 "2580 Fast-path wqe consume event carries "
13941 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
13945 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
13946 * @phba: Pointer to HBA context object.
13947 * @rcqe: Pointer to receive-queue completion queue entry.
13949 * This routine processes a receive-queue completion queue entry.
13951 * Return: true if work posted to worker thread, otherwise false.
13954 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13955 struct lpfc_rcqe *rcqe)
13957 bool workposted = false;
13958 struct lpfc_queue *hrq;
13959 struct lpfc_queue *drq;
13960 struct rqb_dmabuf *dma_buf;
13961 struct fc_frame_header *fc_hdr;
13962 struct lpfc_nvmet_tgtport *tgtp;
13963 uint32_t status, rq_id;
13964 unsigned long iflags;
13965 uint32_t fctl, idx;
13967 if ((phba->nvmet_support == 0) ||
13968 (phba->sli4_hba.nvmet_cqset == NULL))
13971 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
13972 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
13973 drq = phba->sli4_hba.nvmet_mrq_data[idx];
13975 /* sanity check on queue memory */
13976 if (unlikely(!hrq) || unlikely(!drq))
13979 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13980 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13982 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13984 if ((phba->nvmet_support == 0) ||
13985 (rq_id != hrq->queue_id))
13988 status = bf_get(lpfc_rcqe_status, rcqe);
13990 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13991 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13992 "6126 Receive Frame Truncated!!\n");
13994 case FC_STATUS_RQ_SUCCESS:
13995 spin_lock_irqsave(&phba->hbalock, iflags);
13996 lpfc_sli4_rq_release(hrq, drq);
13997 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
13999 hrq->RQ_no_buf_found++;
14000 spin_unlock_irqrestore(&phba->hbalock, iflags);
14003 spin_unlock_irqrestore(&phba->hbalock, iflags);
14005 hrq->RQ_buf_posted--;
14006 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14008 /* Just some basic sanity checks on FCP Command frame */
14009 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
14010 fc_hdr->fh_f_ctl[1] << 8 |
14011 fc_hdr->fh_f_ctl[2]);
14012 if (((fctl &
14013 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
14014 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
14015 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
14018 if (fc_hdr->fh_type == FC_TYPE_FCP) {
14019 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
14020 lpfc_nvmet_unsol_fcp_event(
14021 phba, idx, dma_buf, cq->isr_timestamp,
14022 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
14026 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
14028 case FC_STATUS_INSUFF_BUF_FRM_DISC:
14029 if (phba->nvmet_support) {
14030 tgtp = phba->targetport->private;
14031 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
14032 "6401 RQE Error x%x, posted %d err_cnt "
14033 "%d: %08x %08x %08x\n",
14034 status, hrq->RQ_buf_posted,
14035 hrq->RQ_no_posted_buf,
14036 atomic_read(&tgtp->rcv_fcp_cmd_in),
14037 atomic_read(&tgtp->rcv_fcp_cmd_out),
14038 atomic_read(&tgtp->xmt_fcp_release));
14042 case FC_STATUS_INSUFF_BUF_NEED_BUF:
14043 hrq->RQ_no_posted_buf++;
14044 /* Post more buffers if possible */
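/* Editor's note: the F_CTL check earlier in this routine builds a
 * 24-bit value from three header bytes. Worked example (bit values per
 * include/uapi/scsi/fc/fc_fs.h, where FC_FC_FIRST_SEQ = 1 << 21,
 * FC_FC_END_SEQ = 1 << 19 and FC_FC_SEQ_INIT = 1 << 16):
 * fh_f_ctl = {0x29, 0x00, 0x00} composes to fctl = 0x290000 ==
 * FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, i.e. a single-frame
 * sequence holding sequence initiative, exactly what the check accepts.
 * A hypothetical helper for the composition (not part of the driver):
 */
static inline unsigned int ex_fc_fctl(const unsigned char f_ctl[3])
{
	return (unsigned int)f_ctl[0] << 16 |
	       (unsigned int)f_ctl[1] << 8 |
	       (unsigned int)f_ctl[2];
}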
14052 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
14053 * @phba: adapter with cq
14054 * @cq: Pointer to the completion queue.
14055 * @cqe: Pointer to fast-path completion queue entry.
14057 * This routine processes a fast-path work-queue completion entry from the
14058 * fast-path event queue for FCP command response completion.
14060 * Return: true if work posted to worker thread, otherwise false.
14063 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14064 struct lpfc_cqe *cqe)
14066 struct lpfc_wcqe_release wcqe;
14067 bool workposted = false;
14069 /* Copy the work queue CQE and convert endian order if needed */
14070 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
14072 /* Check and process for different type of WCQE and dispatch */
14073 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
14074 case CQE_CODE_COMPL_WQE:
14075 case CQE_CODE_NVME_ERSP:
14077 /* Process the WQ complete event */
14078 phba->last_completion_time = jiffies;
14079 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
14080 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14081 (struct lpfc_wcqe_complete *)&wcqe);
14083 case CQE_CODE_RELEASE_WQE:
14084 cq->CQ_release_wqe++;
14085 /* Process the WQ release event */
14086 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
14087 (struct lpfc_wcqe_release *)&wcqe);
14089 case CQE_CODE_XRI_ABORTED:
14090 cq->CQ_xri_aborted++;
14091 /* Process the WQ XRI abort event */
14092 phba->last_completion_time = jiffies;
14093 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14094 (struct sli4_wcqe_xri_aborted *)&wcqe);
14096 case CQE_CODE_RECEIVE_V1:
14097 case CQE_CODE_RECEIVE:
14098 phba->last_completion_time = jiffies;
14099 if (cq->subtype == LPFC_NVMET) {
14100 workposted = lpfc_sli4_nvmet_handle_rcqe(
14101 phba, cq, (struct lpfc_rcqe *)&wcqe);
14105 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14106 "0144 Not a valid CQE code: x%x\n",
14107 bf_get(lpfc_wcqe_c_code, &wcqe));
14114 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
14115 * @phba: Pointer to HBA context object.
14116 * @eqe: Pointer to fast-path event queue entry.
14118 * This routine processes an event queue entry from the fast-path event queue.
14119 * It checks the MajorCode and MinorCode to determine whether this is a
14120 * completion event on a completion queue; if not, an error is logged and the
14121 * routine returns. Otherwise, it looks up the corresponding completion queue,
14122 * processes all the entries on the completion queue, rearms the
14123 * completion queue, and returns.
14126 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
14127 struct lpfc_eqe *eqe)
14129 struct lpfc_queue *cq = NULL;
14130 uint32_t qidx = eq->hdwq;
14133 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
14134 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14135 "0366 Not a valid completion "
14136 "event: majorcode=x%x, minorcode=x%x\n",
14137 bf_get_le32(lpfc_eqe_major_code, eqe),
14138 bf_get_le32(lpfc_eqe_minor_code, eqe));
14142 /* Get the reference to the corresponding CQ */
14143 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14145 /* Use the fast lookup method first */
14146 if (cqid <= phba->sli4_hba.cq_max) {
14147 cq = phba->sli4_hba.cq_lookup[cqid];
14152 /* Next check for NVMET completion */
14153 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
14154 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
14155 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
14156 /* Process NVMET unsol rcv */
14157 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
14162 if (phba->sli4_hba.nvmels_cq &&
14163 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
14164 /* Process NVME unsol rcv */
14165 cq = phba->sli4_hba.nvmels_cq;
14168 /* Otherwise this is a Slow path event */
14170 lpfc_sli4_sp_handle_eqe(phba, eqe,
14171 phba->sli4_hba.hdwq[qidx].hba_eq);
14176 if (unlikely(cqid != cq->queue_id)) {
14177 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14178 "0368 Miss-matched fast-path completion "
14179 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
14180 cqid, cq->queue_id);
14185 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
14186 if (phba->ktime_on)
14187 cq->isr_timestamp = ktime_get_ns();
14189 cq->isr_timestamp = 0;
14191 if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork))
14192 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14193 "0363 Cannot schedule soft IRQ "
14194 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14195 cqid, cq->queue_id, raw_smp_processor_id());
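/* Editor's sketch (hypothetical names): the "fast lookup method" above
 * trades a little memory for O(1) dispatch -- an array indexed by queue
 * id, built once at queue-setup time, replaces the child-list walk that
 * the slow path uses.
 */
struct ex_fcq { unsigned int queue_id; };

struct ex_cq_tbl {
	struct ex_fcq **tbl;		/* tbl[qid] -> cq, or NULL */
	unsigned int cq_max;		/* highest valid qid in tbl */
};

static struct ex_fcq *ex_cq_from_eqe(struct ex_cq_tbl *lk, unsigned int cqid)
{
	if (cqid <= lk->cq_max)
		return lk->tbl[cqid];	/* O(1) hit on the hot path */
	return NULL;			/* fall back to slower searches */
}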
14199 * __lpfc_sli4_hba_process_cq - Process a fast-path completion queue
14200 * @cq: Pointer to CQ to be processed
14202 * This routine calls the cq processing routine with the handler for
14205 * The CQ routine returns two values: the first is the calling status,
14206 * which indicates whether work was queued to the background discovery
14207 * thread. If true, the routine should wake up the discovery thread;
14208 * the second is the delay parameter. If non-zero, rather than rearming
14209 * the CQ and taking yet another interrupt, the CQ handler should be queued so
14210 * that it is processed in a subsequent polling action. The value of
14211 * the delay indicates when to reschedule it.
14214 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
14216 struct lpfc_hba *phba = cq->phba;
14217 unsigned long delay;
14218 bool workposted = false;
14220 /* process and rearm the CQ */
14221 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
14225 if (!queue_delayed_work_on(cq->chann, phba->wq,
14226 &cq->sched_irqwork, delay))
14227 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14228 "0367 Cannot schedule soft IRQ "
14229 "for cqid=%d on CPU %d\n",
14230 cq->queue_id, cq->chann);
14233 /* wake up worker thread if there are works to be done */
14235 lpfc_worker_wake_up(phba);
14239 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
14240 * workqueue
14241 * @work: pointer to work element
14243 * translates from the work handler and calls the fast-path handler.
14246 lpfc_sli4_hba_process_cq(struct work_struct *work)
14248 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
14250 __lpfc_sli4_hba_process_cq(cq);
14254 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
14255 * @work: pointer to work element
14257 * translates from the work handler and calls the fast-path handler.
14260 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
14262 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14263 struct lpfc_queue, sched_irqwork);
14265 __lpfc_sli4_hba_process_cq(cq);
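/* Editor's sketch: both work handlers above recover the owning queue
 * from the embedded work item via container_of() (through
 * to_delayed_work() for the timer-driven variant). The pattern in
 * isolation, with a local macro so it stands alone; all ex_* names
 * are hypothetical.
 */
#include <stddef.h>

#define ex_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ex_work { int pending; };

struct ex_wq_queue {
	int qid;
	struct ex_work irqwork;		/* embedded work element */
};

static void ex_work_fn(struct ex_work *w)
{
	struct ex_wq_queue *q =
		ex_container_of(w, struct ex_wq_queue, irqwork);

	(void)q;			/* process q's CQ here */
}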
14269 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
14270 * @irq: Interrupt number.
14271 * @dev_id: The device context pointer.
14273 * This function is directly called from the PCI layer as an interrupt
14274 * service routine when device with SLI-4 interface spec is enabled with
14275 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
14276 * ring event in the HBA. However, when the device is enabled with either
14277 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
14278 * device-level interrupt handler. When the PCI slot is in error recovery
14279 * or the HBA is undergoing initialization, the interrupt handler will not
14280 * process the interrupt. The SCSI FCP fast-path ring event are handled in
14281 * the intrrupt context. This function is called without any lock held.
14282 * It gets the hbalock to access and update SLI data structures. Note that,
14283 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
14284 * equal to that of FCP CQ index.
14286 * The link attention and ELS ring attention events are handled
14287 * by the worker thread. The interrupt handler signals the worker thread
14288 * and returns for these events.
14291 * This function returns IRQ_HANDLED when the interrupt is handled; else it
14292 * returns IRQ_NONE.
14295 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
14297 struct lpfc_hba *phba;
14298 struct lpfc_hba_eq_hdl *hba_eq_hdl;
14299 struct lpfc_queue *fpeq;
14300 unsigned long iflag;
14303 struct lpfc_eq_intr_info *eqi;
14306 /* Get the driver's phba structure from the dev_id */
14307 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
14308 phba = hba_eq_hdl->phba;
14309 hba_eqidx = hba_eq_hdl->idx;
14311 if (unlikely(!phba))
14313 if (unlikely(!phba->sli4_hba.hdwq))
14316 /* Get to the EQ struct associated with this vector */
14317 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
14318 if (unlikely(!fpeq))
14321 /* Check device state for handling interrupt */
14322 if (unlikely(lpfc_intr_state_check(phba))) {
14323 /* Check again for link_state with lock held */
14324 spin_lock_irqsave(&phba->hbalock, iflag);
14325 if (phba->link_state < LPFC_LINK_DOWN)
14326 /* Flush, clear interrupt, and rearm the EQ */
14327 lpfc_sli4_eqcq_flush(phba, fpeq);
14328 spin_unlock_irqrestore(&phba->hbalock, iflag);
14332 eqi = phba->sli4_hba.eq_info;
14333 icnt = this_cpu_inc_return(eqi->icnt);
14334 fpeq->last_cpu = raw_smp_processor_id();
14336 if (icnt > LPFC_EQD_ISR_TRIGGER &&
14337 fpeq->q_flag & HBA_EQ_DELAY_CHK &&
14338 phba->cfg_auto_imax &&
14339 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
14340 phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
14341 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
14343 /* process and rearm the EQ */
14344 ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
14346 if (unlikely(ecount == 0)) {
14347 fpeq->EQ_no_entry++;
14348 if (phba->intr_type == MSIX)
14349 /* MSI-X vectors are not shared, so an empty EQ is unexpected */
14350 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14351 "0358 MSI-X interrupt with no EQE\n");
14353 /* Non MSI-X: the interrupt line may be shared, so an empty EQ is normal */
14357 return IRQ_HANDLED;
14358 } /* lpfc_sli4_hba_intr_handler */
14361 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
14362 * @irq: Interrupt number.
14363 * @dev_id: The device context pointer.
14365 * This function is the device-level interrupt handler for a device with the SLI-4
14366 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
14367 * interrupt mode is enabled and there is an event in the HBA which requires
14368 * driver attention. This function invokes the slow-path interrupt attention
14369 * handling function and fast-path interrupt attention handling function in
14370 * turn to process the relevant HBA attention events. This function is called
14371 * without any lock held. It gets the hbalock to access and update SLI data
14374 * This function returns IRQ_HANDLED when interrupt is handled, else it
14375 * returns IRQ_NONE.
14378 lpfc_sli4_intr_handler(int irq, void *dev_id)
14380 struct lpfc_hba *phba;
14381 irqreturn_t hba_irq_rc;
14382 bool hba_handled = false;
14385 /* Get the driver's phba structure from the dev_id */
14386 phba = (struct lpfc_hba *)dev_id;
14388 if (unlikely(!phba))
14392 * Invoke fast-path host attention interrupt handling as appropriate.
14394 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
14395 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
14396 &phba->sli4_hba.hba_eq_hdl[qidx]);
14397 if (hba_irq_rc == IRQ_HANDLED)
14398 hba_handled |= true;
14401 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
14402 } /* lpfc_sli4_intr_handler */
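/* Editor's sketch (hypothetical names): in MSI or INTx mode a single
 * interrupt must service every EQ, so the device-level handler just
 * fans out to the per-vector handler and reports IRQ_HANDLED if any EQ
 * had work -- the same shape as the loop above.
 */
#include <stdbool.h>

enum ex_irqreturn { EX_IRQ_NONE, EX_IRQ_HANDLED };

static enum ex_irqreturn ex_one_eq_handler(int irq, void *eq_hdl)
{
	(void)irq; (void)eq_hdl;
	return EX_IRQ_HANDLED;			/* stub per-EQ handler */
}

static enum ex_irqreturn ex_dev_intr_handler(int irq, void **eq_hdls, int nvec)
{
	bool handled = false;
	int qidx;

	for (qidx = 0; qidx < nvec; qidx++)
		if (ex_one_eq_handler(irq, eq_hdls[qidx]) == EX_IRQ_HANDLED)
			handled = true;

	return handled ? EX_IRQ_HANDLED : EX_IRQ_NONE;
}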
14404 void lpfc_sli4_poll_hbtimer(struct timer_list *t)
14406 struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
14407 struct lpfc_queue *eq;
14412 list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
14413 i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
14414 if (!list_empty(&phba->poll_list))
14415 mod_timer(&phba->cpuhp_poll_timer,
14416 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
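/* Editor's sketch: the heartbeat timer above re-arms itself only while
 * the poll list is non-empty, so polling costs nothing once every EQ is
 * back in interrupt mode. Same shape with hypothetical ex_* names; the
 * kernel APIs (from_timer, mod_timer, list_empty) and LPFC_POLL_HB are
 * the ones already used in this file.
 */
struct ex_host {
	struct timer_list poll_timer;
	struct list_head poll_list;
};

static void ex_poll_all_eqs(struct ex_host *h) { (void)h; }	/* stub */

static void ex_poll_tick(struct timer_list *t)
{
	struct ex_host *h = from_timer(h, t, poll_timer);

	ex_poll_all_eqs(h);
	if (!list_empty(&h->poll_list))
		mod_timer(&h->poll_timer,
			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));
	/* else let the timer lapse; re-adding an EQ restarts it */
}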
14421 inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
14423 struct lpfc_hba *phba = eq->phba;
14427 * Unlocking an irq is one of the entry points to check
14428 * for re-scheduling, but we are fine on the io submission
14429 * path as the midlayer does a get_cpu to pin us. Flush
14430 * out the invalidation queue so we can see the updated
14435 if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
14436 /* We will likely not get the completion for the caller
14437 * during this iteration, but that's fine.
14438 * Future io's coming on this eq should be able to
14439 * pick it up. As for single io's, they
14440 * will be handled through a sched from the polling timer
14441 * function, which is currently triggered every 1 msec.
14443 i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
14448 static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
14450 struct lpfc_hba *phba = eq->phba;
14452 /* kickstart slowpath processing if needed */
14453 if (list_empty(&phba->poll_list))
14454 mod_timer(&phba->cpuhp_poll_timer,
14455 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
14457 list_add_rcu(&eq->_poll_list, &phba->poll_list);
14461 static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
14463 struct lpfc_hba *phba = eq->phba;
14465 /* Disable slowpath processing for this eq. Kick-start the eq
14466 * by re-arming it ASAP.
14468 list_del_rcu(&eq->_poll_list);
14471 if (list_empty(&phba->poll_list))
14472 del_timer_sync(&phba->cpuhp_poll_timer);
14475 void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
14477 struct lpfc_queue *eq, *next;
14479 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
14480 list_del(&eq->_poll_list);
14482 INIT_LIST_HEAD(&phba->poll_list);
14487 __lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
14489 if (mode == eq->mode)
14492 * Currently this function is only called during a hotplug
14493 * event and the cpu on which this function is executing
14494 * is going offline. By now the hotplug has instructed
14495 * the scheduler to remove this cpu from the cpu active mask.
14496 * So we don't need to worry about being put aside by the
14497 * scheduler for a high-priority process. Yes, interrupts
14498 * could still come in, but they are known to retire ASAP.
14501 /* Disable polling in the fastpath */
14502 WRITE_ONCE(eq->mode, mode);
14503 /* flush out the store buffer */
14507 * Add this eq to the polling list and start polling. For
14508 * a grace period both interrupt handler and poller will
14509 * try to process the eq _but_ that's fine. We have a
14510 * synchronization mechanism in place (queue_claimed) to
14511 * deal with it. This is just a draining phase for the
14512 * interrupt handler (not the eq's) as we have guaranteed through
14513 * the barrier that all the CPUs have seen the new CQ_POLLED
14514 * state, which will effectively disable the REARMING of
14515 * the EQ. The whole idea is that eq's die off eventually as
14516 * we are not rearming EQ's anymore.
14518 mode ? lpfc_sli4_add_to_poll_list(eq) :
14519 lpfc_sli4_remove_from_poll_list(eq);
14522 void lpfc_sli4_start_polling(struct lpfc_queue *eq)
14524 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
14527 void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
14529 struct lpfc_hba *phba = eq->phba;
14531 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
14533 /* Kick-start the pending io's in h/w.
14534 * Once we switch back to interrupt processing on an eq,
14535 * the io completion path will only arm the eq when it
14536 * receives a completion. But since the eq is in a
14537 * disarmed state it doesn't receive a completion. This
14538 * creates a deadlock scenario.
14540 phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
14544 * lpfc_sli4_queue_free - free a queue structure and associated memory
14545 * @queue: The queue structure to free.
14547 * This function frees a queue structure and the DMAable memory used for
14548 * the host resident queue. This function must be called after destroying the
14549 * queue on the HBA.
14552 lpfc_sli4_queue_free(struct lpfc_queue *queue)
14554 struct lpfc_dmabuf *dmabuf;
14559 if (!list_empty(&queue->wq_list))
14560 list_del(&queue->wq_list);
14562 while (!list_empty(&queue->page_list)) {
14563 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
14565 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
14566 dmabuf->virt, dmabuf->phys);
14570 lpfc_free_rq_buffer(queue->phba, queue);
14571 kfree(queue->rqbp);
14574 if (!list_empty(&queue->cpu_list))
14575 list_del(&queue->cpu_list);
14582 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
14583 * @phba: The HBA that this queue is being created on.
14584 * @page_size: The size of a queue page
14585 * @entry_size: The size of each queue entry for this queue.
14586 * @entry_count: The number of entries that this queue will handle.
14587 * @cpu: The cpu that will primarily utilize this queue.
14589 * This function allocates a queue structure and the DMAable memory used for
14590 * the host resident queue. This function must be called before creating the
14591 * queue on the HBA.
14593 struct lpfc_queue *
14594 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
14595 uint32_t entry_size, uint32_t entry_count, int cpu)
14597 struct lpfc_queue *queue;
14598 struct lpfc_dmabuf *dmabuf;
14599 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14602 if (!phba->sli4_hba.pc_sli4_params.supported)
14603 hw_page_size = page_size;
14605 pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
14607 /* If needed, adjust the page count to match the max the adapter supports */
14608 if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
14609 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
14611 queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
14612 GFP_KERNEL, cpu_to_node(cpu));
14616 INIT_LIST_HEAD(&queue->list);
14617 INIT_LIST_HEAD(&queue->_poll_list);
14618 INIT_LIST_HEAD(&queue->wq_list);
14619 INIT_LIST_HEAD(&queue->wqfull_list);
14620 INIT_LIST_HEAD(&queue->page_list);
14621 INIT_LIST_HEAD(&queue->child_list);
14622 INIT_LIST_HEAD(&queue->cpu_list);
14624 /* Set queue parameters now. If the system cannot provide memory
14625 * resources, the free routine needs to know what was allocated.
14627 queue->page_count = pgcnt;
14628 queue->q_pgs = (void **)&queue[1];
14629 queue->entry_cnt_per_pg = hw_page_size / entry_size;
14630 queue->entry_size = entry_size;
14631 queue->entry_count = entry_count;
14632 queue->page_size = hw_page_size;
14633 queue->phba = phba;
14635 for (x = 0; x < queue->page_count; x++) {
14636 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
14637 dev_to_node(&phba->pcidev->dev));
14640 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14641 hw_page_size, &dmabuf->phys,
14643 if (!dmabuf->virt) {
14647 dmabuf->buffer_tag = x;
14648 list_add_tail(&dmabuf->list, &queue->page_list);
14649 /* use lpfc_sli4_qe to index a particular entry in this page */
14650 queue->q_pgs[x] = dmabuf->virt;
14652 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
14653 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
14654 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
14655 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
14657 /* notify_interval will be set during q creation */
14661 lpfc_sli4_queue_free(queue);
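/* Editor's note: the allocator above sizes the ring as
 *   pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size
 * e.g. 1024 entries of 16 bytes on a 4 KiB page needs
 * ALIGN(16384, 4096) / 4096 = 4 pages. A hypothetical standalone helper
 * doing the same round-up:
 */
static inline unsigned int ex_queue_pages(unsigned int entry_size,
					  unsigned int entry_count,
					  unsigned int page_size)
{
	unsigned long bytes = (unsigned long)entry_size * entry_count;

	return (bytes + page_size - 1) / page_size;	/* round up */
}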
14666 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
14667 * @phba: HBA structure that indicates port to create a queue on.
14668 * @pci_barset: PCI BAR set flag.
14670 * This function shall perform iomap of the specified PCI BAR address to a host
14671 * memory address if not already done, and return it. The returned host
14672 * memory address can be NULL.
14674 static void __iomem *
14675 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14680 switch (pci_barset) {
14681 case WQ_PCI_BAR_0_AND_1:
14682 return phba->pci_bar0_memmap_p;
14683 case WQ_PCI_BAR_2_AND_3:
14684 return phba->pci_bar2_memmap_p;
14685 case WQ_PCI_BAR_4_AND_5:
14686 return phba->pci_bar4_memmap_p;
14694 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
14695 * @phba: HBA structure that EQs are on.
14696 * @startq: The starting EQ index to modify
14697 * @numq: The number of EQs (consecutive indexes) to modify
14698 * @usdelay: amount of delay
14700 * This function revises the EQ delay on 1 or more EQs. The EQ delay
14701 * is set either by writing to a register (if supported by the SLI Port)
14702 * or by mailbox command. The mailbox command allows several EQs to be
14705 * The @phba struct is used to send a mailbox command to HBA. The @startq
14706 * is used to get the starting EQ index to change. The @numq value is
14707 * used to specify how many consecutive EQ indexes, starting at EQ index,
14708 * are to be changed. This function is synchronous and will wait for any
14709 * mailbox commands to finish before returning.
14711 * On success this function will return a zero. If unable to allocate
14712 * enough memory this function will return -ENOMEM. If a mailbox command
14713 * fails this function will return -ENXIO. Note: on ENXIO, some EQs may
14714 * have had their delay multiplier changed.
14717 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
14718 uint32_t numq, uint32_t usdelay)
14720 struct lpfc_mbx_modify_eq_delay *eq_delay;
14721 LPFC_MBOXQ_t *mbox;
14722 struct lpfc_queue *eq;
14723 int cnt = 0, rc, length;
14724 uint32_t shdr_status, shdr_add_status;
14727 union lpfc_sli4_cfg_shdr *shdr;
14729 if (startq >= phba->cfg_irq_chann)
14732 if (usdelay > 0xFFFF) {
14733 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
14734 "6429 usdelay %d too large. Scaled down to "
14735 "0xFFFF.\n", usdelay);
14739 /* set values by EQ_DELAY register if supported */
14740 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
14741 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
14742 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
14746 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
14754 /* Otherwise, set values by mailbox cmd */
14756 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14758 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME,
14759 "6428 Failed allocating mailbox cmd buffer."
14760 " EQ delay was not set.\n");
14763 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
14764 sizeof(struct lpfc_sli4_cfg_mhdr));
14765 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14766 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
14767 length, LPFC_SLI4_MBX_EMBED);
14768 eq_delay = &mbox->u.mqe.un.eq_delay;
14770 /* Calculate the delay multiplier from the maximum interrupts per second */
14771 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
14774 if (dmult > LPFC_DMULT_MAX)
14775 dmult = LPFC_DMULT_MAX;
14777 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
14778 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
14781 eq->q_mode = usdelay;
14782 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
14783 eq_delay->u.request.eq[cnt].phase = 0;
14784 eq_delay->u.request.eq[cnt].delay_multi = dmult;
14789 eq_delay->u.request.num_eq = cnt;
14791 mbox->vport = phba->pport;
14792 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14793 mbox->ctx_buf = NULL;
14794 mbox->ctx_ndlp = NULL;
14795 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14796 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
14797 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14798 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14799 if (shdr_status || shdr_add_status || rc) {
14800 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14801 "2512 MODIFY_EQ_DELAY mailbox failed with "
14802 "status x%x add_status x%x, mbx status x%x\n",
14803 shdr_status, shdr_add_status, rc);
14805 mempool_free(mbox, phba->mbox_mem_pool);
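/* Editor's note: the mailbox path above converts the requested delay to
 * a delay multiplier with
 *   dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC
 * and clamps it to LPFC_DMULT_MAX (constants from lpfc_hw4.h;
 * LPFC_SEC_TO_USEC is 1000000). A hypothetical standalone rendering of
 * that conversion and clamp, not the driver's exact code:
 */
static inline unsigned int ex_usdelay_to_dmult(unsigned int usdelay,
					       unsigned int dmult_const,
					       unsigned int dmult_max)
{
	unsigned int dmult = (usdelay * dmult_const) / 1000000u;

	return dmult > dmult_max ? dmult_max : dmult;
}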
14810 * lpfc_eq_create - Create an Event Queue on the HBA
14811 * @phba: HBA structure that indicates port to create a queue on.
14812 * @eq: The queue structure to use to create the event queue.
14813 * @imax: The maximum interrupt per second limit.
14815 * This function creates an event queue, as detailed in @eq, on a port,
14816 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
14818 * The @phba struct is used to send mailbox command to HBA. The @eq struct
14819 * is used to get the entry count and entry size that are necessary to
14820 * determine the number of pages to allocate and use for this queue. This
14821 * function will send the EQ_CREATE mailbox command to the HBA to setup the
14822 * event queue. This function is synchronous and will wait for the mailbox
14823 * command to finish before continuing.
14825 * On success this function will return a zero. If unable to allocate enough
14826 * memory this function will return -ENOMEM. If the queue create mailbox command
14827 * fails this function will return -ENXIO.
14830 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
14832 struct lpfc_mbx_eq_create *eq_create;
14833 LPFC_MBOXQ_t *mbox;
14834 int rc, length, status = 0;
14835 struct lpfc_dmabuf *dmabuf;
14836 uint32_t shdr_status, shdr_add_status;
14837 union lpfc_sli4_cfg_shdr *shdr;
14839 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14841 /* sanity check on queue memory */
14844 if (!phba->sli4_hba.pc_sli4_params.supported)
14845 hw_page_size = SLI4_PAGE_SIZE;
14847 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14850 length = (sizeof(struct lpfc_mbx_eq_create) -
14851 sizeof(struct lpfc_sli4_cfg_mhdr));
14852 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14853 LPFC_MBOX_OPCODE_EQ_CREATE,
14854 length, LPFC_SLI4_MBX_EMBED);
14855 eq_create = &mbox->u.mqe.un.eq_create;
14856 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
14857 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
14859 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
14861 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
14863 /* Use version 2 of CREATE_EQ if eqav is set */
14864 if (phba->sli4_hba.pc_sli4_params.eqav) {
14865 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14866 LPFC_Q_CREATE_VERSION_2);
14867 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
14868 phba->sli4_hba.pc_sli4_params.eqav);
14871 /* don't setup delay multiplier using EQ_CREATE */
14873 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
14875 switch (eq->entry_count) {
14877 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14878 "0360 Unsupported EQ count. (%d)\n",
14880 if (eq->entry_count < 256) {
14884 /* fall through - otherwise default to smallest count */
14886 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14890 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14894 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14898 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14902 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14906 list_for_each_entry(dmabuf, &eq->page_list, list) {
14907 memset(dmabuf->virt, 0, hw_page_size);
14908 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14909 putPaddrLow(dmabuf->phys);
14910 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14911 putPaddrHigh(dmabuf->phys);
14913 mbox->vport = phba->pport;
14914 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14915 mbox->ctx_buf = NULL;
14916 mbox->ctx_ndlp = NULL;
14917 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14918 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14919 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14920 if (shdr_status || shdr_add_status || rc) {
14921 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14922 "2500 EQ_CREATE mailbox failed with "
14923 "status x%x add_status x%x, mbx status x%x\n",
14924 shdr_status, shdr_add_status, rc);
14927 eq->type = LPFC_EQ;
14928 eq->subtype = LPFC_NONE;
14929 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
14930 if (eq->queue_id == 0xFFFF)
14932 eq->host_index = 0;
14933 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
14934 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
14936 mempool_free(mbox, phba->mbox_mem_pool);
14941 * lpfc_cq_create - Create a Completion Queue on the HBA
14942 * @phba: HBA structure that indicates port to create a queue on.
14943 * @cq: The queue structure to use to create the completion queue.
14944 * @eq: The event queue to bind this completion queue to.
14946 * This function creates a completion queue, as detailed in @cq, on a port,
14947 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
14949 * The @phba struct is used to send mailbox command to HBA. The @cq struct
14950 * is used to get the entry count and entry size that are necessary to
14951 * determine the number of pages to allocate and use for this queue. The @eq
14952 * is used to indicate which event queue to bind this completion queue to. This
14953 * function will send the CQ_CREATE mailbox command to the HBA to setup the
14954 * completion queue. This function is synchronous and will wait for the mailbox
14955 * command to finish before continuing.
14957 * On success this function will return a zero. If unable to allocate enough
14958 * memory this function will return -ENOMEM. If the queue create mailbox command
14959 * fails this function will return -ENXIO.
14962 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14963 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
14965 struct lpfc_mbx_cq_create *cq_create;
14966 struct lpfc_dmabuf *dmabuf;
14967 LPFC_MBOXQ_t *mbox;
14968 int rc, length, status = 0;
14969 uint32_t shdr_status, shdr_add_status;
14970 union lpfc_sli4_cfg_shdr *shdr;
14972 /* sanity check on queue memory */
14976 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14979 length = (sizeof(struct lpfc_mbx_cq_create) -
14980 sizeof(struct lpfc_sli4_cfg_mhdr));
14981 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14982 LPFC_MBOX_OPCODE_CQ_CREATE,
14983 length, LPFC_SLI4_MBX_EMBED);
14984 cq_create = &mbox->u.mqe.un.cq_create;
14985 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
14986 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
14988 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
14989 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
14990 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14991 phba->sli4_hba.pc_sli4_params.cqv);
14992 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
14993 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
14994 (cq->page_size / SLI4_PAGE_SIZE));
14995 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
14997 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
14998 phba->sli4_hba.pc_sli4_params.cqav);
15000 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
15003 switch (cq->entry_count) {
15006 if (phba->sli4_hba.pc_sli4_params.cqv ==
15007 LPFC_Q_CREATE_VERSION_2) {
15008 cq_create->u.request.context.lpfc_cq_context_count =
15010 bf_set(lpfc_cq_context_count,
15011 &cq_create->u.request.context,
15012 LPFC_CQ_CNT_WORD7);
15017 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15018 "0361 Unsupported CQ count: "
15019 "entry cnt %d sz %d pg cnt %d\n",
15020 cq->entry_count, cq->entry_size,
15022 if (cq->entry_count < 256) {
15026 /* fall through - otherwise default to smallest count */
15028 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15032 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15036 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15040 list_for_each_entry(dmabuf, &cq->page_list, list) {
15041 memset(dmabuf->virt, 0, cq->page_size);
15042 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15043 putPaddrLow(dmabuf->phys);
15044 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15045 putPaddrHigh(dmabuf->phys);
15047 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15049 /* The IOCTL status is embedded in the mailbox subheader. */
15050 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15051 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15052 if (shdr_status || shdr_add_status || rc) {
15053 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15054 "2501 CQ_CREATE mailbox failed with "
15055 "status x%x add_status x%x, mbx status x%x\n",
15056 shdr_status, shdr_add_status, rc);
15060 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
15061 if (cq->queue_id == 0xFFFF) {
15065 /* link the cq onto the parent eq child list */
15066 list_add_tail(&cq->list, &eq->child_list);
15067 /* Set up completion queue's type and subtype */
15069 cq->subtype = subtype;
15070 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
15071 cq->assoc_qid = eq->queue_id;
15073 cq->host_index = 0;
15074 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15075 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
15077 if (cq->queue_id > phba->sli4_hba.cq_max)
15078 phba->sli4_hba.cq_max = cq->queue_id;
15080 mempool_free(mbox, phba->mbox_mem_pool);
15085 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
15086 * @phba: HBA structure that indicates port to create a queue on.
15087 * @cqp: The queue structure array to use to create the completion queues.
15088 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
15090 * This function creates a set of completion queues to support MRQ,
15091 * as detailed in @cqp, on a port,
15092 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
15094 * The @phba struct is used to send a mailbox command to the HBA. The @cqp array
15095 * is used to get the entry count and entry size that are necessary to
15096 * determine the number of pages to allocate and use for these queues. The @hdwq
15097 * array indicates which event queues to bind the completion queues to. This
15098 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the
15099 * completion queue. This function is synchronous and will wait for the mailbox
15100 * command to finish before continuing.
15102 * On success this function will return a zero. If unable to allocate enough
15103 * memory this function will return -ENOMEM. If the queue create mailbox command
15104 * fails this function will return -ENXIO.
15107 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
15108 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
15111 struct lpfc_queue *cq;
15112 struct lpfc_queue *eq;
15113 struct lpfc_mbx_cq_create_set *cq_set;
15114 struct lpfc_dmabuf *dmabuf;
15115 LPFC_MBOXQ_t *mbox;
15116 int rc, length, alloclen, status = 0;
15117 int cnt, idx, numcq, page_idx = 0;
15118 uint32_t shdr_status, shdr_add_status;
15119 union lpfc_sli4_cfg_shdr *shdr;
15120 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15122 /* sanity check on queue memory */
15123 numcq = phba->cfg_nvmet_mrq;
15124 if (!cqp || !hdwq || !numcq)
15127 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15131 length = sizeof(struct lpfc_mbx_cq_create_set);
15132 length += ((numcq * cqp[0]->page_count) *
15133 sizeof(struct dma_address));
15134 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15135 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
15136 LPFC_SLI4_MBX_NEMBED);
15137 if (alloclen < length) {
15138 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15139 "3098 Allocated DMA memory size (%d) is "
15140 "less than the requested DMA memory size "
15141 "(%d)\n", alloclen, length);
15145 cq_set = mbox->sge_array->addr[0];
15146 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
15147 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
15149 for (idx = 0; idx < numcq; idx++) {
15151 eq = hdwq[idx].hba_eq;
15156 if (!phba->sli4_hba.pc_sli4_params.supported)
15157 hw_page_size = cq->page_size;
15161 bf_set(lpfc_mbx_cq_create_set_page_size,
15162 &cq_set->u.request,
15163 (hw_page_size / SLI4_PAGE_SIZE));
15164 bf_set(lpfc_mbx_cq_create_set_num_pages,
15165 &cq_set->u.request, cq->page_count);
15166 bf_set(lpfc_mbx_cq_create_set_evt,
15167 &cq_set->u.request, 1);
15168 bf_set(lpfc_mbx_cq_create_set_valid,
15169 &cq_set->u.request, 1);
15170 bf_set(lpfc_mbx_cq_create_set_cqe_size,
15171 &cq_set->u.request, 0);
15172 bf_set(lpfc_mbx_cq_create_set_num_cq,
15173 &cq_set->u.request, numcq);
15174 bf_set(lpfc_mbx_cq_create_set_autovalid,
15175 &cq_set->u.request,
15176 phba->sli4_hba.pc_sli4_params.cqav);
15177 switch (cq->entry_count) {
15180 if (phba->sli4_hba.pc_sli4_params.cqv ==
15181 LPFC_Q_CREATE_VERSION_2) {
15182 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15183 &cq_set->u.request,
15185 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15186 &cq_set->u.request,
15187 LPFC_CQ_CNT_WORD7);
15192 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15193 "3118 Bad CQ count. (%d)\n",
15195 if (cq->entry_count < 256) {
15199 /* fall through - otherwise default to smallest */
15201 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15202 &cq_set->u.request, LPFC_CQ_CNT_256);
15205 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15206 &cq_set->u.request, LPFC_CQ_CNT_512);
15209 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15210 &cq_set->u.request, LPFC_CQ_CNT_1024);
15213 bf_set(lpfc_mbx_cq_create_set_eq_id0,
15214 &cq_set->u.request, eq->queue_id);
15217 bf_set(lpfc_mbx_cq_create_set_eq_id1,
15218 &cq_set->u.request, eq->queue_id);
15221 bf_set(lpfc_mbx_cq_create_set_eq_id2,
15222 &cq_set->u.request, eq->queue_id);
15225 bf_set(lpfc_mbx_cq_create_set_eq_id3,
15226 &cq_set->u.request, eq->queue_id);
15229 bf_set(lpfc_mbx_cq_create_set_eq_id4,
15230 &cq_set->u.request, eq->queue_id);
15233 bf_set(lpfc_mbx_cq_create_set_eq_id5,
15234 &cq_set->u.request, eq->queue_id);
15237 bf_set(lpfc_mbx_cq_create_set_eq_id6,
15238 &cq_set->u.request, eq->queue_id);
15241 bf_set(lpfc_mbx_cq_create_set_eq_id7,
15242 &cq_set->u.request, eq->queue_id);
15245 bf_set(lpfc_mbx_cq_create_set_eq_id8,
15246 &cq_set->u.request, eq->queue_id);
15249 bf_set(lpfc_mbx_cq_create_set_eq_id9,
15250 &cq_set->u.request, eq->queue_id);
15253 bf_set(lpfc_mbx_cq_create_set_eq_id10,
15254 &cq_set->u.request, eq->queue_id);
15257 bf_set(lpfc_mbx_cq_create_set_eq_id11,
15258 &cq_set->u.request, eq->queue_id);
15261 bf_set(lpfc_mbx_cq_create_set_eq_id12,
15262 &cq_set->u.request, eq->queue_id);
15265 bf_set(lpfc_mbx_cq_create_set_eq_id13,
15266 &cq_set->u.request, eq->queue_id);
15269 bf_set(lpfc_mbx_cq_create_set_eq_id14,
15270 &cq_set->u.request, eq->queue_id);
15273 bf_set(lpfc_mbx_cq_create_set_eq_id15,
15274 &cq_set->u.request, eq->queue_id);
15278 /* link the cq onto the parent eq child list */
15279 list_add_tail(&cq->list, &eq->child_list);
15280 /* Set up completion queue's type and subtype */
15282 cq->subtype = subtype;
15283 cq->assoc_qid = eq->queue_id;
15285 cq->host_index = 0;
15286 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15287 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
15292 list_for_each_entry(dmabuf, &cq->page_list, list) {
15293 memset(dmabuf->virt, 0, hw_page_size);
15294 cnt = page_idx + dmabuf->buffer_tag;
15295 cq_set->u.request.page[cnt].addr_lo =
15296 putPaddrLow(dmabuf->phys);
15297 cq_set->u.request.page[cnt].addr_hi =
15298 putPaddrHigh(dmabuf->phys);
15304 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15306 /* The IOCTL status is embedded in the mailbox subheader. */
15307 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15308 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15309 if (shdr_status || shdr_add_status || rc) {
15310 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15311 "3119 CQ_CREATE_SET mailbox failed with "
15312 "status x%x add_status x%x, mbx status x%x\n",
15313 shdr_status, shdr_add_status, rc);
15317 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
15318 if (rc == 0xFFFF) {
15323 for (idx = 0; idx < numcq; idx++) {
15325 cq->queue_id = rc + idx;
15326 if (cq->queue_id > phba->sli4_hba.cq_max)
15327 phba->sli4_hba.cq_max = cq->queue_id;
15331 lpfc_sli4_mbox_cmd_free(phba, mbox);
15336 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
15337 * @phba: HBA structure that indicates port to create a queue on.
15338 * @mq: The queue structure to use to create the mailbox queue.
15339 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
15340 * @cq: The completion queue to associate with this mq.
15342 * This function provides fallback (fb) functionality when
15343 * mq_create_ext fails on older FW generations. Its purpose is otherwise
15344 * identical to mq_create_ext.
15346 * This routine cannot fail as all attributes were previously accessed and
15347 * initialized in mq_create_ext.
15350 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15351 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
15353 struct lpfc_mbx_mq_create *mq_create;
15354 struct lpfc_dmabuf *dmabuf;
15357 length = (sizeof(struct lpfc_mbx_mq_create) -
15358 sizeof(struct lpfc_sli4_cfg_mhdr));
15359 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15360 LPFC_MBOX_OPCODE_MQ_CREATE,
15361 length, LPFC_SLI4_MBX_EMBED);
15362 mq_create = &mbox->u.mqe.un.mq_create;
15363 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
15365 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
15367 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15368 switch (mq->entry_count) {
15370 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15371 LPFC_MQ_RING_SIZE_16);
15374 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15375 LPFC_MQ_RING_SIZE_32);
15378 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15379 LPFC_MQ_RING_SIZE_64);
15382 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15383 LPFC_MQ_RING_SIZE_128);
15386 list_for_each_entry(dmabuf, &mq->page_list, list) {
15387 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15388 putPaddrLow(dmabuf->phys);
15389 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15390 putPaddrHigh(dmabuf->phys);
15395 * lpfc_mq_create - Create a mailbox Queue on the HBA
15396 * @phba: HBA structure that indicates port to create a queue on.
15397 * @mq: The queue structure to use to create the mailbox queue.
15398 * @cq: The completion queue to associate with this mq.
15399 * @subtype: The queue's subtype.
15401 * This function creates a mailbox queue, as detailed in @mq, on a port,
15402 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
15404 * The @phba struct is used to send a mailbox command to the HBA. The @mq struct
15405 * is used to get the entry count and entry size that are necessary to
15406 * determine the number of pages to allocate and use for this queue. This
15407 * function will send the MQ_CREATE mailbox command to the HBA to setup the
15408 * mailbox queue. This function is synchronous and will wait for the mailbox
15409 * command to finish before continuing.
15411 * On success this function will return a zero. If unable to allocate enough
15412 * memory this function will return -ENOMEM. If the queue create mailbox command
15413 * fails this function will return -ENXIO.
15416 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
15417 struct lpfc_queue *cq, uint32_t subtype)
15419 struct lpfc_mbx_mq_create *mq_create;
15420 struct lpfc_mbx_mq_create_ext *mq_create_ext;
15421 struct lpfc_dmabuf *dmabuf;
15422 LPFC_MBOXQ_t *mbox;
15423 int rc, length, status = 0;
15424 uint32_t shdr_status, shdr_add_status;
15425 union lpfc_sli4_cfg_shdr *shdr;
15426 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15428 /* sanity check on queue memory */
15431 if (!phba->sli4_hba.pc_sli4_params.supported)
15432 hw_page_size = SLI4_PAGE_SIZE;
15434 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15437 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
15438 sizeof(struct lpfc_sli4_cfg_mhdr));
15439 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15440 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
15441 length, LPFC_SLI4_MBX_EMBED);
15443 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
15444 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
15445 bf_set(lpfc_mbx_mq_create_ext_num_pages,
15446 &mq_create_ext->u.request, mq->page_count);
15447 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
15448 &mq_create_ext->u.request, 1);
15449 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
15450 &mq_create_ext->u.request, 1);
15451 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
15452 &mq_create_ext->u.request, 1);
15453 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
15454 &mq_create_ext->u.request, 1);
15455 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
15456 &mq_create_ext->u.request, 1);
15457 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
15458 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15459 phba->sli4_hba.pc_sli4_params.mqv);
15460 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
15461 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
15464 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
15466 switch (mq->entry_count) {
15468 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15469 "0362 Unsupported MQ count. (%d)\n",
15471 if (mq->entry_count < 16) {
15475 /* fall through - otherwise default to smallest count */
15477 bf_set(lpfc_mq_context_ring_size,
15478 &mq_create_ext->u.request.context,
15479 LPFC_MQ_RING_SIZE_16);
15482 bf_set(lpfc_mq_context_ring_size,
15483 &mq_create_ext->u.request.context,
15484 LPFC_MQ_RING_SIZE_32);
15487 bf_set(lpfc_mq_context_ring_size,
15488 &mq_create_ext->u.request.context,
15489 LPFC_MQ_RING_SIZE_64);
15492 bf_set(lpfc_mq_context_ring_size,
15493 &mq_create_ext->u.request.context,
15494 LPFC_MQ_RING_SIZE_128);
15497 list_for_each_entry(dmabuf, &mq->page_list, list) {
15498 memset(dmabuf->virt, 0, hw_page_size);
15499 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
15500 putPaddrLow(dmabuf->phys);
15501 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
15502 putPaddrHigh(dmabuf->phys);
15504 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15505 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15506 &mq_create_ext->u.response);
15507 if (rc != MBX_SUCCESS) {
15508 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15509 "2795 MQ_CREATE_EXT failed with "
15510 "status x%x. Failback to MQ_CREATE.\n",
15512 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
15513 mq_create = &mbox->u.mqe.un.mq_create;
15514 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15515 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
15516 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15517 &mq_create->u.response);
15520 /* The IOCTL status is embedded in the mailbox subheader. */
15521 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15522 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15523 if (shdr_status || shdr_add_status || rc) {
15524 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15525 "2502 MQ_CREATE mailbox failed with "
15526 "status x%x add_status x%x, mbx status x%x\n",
15527 shdr_status, shdr_add_status, rc);
15531 if (mq->queue_id == 0xFFFF) {
15535 mq->type = LPFC_MQ;
15536 mq->assoc_qid = cq->queue_id;
15537 mq->subtype = subtype;
15538 mq->host_index = 0;
15541 /* link the mq onto the parent cq child list */
15542 list_add_tail(&mq->list, &cq->child_list);
15544 mempool_free(mbox, phba->mbox_mem_pool);
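/*
 * Usage sketch (illustrative only, with hypothetical queue pointers that
 * are assumed to have been allocated with lpfc_sli4_queue_alloc()):
 *
 *	struct lpfc_queue *mq;	// mailbox queue, entry_count 16..128
 *	struct lpfc_queue *cq;	// completion queue already created
 *
 *	rc = lpfc_mq_create(phba, mq, cq, LPFC_MBOX);
 *	if (rc)			// -ENOMEM or -ENXIO
 *		goto out_destroy;
 */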
15549 * lpfc_wq_create - Create a Work Queue on the HBA
15550 * @phba: HBA structure that indicates port to create a queue on.
15551 * @wq: The queue structure to use to create the work queue.
15552 * @cq: The completion queue to bind this work queue to.
15553 * @subtype: The subtype of the work queue indicating its functionality.
15555 * This function creates a work queue, as detailed in @wq, on a port, described
15556 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
15558 * The @phba struct is used to send the mailbox command to the HBA. The @wq struct
15559 * is used to get the entry count and entry size that are necessary to
15560 * determine the number of pages to allocate and use for this queue. The @cq
15561 * is used to indicate which completion queue to bind this work queue to. This
15562 * function will send the WQ_CREATE mailbox command to the HBA to set up the
15563 * work queue. The mailbox command is issued in polled mode, so this
15564 * function waits for it to complete before returning.
15566 * On success this function will return zero. If unable to allocate enough
15567 * memory this function will return -ENOMEM. If the queue create mailbox command
15568 * fails this function will return -ENXIO.
15571 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
15572 struct lpfc_queue *cq, uint32_t subtype)
15574 struct lpfc_mbx_wq_create *wq_create;
15575 struct lpfc_dmabuf *dmabuf;
15576 LPFC_MBOXQ_t *mbox;
15577 int rc, length, status = 0;
15578 uint32_t shdr_status, shdr_add_status;
15579 union lpfc_sli4_cfg_shdr *shdr;
15580 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15581 struct dma_address *page;
15582 void __iomem *bar_memmap_p;
15583 uint32_t db_offset;
15584 uint16_t pci_barset;
15585 uint8_t dpp_barset;
15586 uint32_t dpp_offset;
15587 unsigned long pg_addr;
15588 uint8_t wq_create_version;
15590 /* sanity check on queue memory */
15593 if (!phba->sli4_hba.pc_sli4_params.supported)
15594 hw_page_size = wq->page_size;
15596 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15599 length = (sizeof(struct lpfc_mbx_wq_create) -
15600 sizeof(struct lpfc_sli4_cfg_mhdr));
15601 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15602 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
15603 length, LPFC_SLI4_MBX_EMBED);
15604 wq_create = &mbox->u.mqe.un.wq_create;
15605 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
15606 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
15608 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
15611 /* wqv is the earliest version supported, NOT the latest */
15612 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15613 phba->sli4_hba.pc_sli4_params.wqv);
15615 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
15616 (wq->page_size > SLI4_PAGE_SIZE))
15617 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15619 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15627 switch (wq_create_version) {
15628 case LPFC_Q_CREATE_VERSION_1:
15629 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
15631 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15632 LPFC_Q_CREATE_VERSION_1);
15634 switch (wq->entry_size) {
15637 bf_set(lpfc_mbx_wq_create_wqe_size,
15638 &wq_create->u.request_1,
15639 LPFC_WQ_WQE_SIZE_64);
15642 bf_set(lpfc_mbx_wq_create_wqe_size,
15643 &wq_create->u.request_1,
15644 LPFC_WQ_WQE_SIZE_128);
15647 /* Request DPP by default */
15648 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
15649 bf_set(lpfc_mbx_wq_create_page_size,
15650 &wq_create->u.request_1,
15651 (wq->page_size / SLI4_PAGE_SIZE));
15652 page = wq_create->u.request_1.page;
15655 page = wq_create->u.request.page;
15659 list_for_each_entry(dmabuf, &wq->page_list, list) {
15660 memset(dmabuf->virt, 0, hw_page_size);
15661 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
15662 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
15665 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15666 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
15668 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15669 /* The IOCTL status is embedded in the mailbox subheader. */
15670 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15671 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15672 if (shdr_status || shdr_add_status || rc) {
15673 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15674 "2503 WQ_CREATE mailbox failed with "
15675 "status x%x add_status x%x, mbx status x%x\n",
15676 shdr_status, shdr_add_status, rc);
15681 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
15682 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
15683 &wq_create->u.response);
15685 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
15686 &wq_create->u.response_1);
15688 if (wq->queue_id == 0xFFFF) {
15693 wq->db_format = LPFC_DB_LIST_FORMAT;
15694 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
15695 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15696 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
15697 &wq_create->u.response);
15698 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
15699 (wq->db_format != LPFC_DB_RING_FORMAT)) {
15700 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15701 "3265 WQ[%d] doorbell format "
15702 "not supported: x%x\n",
15703 wq->queue_id, wq->db_format);
15707 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
15708 &wq_create->u.response);
15709 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15711 if (!bar_memmap_p) {
15712 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15713 "3263 WQ[%d] failed to memmap "
15714 "pci barset:x%x\n",
15715 wq->queue_id, pci_barset);
15719 db_offset = wq_create->u.response.doorbell_offset;
15720 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
15721 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
15722 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15723 "3252 WQ[%d] doorbell offset "
15724 "not supported: x%x\n",
15725 wq->queue_id, db_offset);
15729 wq->db_regaddr = bar_memmap_p + db_offset;
15730 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15731 "3264 WQ[%d]: barset:x%x, offset:x%x, "
15732 "format:x%x\n", wq->queue_id,
15733 pci_barset, db_offset, wq->db_format);
15735 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15737 /* Check if DPP was honored by the firmware */
15738 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
15739 &wq_create->u.response_1);
15740 if (wq->dpp_enable) {
15741 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
15742 &wq_create->u.response_1);
15743 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15745 if (!bar_memmap_p) {
15746 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15747 "3267 WQ[%d] failed to memmap "
15748 "pci barset:x%x\n",
15749 wq->queue_id, pci_barset);
15753 db_offset = wq_create->u.response_1.doorbell_offset;
15754 wq->db_regaddr = bar_memmap_p + db_offset;
15755 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
15756 &wq_create->u.response_1);
15757 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
15758 &wq_create->u.response_1);
15759 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15761 if (!bar_memmap_p) {
15762 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15763 "3268 WQ[%d] failed to memmap "
15764 "pci barset:x%x\n",
15765 wq->queue_id, dpp_barset);
15769 dpp_offset = wq_create->u.response_1.dpp_offset;
15770 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
15771 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15772 "3271 WQ[%d]: barset:x%x, offset:x%x, "
15773 "dpp_id:x%x dpp_barset:x%x "
15774 "dpp_offset:x%x\n",
15775 wq->queue_id, pci_barset, db_offset,
15776 wq->dpp_id, dpp_barset, dpp_offset);
15778 /* Enable combined writes for DPP aperture */
15779 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
15781 rc = set_memory_wc(pg_addr, 1);
15783 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15784 "3272 Cannot setup Combined "
15785 "Write on WQ[%d] - disable DPP\n",
15787 phba->cfg_enable_dpp = 0;
15790 phba->cfg_enable_dpp = 0;
15793 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15795 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
15796 if (wq->pring == NULL) {
15800 wq->type = LPFC_WQ;
15801 wq->assoc_qid = cq->queue_id;
15802 wq->subtype = subtype;
15803 wq->host_index = 0;
15805 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
15807 /* link the wq onto the parent cq child list */
15808 list_add_tail(&wq->list, &cq->child_list);
15810 mempool_free(mbox, phba->mbox_mem_pool);
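/*
 * Design note with an illustrative sketch (not the actual submit code):
 * when the firmware honors the DPP request, WQEs can be pushed through
 * the write-combined dpp_regaddr aperture instead of posting a doorbell
 * for each entry, saving an MMIO round trip on the hot path.
 * Conceptually:
 *
 *	if (wq->dpp_enable)
 *		memcpy_toio(wq->dpp_regaddr, wqe, wq->entry_size);
 *	else
 *		writel(doorbell, wq->db_regaddr);
 *
 * The real WQE push logic lives in the work queue submit path
 * (lpfc_sli4_wq_put), not in this create routine.
 */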
15815 * lpfc_rq_create - Create a Receive Queue on the HBA
15816 * @phba: HBA structure that indicates port to create a queue on.
15817 * @hrq: The queue structure to use to create the header receive queue.
15818 * @drq: The queue structure to use to create the data receive queue.
15819 * @cq: The completion queue to bind this work queue to.
15821 * This function creates a receive buffer queue pair, as detailed in @hrq and
15822 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
15825 * The @phba struct is used to send the mailbox command to the HBA. The @drq
15826 * and @hrq structs are used to get the entry count needed to determine the
15827 * number of pages to use for this queue. The @cq indicates which completion
15828 * queue the received buffers posted to these queues are bound to.
15829 * This function will send the RQ_CREATE mailbox command to the HBA to set up
15830 * the receive queue pair. The mailbox command is issued in polled mode, so
15831 * this function waits for it to complete before returning.
15833 * On success this function will return zero. If unable to allocate enough
15834 * memory this function will return -ENOMEM. If the queue create mailbox command
15835 * fails this function will return -ENXIO.
15838 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15839 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
15841 struct lpfc_mbx_rq_create *rq_create;
15842 struct lpfc_dmabuf *dmabuf;
15843 LPFC_MBOXQ_t *mbox;
15844 int rc, length, status = 0;
15845 uint32_t shdr_status, shdr_add_status;
15846 union lpfc_sli4_cfg_shdr *shdr;
15847 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15848 void __iomem *bar_memmap_p;
15849 uint32_t db_offset;
15850 uint16_t pci_barset;
15852 /* sanity check on queue memory */
15853 if (!hrq || !drq || !cq)
15855 if (!phba->sli4_hba.pc_sli4_params.supported)
15856 hw_page_size = SLI4_PAGE_SIZE;
15858 if (hrq->entry_count != drq->entry_count)
15860 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15863 length = (sizeof(struct lpfc_mbx_rq_create) -
15864 sizeof(struct lpfc_sli4_cfg_mhdr));
15865 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15866 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15867 length, LPFC_SLI4_MBX_EMBED);
15868 rq_create = &mbox->u.mqe.un.rq_create;
15869 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15870 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15871 phba->sli4_hba.pc_sli4_params.rqv);
15872 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15873 bf_set(lpfc_rq_context_rqe_count_1,
15874 &rq_create->u.request.context,
15876 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
15877 bf_set(lpfc_rq_context_rqe_size,
15878 &rq_create->u.request.context,
15880 bf_set(lpfc_rq_context_page_size,
15881 &rq_create->u.request.context,
15882 LPFC_RQ_PAGE_SIZE_4096);
15884 switch (hrq->entry_count) {
15886 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15887 "2535 Unsupported RQ count. (%d)\n",
15889 if (hrq->entry_count < 512) {
15893 /* fall through - otherwise default to smallest count */
15895 bf_set(lpfc_rq_context_rqe_count,
15896 &rq_create->u.request.context,
15897 LPFC_RQ_RING_SIZE_512);
15900 bf_set(lpfc_rq_context_rqe_count,
15901 &rq_create->u.request.context,
15902 LPFC_RQ_RING_SIZE_1024);
15905 bf_set(lpfc_rq_context_rqe_count,
15906 &rq_create->u.request.context,
15907 LPFC_RQ_RING_SIZE_2048);
15910 bf_set(lpfc_rq_context_rqe_count,
15911 &rq_create->u.request.context,
15912 LPFC_RQ_RING_SIZE_4096);
15915 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
15916 LPFC_HDR_BUF_SIZE);
15918 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15920 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15922 list_for_each_entry(dmabuf, &hrq->page_list, list) {
15923 memset(dmabuf->virt, 0, hw_page_size);
15924 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15925 putPaddrLow(dmabuf->phys);
15926 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15927 putPaddrHigh(dmabuf->phys);
15929 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15930 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15932 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15933 /* The IOCTL status is embedded in the mailbox subheader. */
15934 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15935 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15936 if (shdr_status || shdr_add_status || rc) {
15937 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15938 "2504 RQ_CREATE mailbox failed with "
15939 "status x%x add_status x%x, mbx status x%x\n",
15940 shdr_status, shdr_add_status, rc);
15944 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15945 if (hrq->queue_id == 0xFFFF) {
15950 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15951 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
15952 &rq_create->u.response);
15953 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
15954 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
15955 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15956 "3262 RQ [%d] doorbell format not "
15957 "supported: x%x\n", hrq->queue_id,
15963 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
15964 &rq_create->u.response);
15965 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
15966 if (!bar_memmap_p) {
15967 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15968 "3269 RQ[%d] failed to memmap pci "
15969 "barset:x%x\n", hrq->queue_id,
15975 db_offset = rq_create->u.response.doorbell_offset;
15976 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
15977 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
15978 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15979 "3270 RQ[%d] doorbell offset not "
15980 "supported: x%x\n", hrq->queue_id,
15985 hrq->db_regaddr = bar_memmap_p + db_offset;
15986 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15987 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
15988 "format:x%x\n", hrq->queue_id, pci_barset,
15989 db_offset, hrq->db_format);
15991 hrq->db_format = LPFC_DB_RING_FORMAT;
15992 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15994 hrq->type = LPFC_HRQ;
15995 hrq->assoc_qid = cq->queue_id;
15996 hrq->subtype = subtype;
15997 hrq->host_index = 0;
15998 hrq->hba_index = 0;
15999 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16001 /* now create the data queue */
16002 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16003 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16004 length, LPFC_SLI4_MBX_EMBED);
16005 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16006 phba->sli4_hba.pc_sli4_params.rqv);
16007 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16008 bf_set(lpfc_rq_context_rqe_count_1,
16009 &rq_create->u.request.context, hrq->entry_count);
16010 if (subtype == LPFC_NVMET)
16011 rq_create->u.request.context.buffer_size =
16012 LPFC_NVMET_DATA_BUF_SIZE;
16014 rq_create->u.request.context.buffer_size =
16015 LPFC_DATA_BUF_SIZE;
16016 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
16018 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
16019 (PAGE_SIZE/SLI4_PAGE_SIZE));
16021 switch (drq->entry_count) {
16023 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16024 "2536 Unsupported RQ count. (%d)\n",
16026 if (drq->entry_count < 512) {
16030 /* fall through - otherwise default to smallest count */
16032 bf_set(lpfc_rq_context_rqe_count,
16033 &rq_create->u.request.context,
16034 LPFC_RQ_RING_SIZE_512);
16037 bf_set(lpfc_rq_context_rqe_count,
16038 &rq_create->u.request.context,
16039 LPFC_RQ_RING_SIZE_1024);
16042 bf_set(lpfc_rq_context_rqe_count,
16043 &rq_create->u.request.context,
16044 LPFC_RQ_RING_SIZE_2048);
16047 bf_set(lpfc_rq_context_rqe_count,
16048 &rq_create->u.request.context,
16049 LPFC_RQ_RING_SIZE_4096);
16052 if (subtype == LPFC_NVMET)
16053 bf_set(lpfc_rq_context_buf_size,
16054 &rq_create->u.request.context,
16055 LPFC_NVMET_DATA_BUF_SIZE);
16057 bf_set(lpfc_rq_context_buf_size,
16058 &rq_create->u.request.context,
16059 LPFC_DATA_BUF_SIZE);
16061 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16063 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16065 list_for_each_entry(dmabuf, &drq->page_list, list) {
16066 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16067 putPaddrLow(dmabuf->phys);
16068 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16069 putPaddrHigh(dmabuf->phys);
16071 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16072 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16073 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16074 /* The IOCTL status is embedded in the mailbox subheader. */
16075 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16076 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16077 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16078 if (shdr_status || shdr_add_status || rc) {
16082 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16083 if (drq->queue_id == 0xFFFF) {
16087 drq->type = LPFC_DRQ;
16088 drq->assoc_qid = cq->queue_id;
16089 drq->subtype = subtype;
16090 drq->host_index = 0;
16091 drq->hba_index = 0;
16092 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16094 /* link the header and data RQs onto the parent cq child list */
16095 list_add_tail(&hrq->list, &cq->child_list);
16096 list_add_tail(&drq->list, &cq->child_list);
16099 mempool_free(mbox, phba->mbox_mem_pool);
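/*
 * Usage sketch (illustrative only, queue pointers hypothetical): header
 * and data RQs are always created as a pair with equal entry counts and
 * bound to the same completion queue:
 *
 *	rc = lpfc_rq_create(phba, hdr_rq, dat_rq, els_cq, LPFC_USOL);
 *	if (rc)			// -ENOMEM or -ENXIO
 *		goto out_destroy;
 *
 * Passing mismatched entry counts fails the sanity check at the top of
 * the routine before any mailbox command is issued.
 */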
16104 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
16105 * @phba: HBA structure that indicates port to create a queue on.
16106 * @hrqp: The queue structure array to use to create the header receive queues.
16107 * @drqp: The queue structure array to use to create the data receive queues.
16108 * @cqp: The completion queue array to bind these receive queues to.
16109 * @subtype: Functional purpose of the queues created (e.g. LPFC_NVMET).
16110 * This function creates an array of receive buffer queue pairs, as detailed
16111 * in @hrqp and @drqp, on a port described by @phba, by sending a single
16114 * version 2 RQ_CREATE mailbox command to the HBA. The @drqp and @hrqp
16115 * entries are used to get the entry counts needed to determine the number
16116 * of pages to use for each queue. The @cqp entries indicate which completion
16117 * queues the received buffers posted to each queue pair are bound to.
16118 * This function sends the RQ_CREATE mailbox command to the HBA to set up the
16119 * receive queue pairs. The mailbox command is issued in polled mode, so this
16120 * function waits for it to complete before returning.
16122 * On success this function will return zero. If unable to allocate enough
16123 * memory this function will return -ENOMEM. If the queue create mailbox command
16124 * fails this function will return -ENXIO.
16127 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
16128 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
16131 struct lpfc_queue *hrq, *drq, *cq;
16132 struct lpfc_mbx_rq_create_v2 *rq_create;
16133 struct lpfc_dmabuf *dmabuf;
16134 LPFC_MBOXQ_t *mbox;
16135 int rc, length, alloclen, status = 0;
16136 int cnt, idx, numrq, page_idx = 0;
16137 uint32_t shdr_status, shdr_add_status;
16138 union lpfc_sli4_cfg_shdr *shdr;
16139 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16141 numrq = phba->cfg_nvmet_mrq;
16142 /* sanity check on array memory */
16143 if (!hrqp || !drqp || !cqp || !numrq)
16145 if (!phba->sli4_hba.pc_sli4_params.supported)
16146 hw_page_size = SLI4_PAGE_SIZE;
16148 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16152 length = sizeof(struct lpfc_mbx_rq_create_v2);
16153 length += ((2 * numrq * hrqp[0]->page_count) *
16154 sizeof(struct dma_address));
16156 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16157 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
16158 LPFC_SLI4_MBX_NEMBED);
16159 if (alloclen < length) {
16160 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16161 "3099 Allocated DMA memory size (%d) is "
16162 "less than the requested DMA memory size "
16163 "(%d)\n", alloclen, length);
16170 rq_create = mbox->sge_array->addr[0];
16171 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
16173 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
16176 for (idx = 0; idx < numrq; idx++) {
16181 /* sanity check on queue memory */
16182 if (!hrq || !drq || !cq) {
16187 if (hrq->entry_count != drq->entry_count) {
16193 bf_set(lpfc_mbx_rq_create_num_pages,
16194 &rq_create->u.request,
16196 bf_set(lpfc_mbx_rq_create_rq_cnt,
16197 &rq_create->u.request, (numrq * 2));
16198 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
16200 bf_set(lpfc_rq_context_base_cq,
16201 &rq_create->u.request.context,
16203 bf_set(lpfc_rq_context_data_size,
16204 &rq_create->u.request.context,
16205 LPFC_NVMET_DATA_BUF_SIZE);
16206 bf_set(lpfc_rq_context_hdr_size,
16207 &rq_create->u.request.context,
16208 LPFC_HDR_BUF_SIZE);
16209 bf_set(lpfc_rq_context_rqe_count_1,
16210 &rq_create->u.request.context,
16212 bf_set(lpfc_rq_context_rqe_size,
16213 &rq_create->u.request.context,
16215 bf_set(lpfc_rq_context_page_size,
16216 &rq_create->u.request.context,
16217 (PAGE_SIZE/SLI4_PAGE_SIZE));
16220 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16221 memset(dmabuf->virt, 0, hw_page_size);
16222 cnt = page_idx + dmabuf->buffer_tag;
16223 rq_create->u.request.page[cnt].addr_lo =
16224 putPaddrLow(dmabuf->phys);
16225 rq_create->u.request.page[cnt].addr_hi =
16226 putPaddrHigh(dmabuf->phys);
16232 list_for_each_entry(dmabuf, &drq->page_list, list) {
16233 memset(dmabuf->virt, 0, hw_page_size);
16234 cnt = page_idx + dmabuf->buffer_tag;
16235 rq_create->u.request.page[cnt].addr_lo =
16236 putPaddrLow(dmabuf->phys);
16237 rq_create->u.request.page[cnt].addr_hi =
16238 putPaddrHigh(dmabuf->phys);
16243 hrq->db_format = LPFC_DB_RING_FORMAT;
16244 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16245 hrq->type = LPFC_HRQ;
16246 hrq->assoc_qid = cq->queue_id;
16247 hrq->subtype = subtype;
16248 hrq->host_index = 0;
16249 hrq->hba_index = 0;
16250 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16252 drq->db_format = LPFC_DB_RING_FORMAT;
16253 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16254 drq->type = LPFC_DRQ;
16255 drq->assoc_qid = cq->queue_id;
16256 drq->subtype = subtype;
16257 drq->host_index = 0;
16258 drq->hba_index = 0;
16259 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16261 list_add_tail(&hrq->list, &cq->child_list);
16262 list_add_tail(&drq->list, &cq->child_list);
16265 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16266 /* The IOCTL status is embedded in the mailbox subheader. */
16267 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16268 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16269 if (shdr_status || shdr_add_status || rc) {
16270 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16271 "3120 RQ_CREATE mailbox failed with "
16272 "status x%x add_status x%x, mbx status x%x\n",
16273 shdr_status, shdr_add_status, rc);
16277 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16278 if (rc == 0xFFFF) {
16283 /* Initialize all RQs with associated queue id */
16284 for (idx = 0; idx < numrq; idx++) {
16286 hrq->queue_id = rc + (2 * idx);
16288 drq->queue_id = rc + (2 * idx) + 1;
16292 lpfc_sli4_mbox_cmd_free(phba, mbox);
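/*
 * Illustrative note (not driver code): MRQ creation returns one base
 * queue id for the whole set, and the header/data queues interleave
 * from it.  With numrq = 2 and a returned base id of 0x80:
 *
 *	hrqp[0]->queue_id = 0x80	drqp[0]->queue_id = 0x81
 *	hrqp[1]->queue_id = 0x82	drqp[1]->queue_id = 0x83
 */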
16297 * lpfc_eq_destroy - Destroy an event Queue on the HBA
16298 * @eq: The queue structure associated with the queue to destroy.
16300 * This function destroys a queue, as detailed in @eq, by sending a mailbox
16301 * command specific to the type of queue to the HBA.
16303 * The @eq struct is used to get the queue ID of the queue to destroy.
16305 * On success this function will return zero. If the queue destroy mailbox
16306 * command fails this function will return -ENXIO.
16309 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16311 LPFC_MBOXQ_t *mbox;
16312 int rc, length, status = 0;
16313 uint32_t shdr_status, shdr_add_status;
16314 union lpfc_sli4_cfg_shdr *shdr;
16316 /* sanity check on queue memory */
16320 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16323 length = (sizeof(struct lpfc_mbx_eq_destroy) -
16324 sizeof(struct lpfc_sli4_cfg_mhdr));
16325 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16326 LPFC_MBOX_OPCODE_EQ_DESTROY,
16327 length, LPFC_SLI4_MBX_EMBED);
16328 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16330 mbox->vport = eq->phba->pport;
16331 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16333 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16334 /* The IOCTL status is embedded in the mailbox subheader. */
16335 shdr = (union lpfc_sli4_cfg_shdr *)
16336 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16337 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16338 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16339 if (shdr_status || shdr_add_status || rc) {
16340 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16341 "2505 EQ_DESTROY mailbox failed with "
16342 "status x%x add_status x%x, mbx status x%x\n",
16343 shdr_status, shdr_add_status, rc);
16347 /* Remove eq from any list */
16348 list_del_init(&eq->list);
16349 mempool_free(mbox, eq->phba->mbox_mem_pool);
16354 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
16355 * @cq: The queue structure associated with the queue to destroy.
16357 * This function destroys a queue, as detailed in @cq, by sending a mailbox
16358 * command specific to the type of queue to the HBA.
16360 * The @cq struct is used to get the queue ID of the queue to destroy.
16362 * On success this function will return zero. If the queue destroy mailbox
16363 * command fails this function will return -ENXIO.
16366 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16368 LPFC_MBOXQ_t *mbox;
16369 int rc, length, status = 0;
16370 uint32_t shdr_status, shdr_add_status;
16371 union lpfc_sli4_cfg_shdr *shdr;
16373 /* sanity check on queue memory */
16376 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16379 length = (sizeof(struct lpfc_mbx_cq_destroy) -
16380 sizeof(struct lpfc_sli4_cfg_mhdr));
16381 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16382 LPFC_MBOX_OPCODE_CQ_DESTROY,
16383 length, LPFC_SLI4_MBX_EMBED);
16384 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16386 mbox->vport = cq->phba->pport;
16387 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16388 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16389 /* The IOCTL status is embedded in the mailbox subheader. */
16390 shdr = (union lpfc_sli4_cfg_shdr *)
16391 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
16392 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16393 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16394 if (shdr_status || shdr_add_status || rc) {
16395 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16396 "2506 CQ_DESTROY mailbox failed with "
16397 "status x%x add_status x%x, mbx status x%x\n",
16398 shdr_status, shdr_add_status, rc);
16401 /* Remove cq from any list */
16402 list_del_init(&cq->list);
16403 mempool_free(mbox, cq->phba->mbox_mem_pool);
16408 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
16409 * @mq: The queue structure associated with the queue to destroy.
16411 * This function destroys a queue, as detailed in @mq, by sending a mailbox
16412 * command specific to the type of queue to the HBA.
16414 * The @mq struct is used to get the queue ID of the queue to destroy.
16416 * On success this function will return zero. If the queue destroy mailbox
16417 * command fails this function will return -ENXIO.
16420 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16422 LPFC_MBOXQ_t *mbox;
16423 int rc, length, status = 0;
16424 uint32_t shdr_status, shdr_add_status;
16425 union lpfc_sli4_cfg_shdr *shdr;
16427 /* sanity check on queue memory */
16430 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16433 length = (sizeof(struct lpfc_mbx_mq_destroy) -
16434 sizeof(struct lpfc_sli4_cfg_mhdr));
16435 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16436 LPFC_MBOX_OPCODE_MQ_DESTROY,
16437 length, LPFC_SLI4_MBX_EMBED);
16438 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16440 mbox->vport = mq->phba->pport;
16441 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16442 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16443 /* The IOCTL status is embedded in the mailbox subheader. */
16444 shdr = (union lpfc_sli4_cfg_shdr *)
16445 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
16446 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16447 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16448 if (shdr_status || shdr_add_status || rc) {
16449 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16450 "2507 MQ_DESTROY mailbox failed with "
16451 "status x%x add_status x%x, mbx status x%x\n",
16452 shdr_status, shdr_add_status, rc);
16455 /* Remove mq from any list */
16456 list_del_init(&mq->list);
16457 mempool_free(mbox, mq->phba->mbox_mem_pool);
16462 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
16463 * @wq: The queue structure associated with the queue to destroy.
16465 * This function destroys a queue, as detailed in @wq, by sending a mailbox
16466 * command specific to the type of queue to the HBA.
16468 * The @wq struct is used to get the queue ID of the queue to destroy.
16470 * On success this function will return zero. If the queue destroy mailbox
16471 * command fails this function will return -ENXIO.
16474 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16476 LPFC_MBOXQ_t *mbox;
16477 int rc, length, status = 0;
16478 uint32_t shdr_status, shdr_add_status;
16479 union lpfc_sli4_cfg_shdr *shdr;
16481 /* sanity check on queue memory */
16484 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16487 length = (sizeof(struct lpfc_mbx_wq_destroy) -
16488 sizeof(struct lpfc_sli4_cfg_mhdr));
16489 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16490 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16491 length, LPFC_SLI4_MBX_EMBED);
16492 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16494 mbox->vport = wq->phba->pport;
16495 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16496 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16497 shdr = (union lpfc_sli4_cfg_shdr *)
16498 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16499 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16500 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16501 if (shdr_status || shdr_add_status || rc) {
16502 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16503 "2508 WQ_DESTROY mailbox failed with "
16504 "status x%x add_status x%x, mbx status x%x\n",
16505 shdr_status, shdr_add_status, rc);
16508 /* Remove wq from any list */
16509 list_del_init(&wq->list);
16512 mempool_free(mbox, wq->phba->mbox_mem_pool);
16517 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
16518 * @rq: The queue structure associated with the queue to destroy.
16520 * This function destroys a queue, as detailed in @rq, by sending a mailbox
16521 * command specific to the type of queue to the HBA.
16523 * The @rq struct is used to get the queue ID of the queue to destroy.
16525 * On success this function will return zero. If the queue destroy mailbox
16526 * command fails this function will return -ENXIO.
16529 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16530 struct lpfc_queue *drq)
16532 LPFC_MBOXQ_t *mbox;
16533 int rc, length, status = 0;
16534 uint32_t shdr_status, shdr_add_status;
16535 union lpfc_sli4_cfg_shdr *shdr;
16537 /* sanity check on queue memory */
16540 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
16543 length = (sizeof(struct lpfc_mbx_rq_destroy) -
16544 sizeof(struct lpfc_sli4_cfg_mhdr));
16545 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16546 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
16547 length, LPFC_SLI4_MBX_EMBED);
16548 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16550 mbox->vport = hrq->phba->pport;
16551 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16552 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
16553 /* The IOCTL status is embedded in the mailbox subheader. */
16554 shdr = (union lpfc_sli4_cfg_shdr *)
16555 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16556 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16557 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16558 if (shdr_status || shdr_add_status || rc) {
16559 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16560 "2509 RQ_DESTROY mailbox failed with "
16561 "status x%x add_status x%x, mbx status x%x\n",
16562 shdr_status, shdr_add_status, rc);
16563 if (rc != MBX_TIMEOUT)
16564 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16567 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16569 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
16570 shdr = (union lpfc_sli4_cfg_shdr *)
16571 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16572 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16573 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16574 if (shdr_status || shdr_add_status || rc) {
16575 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16576 "2510 RQ_DESTROY mailbox failed with "
16577 "status x%x add_status x%x, mbx status x%x\n",
16578 shdr_status, shdr_add_status, rc);
16581 list_del_init(&hrq->list);
16582 list_del_init(&drq->list);
16583 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16588 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
16589 * @phba: pointer to lpfc hba data structure.
16590 * @pdma_phys_addr0: Physical address of the 1st SGL page.
16591 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
16592 * @xritag: the xritag that ties this io to the SGL pages.
16594 * This routine will post the sgl pages for the IO that has the xritag
16595 * that is in the iocbq structure. The xritag is assigned during iocbq
16596 * creation and persists for as long as the driver is loaded.
16597 * If the caller has fewer than 256 scatter gather segments to map, then
16598 * pdma_phys_addr1 should be 0.
16599 * If the caller needs to map more than 256 scatter gather segments, then
16600 * pdma_phys_addr1 should be a valid physical address.
16601 * Physical addresses for SGLs must be 64-byte aligned.
16602 * If two SGL pages are mapped, the first must have 256 entries and the
16603 * second can have between 1 and 256 entries.
16607 * -ENXIO, -ENOMEM - Failure
16610 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16611 dma_addr_t pdma_phys_addr0,
16612 dma_addr_t pdma_phys_addr1,
16615 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16616 LPFC_MBOXQ_t *mbox;
16618 uint32_t shdr_status, shdr_add_status;
16620 union lpfc_sli4_cfg_shdr *shdr;
16622 if (xritag == NO_XRI) {
16623 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16624 "0364 Invalid param:\n");
16628 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16632 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16633 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16634 sizeof(struct lpfc_mbx_post_sgl_pages) -
16635 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16637 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16638 &mbox->u.mqe.un.post_sgl_pages;
16639 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16640 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16642 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16643 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16644 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16645 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16647 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16648 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16649 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16650 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16651 if (!phba->sli4_hba.intr_enable)
16652 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16654 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16655 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16657 /* The IOCTL status is embedded in the mailbox subheader. */
16658 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16659 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16660 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16661 if (rc != MBX_TIMEOUT)
16662 mempool_free(mbox, phba->mbox_mem_pool);
16663 if (shdr_status || shdr_add_status || rc) {
16664 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16665 "2511 POST_SGL mailbox failed with "
16666 "status x%x add_status x%x, mbx status x%x\n",
16667 shdr_status, shdr_add_status, rc);
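/*
 * Usage sketch (illustrative only): posting the SGL page of a single
 * sglq entry, with pdma_phys_addr1 set to 0 because fewer than 256
 * scatter gather entries are mapped:
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
 *	if (rc)			// -ENXIO or -ENOMEM
 *		return rc;
 */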
16673 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
16674 * @phba: pointer to lpfc hba data structure.
16676 * This routine is invoked to allocate the next available xri from the
16677 * driver's xri bitmask, consistent with the SLI-4 interface spec. The
16678 * index is logical, so the search always starts from bit 0.
16682 * Return: the allocated xri if one is available,
16683 * NO_XRI if the xri pool is exhausted.
16686 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16691 * Fetch the next logical xri. Because this index is logical,
16692 * the driver starts at 0 each time.
16694 spin_lock_irq(&phba->hbalock);
16695 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16696 phba->sli4_hba.max_cfg_param.max_xri, 0);
16697 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16698 spin_unlock_irq(&phba->hbalock);
16701 set_bit(xri, phba->sli4_hba.xri_bmask);
16702 phba->sli4_hba.max_cfg_param.xri_used++;
16704 spin_unlock_irq(&phba->hbalock);
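/*
 * Usage sketch (illustrative only): allocation and release are a simple
 * bitmap pair; both of these wrappers take hbalock internally:
 *
 *	u16 xri = lpfc_sli4_alloc_xri(phba);
 *
 *	if (xri == NO_XRI)
 *		return -ENOMEM;		// pool exhausted
 *	...
 *	lpfc_sli4_free_xri(phba, xri);
 */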
16709 * __lpfc_sli4_free_xri - Release an xri for reuse.
16710 * @phba: pointer to lpfc hba data structure.
16712 * This routine is invoked to release an xri back to the pool of
16713 * available xris maintained by the driver. The caller must hold hbalock.
16716 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16718 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
16719 phba->sli4_hba.max_cfg_param.xri_used--;
16724 * lpfc_sli4_free_xri - Release an xri for reuse.
16725 * @phba: pointer to lpfc hba data structure.
16727 * This routine is invoked to release an xri back to the pool of
16728 * available xris maintained by the driver.
16731 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16733 spin_lock_irq(&phba->hbalock);
16734 __lpfc_sli4_free_xri(phba, xri);
16735 spin_unlock_irq(&phba->hbalock);
16739 * lpfc_sli4_next_xritag - Get an xritag for the io
16740 * @phba: Pointer to HBA context object.
16742 * This function gets an xritag for the iocb. If there is no unused xritag
16743 * it will return NO_XRI (0xffff).
16744 * The function returns the allocated xritag if successful, else NO_XRI;
16745 * NO_XRI is not a valid xritag.
16746 * The caller is not required to hold any lock.
16749 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
16751 uint16_t xri_index;
16753 xri_index = lpfc_sli4_alloc_xri(phba);
16754 if (xri_index == NO_XRI)
16755 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16756 "2004 Failed to allocate XRI. Last XRITAG is %d"
16757 " Max XRI is %d, Used XRI is %d\n",
16759 phba->sli4_hba.max_cfg_param.max_xri,
16760 phba->sli4_hba.max_cfg_param.xri_used);
16765 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
16766 * @phba: pointer to lpfc hba data structure.
16767 * @post_sgl_list: pointer to els sgl entry list.
16768 * @count: number of els sgl entries on the list.
16770 * This routine is invoked to post a block of the driver's sgl pages to the
16771 * HBA using a non-embedded mailbox command. No lock is held. This routine
16772 * is only called when the driver is loading and after all IO has been
16773 * stopped.
16776 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
16777 struct list_head *post_sgl_list,
16780 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
16781 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16782 struct sgl_page_pairs *sgl_pg_pairs;
16784 LPFC_MBOXQ_t *mbox;
16785 uint32_t reqlen, alloclen, pg_pairs;
16787 uint16_t xritag_start = 0;
16789 uint32_t shdr_status, shdr_add_status;
16790 union lpfc_sli4_cfg_shdr *shdr;
16792 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
16793 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16794 if (reqlen > SLI4_PAGE_SIZE) {
16795 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16796 "2559 Block sgl registration required DMA "
16797 "size (%d) greater than a page\n", reqlen);
16801 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16805 /* Allocate DMA memory and set up the non-embedded mailbox command */
16806 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16807 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
16808 LPFC_SLI4_MBX_NEMBED);
16810 if (alloclen < reqlen) {
16811 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16812 "0285 Allocated DMA memory size (%d) is "
16813 "less than the requested DMA memory "
16814 "size (%d)\n", alloclen, reqlen);
16815 lpfc_sli4_mbox_cmd_free(phba, mbox);
16818 /* Set up the SGL pages in the non-embedded DMA pages */
16819 viraddr = mbox->sge_array->addr[0];
16820 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16821 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16824 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
16825 /* Set up the sge entry */
16826 sgl_pg_pairs->sgl_pg0_addr_lo =
16827 cpu_to_le32(putPaddrLow(sglq_entry->phys));
16828 sgl_pg_pairs->sgl_pg0_addr_hi =
16829 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
16830 sgl_pg_pairs->sgl_pg1_addr_lo =
16831 cpu_to_le32(putPaddrLow(0));
16832 sgl_pg_pairs->sgl_pg1_addr_hi =
16833 cpu_to_le32(putPaddrHigh(0));
16835 /* Keep the first xritag on the list */
16837 xritag_start = sglq_entry->sli4_xritag;
16842 /* Complete initialization and perform endian conversion. */
16843 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16844 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
16845 sgl->word0 = cpu_to_le32(sgl->word0);
16847 if (!phba->sli4_hba.intr_enable)
16848 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16850 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16851 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16853 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16854 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16855 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16856 if (rc != MBX_TIMEOUT)
16857 lpfc_sli4_mbox_cmd_free(phba, mbox);
16858 if (shdr_status || shdr_add_status || rc) {
16859 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16860 "2513 POST_SGL_BLOCK mailbox command failed "
16861 "status x%x add_status x%x mbx status x%x\n",
16862 shdr_status, shdr_add_status, rc);
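/*
 * Illustrative sizing note (not driver code): the non-embedded request
 * must fit in one SLI4 page, which bounds how many sgl page pairs a
 * single mailbox command can carry:
 *
 *	reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
 *		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
 *
 * With a 4KB SLI4_PAGE_SIZE and 16-byte page pairs that works out to
 * roughly 250 pairs per command; the exact limit is whatever survives
 * the reqlen > SLI4_PAGE_SIZE check above.
 */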
16869 * lpfc_sli4_post_io_sgl_block - post a block of nvme buffer sgls to firmware
16870 * @phba: pointer to lpfc hba data structure.
16871 * @nblist: pointer to nvme buffer list.
16872 * @count: number of nvme buffers on the list.
16874 * This routine is invoked to post a block of @count nvme buffer sgl pages
16875 * from the buffer list @nblist to the HBA using a non-embedded mailbox
16876 * command.
16880 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
16883 struct lpfc_io_buf *lpfc_ncmd;
16884 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16885 struct sgl_page_pairs *sgl_pg_pairs;
16887 LPFC_MBOXQ_t *mbox;
16888 uint32_t reqlen, alloclen, pg_pairs;
16890 uint16_t xritag_start = 0;
16892 uint32_t shdr_status, shdr_add_status;
16893 dma_addr_t pdma_phys_bpl1;
16894 union lpfc_sli4_cfg_shdr *shdr;
16896 /* Calculate the requested length of the dma memory */
16897 reqlen = count * sizeof(struct sgl_page_pairs) +
16898 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16899 if (reqlen > SLI4_PAGE_SIZE) {
16900 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
16901 "6118 Block sgl registration required DMA "
16902 "size (%d) greater than a page\n", reqlen);
16905 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16907 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16908 "6119 Failed to allocate mbox cmd memory\n");
16912 /* Allocate DMA memory and set up the non-embedded mailbox command */
16913 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16914 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16915 reqlen, LPFC_SLI4_MBX_NEMBED);
16917 if (alloclen < reqlen) {
16918 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16919 "6120 Allocated DMA memory size (%d) is "
16920 "less than the requested DMA memory "
16921 "size (%d)\n", alloclen, reqlen);
16922 lpfc_sli4_mbox_cmd_free(phba, mbox);
16926 /* Get the first SGE entry from the non-embedded DMA memory */
16927 viraddr = mbox->sge_array->addr[0];
16929 /* Set up the SGL pages in the non-embedded DMA pages */
16930 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16931 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16934 list_for_each_entry(lpfc_ncmd, nblist, list) {
16935 /* Set up the sge entry */
16936 sgl_pg_pairs->sgl_pg0_addr_lo =
16937 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
16938 sgl_pg_pairs->sgl_pg0_addr_hi =
16939 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
16940 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
16941 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
16944 pdma_phys_bpl1 = 0;
16945 sgl_pg_pairs->sgl_pg1_addr_lo =
16946 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
16947 sgl_pg_pairs->sgl_pg1_addr_hi =
16948 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
16949 /* Keep the first xritag on the list */
16951 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
16955 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16956 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
16957 /* Perform endian conversion if necessary */
16958 sgl->word0 = cpu_to_le32(sgl->word0);
16960 if (!phba->sli4_hba.intr_enable) {
16961 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16963 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16964 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16966 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
16967 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16968 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16969 if (rc != MBX_TIMEOUT)
16970 lpfc_sli4_mbox_cmd_free(phba, mbox);
16971 if (shdr_status || shdr_add_status || rc) {
16972 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16973 "6125 POST_SGL_BLOCK mailbox command failed "
16974 "status x%x add_status x%x mbx status x%x\n",
16975 shdr_status, shdr_add_status, rc);
16982 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
16983 * @phba: pointer to lpfc hba data structure.
16984 * @post_nblist: pointer to the nvme buffer list.
16986 * This routine walks a list of nvme buffers that was passed in. It attempts
16987 * to construct blocks of nvme buffer sgls that contain contiguous xris and
16988 * uses the non-embedded SGL block post mailbox command to post them to the
16989 * port. Any single NVME buffer sgl with a non-contiguous xri is posted with
16990 * the embedded SGL post mailbox command instead. The @post_nblist passed in
16991 * must be a local list, so no lock is needed while manipulating it.
16993 * Returns: 0 on failure, otherwise the number of successfully posted buffers.
16996 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
16997 struct list_head *post_nblist, int sb_count)
16999 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
17000 int status, sgl_size;
17001 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
17002 dma_addr_t pdma_phys_sgl1;
17003 int last_xritag = NO_XRI;
17005 LIST_HEAD(prep_nblist);
17006 LIST_HEAD(blck_nblist);
17007 LIST_HEAD(nvme_nblist);
17013 sgl_size = phba->cfg_sg_dma_buf_size;
17014 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
17015 list_del_init(&lpfc_ncmd->list);
17017 if ((last_xritag != NO_XRI) &&
17018 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
17019 /* a hole in xri block, form a sgl posting block */
17020 list_splice_init(&prep_nblist, &blck_nblist);
17021 post_cnt = block_cnt - 1;
17022 /* prepare list for next posting block */
17023 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17026 /* prepare list for next posting block */
17027 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17028 /* enough sgls for non-embed sgl mbox command */
17029 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
17030 list_splice_init(&prep_nblist, &blck_nblist);
17031 post_cnt = block_cnt;
17036 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17038 /* end of repost sgl list condition for NVME buffers */
17039 if (num_posting == sb_count) {
17040 if (post_cnt == 0) {
17041 /* last sgl posting block */
17042 list_splice_init(&prep_nblist, &blck_nblist);
17043 post_cnt = block_cnt;
17044 } else if (block_cnt == 1) {
17045 /* last single sgl with non-contiguous xri */
17046 if (sgl_size > SGL_PAGE_SIZE)
17048 lpfc_ncmd->dma_phys_sgl +
17051 pdma_phys_sgl1 = 0;
17052 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17053 status = lpfc_sli4_post_sgl(
17054 phba, lpfc_ncmd->dma_phys_sgl,
17055 pdma_phys_sgl1, cur_xritag);
17057 /* Post error. Buffer unavailable. */
17058 lpfc_ncmd->flags |=
17059 LPFC_SBUF_NOT_POSTED;
17061 /* Post success. Buffer available. */
17062 lpfc_ncmd->flags &=
17063 ~LPFC_SBUF_NOT_POSTED;
17064 lpfc_ncmd->status = IOSTAT_SUCCESS;
17067 /* success, put on NVME buffer sgl list */
17068 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17072 /* continue until a non-embedded page worth of sgls */
17076 /* post block of NVME buffer list sgls */
17077 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
17080 /* don't reset xritag due to hole in xri block */
17081 if (block_cnt == 0)
17082 last_xritag = NO_XRI;
17084 /* reset NVME buffer post count for next round of posting */
17087 /* put NVME buffers with posted sgls on the NVME buffer sgl list */
17088 while (!list_empty(&blck_nblist)) {
17089 list_remove_head(&blck_nblist, lpfc_ncmd,
17090 struct lpfc_io_buf, list);
17092 /* Post error. Mark buffer unavailable. */
17093 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
17095 /* Post success, Mark buffer available. */
17096 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
17097 lpfc_ncmd->status = IOSTAT_SUCCESS;
17100 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17103 /* Push NVME buffers with sgl posted to the available list */
17104 lpfc_io_buf_replenish(phba, &nvme_nblist);
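/*
 * Illustrative walk-through (not driver code) of the grouping logic
 * above.  Suppose the post list carries buffers with xris
 * 100, 101, 102 and 107:
 *
 *   - 100..102 are contiguous, so they are spliced onto blck_nblist and
 *     posted with one non-embedded SGL block mailbox command;
 *   - the hole before 107 ends that block, and since 107 then stands
 *     alone at the end of the list it is posted through the embedded
 *     lpfc_sli4_post_sgl() path instead.
 *
 * Either way each buffer lands on the local nvme_nblist and is handed to
 * lpfc_io_buf_replenish(), with LPFC_SBUF_NOT_POSTED recording any
 * buffer whose sgl post failed.
 */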
17110 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
17111 * @phba: pointer to lpfc_hba struct that the frame was received on
17112 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17114 * This function checks the fields in the @fc_hdr to see if the FC frame is a
17115 * valid type of frame that the LPFC driver will handle. This function will
17116 * return a zero if the frame is a valid frame or a non zero value when the
17117 * frame does not pass the check.
17120 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
17123 struct fc_vft_header *fc_vft_hdr;
17124 uint32_t *header = (uint32_t *) fc_hdr;
17126 #define FC_RCTL_MDS_DIAGS 0xF4
17128 switch (fc_hdr->fh_r_ctl) {
17129 case FC_RCTL_DD_UNCAT: /* uncategorized information */
17130 case FC_RCTL_DD_SOL_DATA: /* solicited data */
17131 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
17132 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
17133 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
17134 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
17135 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
17136 case FC_RCTL_DD_CMD_STATUS: /* command status */
17137 case FC_RCTL_ELS_REQ: /* extended link services request */
17138 case FC_RCTL_ELS_REP: /* extended link services reply */
17139 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
17140 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
17141 case FC_RCTL_BA_NOP: /* basic link service NOP */
17142 case FC_RCTL_BA_ABTS: /* basic link service abort */
17143 case FC_RCTL_BA_RMC: /* remove connection */
17144 case FC_RCTL_BA_ACC: /* basic accept */
17145 case FC_RCTL_BA_RJT: /* basic reject */
17146 case FC_RCTL_BA_PRMT:
17147 case FC_RCTL_ACK_1: /* acknowledge_1 */
17148 case FC_RCTL_ACK_0: /* acknowledge_0 */
17149 case FC_RCTL_P_RJT: /* port reject */
17150 case FC_RCTL_F_RJT: /* fabric reject */
17151 case FC_RCTL_P_BSY: /* port busy */
17152 case FC_RCTL_F_BSY: /* fabric busy to data frame */
17153 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
17154 case FC_RCTL_LCR: /* link credit reset */
17155 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
17156 case FC_RCTL_END: /* end */
17158 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
17159 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17160 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
17161 return lpfc_fc_frame_check(phba, fc_hdr);
17166 switch (fc_hdr->fh_type) {
17179 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
17180 "2538 Received frame rctl:x%x, type:x%x, "
17181 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
17182 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
17183 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
17184 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
17185 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
17186 be32_to_cpu(header[6]));
17189 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
17190 "2539 Dropped frame rctl:x%x type:x%x\n",
17191 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
/**
 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function processes the FC header to retrieve the VFI from the VF
 * header, if one exists. This function will return the VFI if one exists
 * or 0 if no VSAN Header exists.
 **/
static uint32_t
lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
{
	struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;

	if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
		return 0;
	return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
}
/**
 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
 * @phba: Pointer to the HBA structure to search for the vport on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 * @fcfi: The FC Fabric ID that the frame came from
 * @did: the d_id the frame is directed to
 *
 * This function searches the @phba for a vport that matches the content of the
 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
 * returns the matching vport pointer or NULL if unable to match the frame to
 * a vport.
 **/
static struct lpfc_vport *
lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
		       uint16_t fcfi, uint32_t did)
{
	struct lpfc_vport **vports;
	struct lpfc_vport *vport = NULL;
	int i;

	if (did == Fabric_DID)
		return phba->pport;
	if ((phba->pport->fc_flag & FC_PT2PT) &&
	    !(phba->link_state == LPFC_HBA_READY))
		return phba->pport;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			if (phba->fcf.fcfi == fcfi &&
			    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
			    vports[i]->fc_myDID == did) {
				vport = vports[i];
				break;
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
	return vport;
}
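/*
 * Routing sketch (illustrative, editorial): the receive path below resolves
 * a frame with, e.g.,
 *
 *	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
 *	if (!vport)
 *		lpfc_in_buf_free(phba, &dmabuf->dbuf);
 *
 * mirroring lpfc_sli4_handle_received_buffer(); a match requires the
 * registered fcfi, the VFT-derived vfi and the frame's d_id to line up.
 */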
/**
 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
 * @vport: The vport to work on.
 *
 * This function updates the receive sequence time stamp for this vport. The
 * receive sequence time stamp indicates the time that the last frame of the
 * sequence that has been idle for the longest amount of time was received.
 * The driver uses this time stamp to indicate if any received sequences have
 * timed out.
 **/
static void
lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *dmabuf = NULL;

	/* get the oldest sequence on the rcv list */
	h_buf = list_get_first(&vport->rcv_buffer_list,
			       struct lpfc_dmabuf, list);
	if (!h_buf)
		return;
	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
}
/**
 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function cleans up all outstanding received sequences. This is called
 * by the driver when a link event or user action invalidates all the received
 * sequences.
 **/
void
lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;

	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
}
/**
 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function determines whether any received sequences have timed out by
 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
 * indicates that there is at least one timed out sequence this routine will
 * go through the received sequences one at a time from most inactive to most
 * active to determine which ones need to be cleaned up. Once it has determined
 * that a sequence needs to be cleaned up it will simply free up the resources
 * without sending an abort.
 **/
void
lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;
	unsigned long timeout;
	int abort_count = 0;

	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
		   vport->rcv_buffer_time_stamp);
	if (list_empty(&vport->rcv_buffer_list) ||
	    time_before(jiffies, timeout))
		return;
	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
			   dmabuf->time_stamp);
		if (time_before(jiffies, timeout))
			break;
		abort_count++;
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
	if (abort_count)
		lpfc_update_rcv_time_stamp(vport);
}
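/*
 * Timing sketch (illustrative, editorial): with a fabric-supplied E_D_TOV
 * of, say, 2000 ms, a partially assembled sequence whose newest frame
 * arrived more than msecs_to_jiffies(2000) jiffies ago is treated as dead
 * and its buffers are freed above; no ABTS is sent for it.
 */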
/**
 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: The vport the frame was received on.
 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
 *
 * This function searches through the existing incomplete sequences that have
 * been sent to this @vport. If the frame matches one of the incomplete
 * sequences then the dbuf in the @dmabuf is added to the list of frames that
 * make up that sequence. If no sequence is found that matches this frame then
 * the function will add the hbuf in the @dmabuf to the @vport's
 * rcv_buffer_list. This function returns a pointer to the first dmabuf in the
 * sequence list that the frame was linked to.
 **/
static struct hbq_dmabuf *
lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf;
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;
	struct hbq_dmabuf *temp_dmabuf = NULL;
	uint8_t found = 0;

	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	dmabuf->time_stamp = jiffies;
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;

	/* Use the hdr_buf to find the sequence that this frame belongs to */
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}
	if (!seq_dmabuf) {
		/*
		 * This indicates first frame received for this sequence.
		 * Queue the buffer on the vport's rcv_buffer_list.
		 */
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	temp_hdr = seq_dmabuf->hbuf.virt;
	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
	    be16_to_cpu(temp_hdr->fh_seq_cnt)) {
		list_del_init(&seq_dmabuf->hbuf.list);
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	/* move this sequence to the tail to indicate a young sequence */
	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
	seq_dmabuf->time_stamp = jiffies;
	lpfc_update_rcv_time_stamp(vport);
	if (list_empty(&seq_dmabuf->dbuf.list)) {
		temp_hdr = dmabuf->hbuf.virt;
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		return seq_dmabuf;
	}
	/* find the correct place in the sequence to insert this frame */
	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
	while (!found) {
		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
		/*
		 * If the frame's sequence count is greater than the frame on
		 * the list then insert the frame right after this frame
		 */
		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
		    be16_to_cpu(temp_hdr->fh_seq_cnt)) {
			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
			found = 1;
			break;
		}

		if (&d_buf->list == &seq_dmabuf->dbuf.list)
			break;
		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
	}

	if (found)
		return seq_dmabuf;
	return NULL;
}
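/*
 * Note on ordering (editorial): the dbuf list above is kept sorted by
 * ascending SEQ_CNT, so the search starts at the list tail (the highest
 * count seen so far) and walks backwards; a frame with SEQ_CNT 3 arriving
 * after 0, 1 and 4 would, for example, be linked in between 1 and 4.
 */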
/**
 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the partially assembled sequence described
 * by the information from the basic abort @dmabuf. It checks whether such a
 * partially assembled sequence is held by the driver. If so, it shall free up
 * all the frames from the partially assembled sequence.
 *
 * Return
 * true -- if there is matching partially assembled sequence present and all
 *	   the frames freed with the sequence;
 * false -- if there is no matching partially assembled sequence present so
 *	    nothing got aborted in the lower layer driver
 **/
static bool
lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
			    struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;

	/* Use the hdr_buf to find the sequence that matches this frame */
	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	INIT_LIST_HEAD(&dmabuf->hbuf.list);
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}

	/* Free up all the frames from the partially assembled sequence */
	if (seq_dmabuf) {
		list_for_each_entry_safe(d_buf, n_buf,
					 &seq_dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		return true;
	}
	return false;
}
/**
 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the sequence, already assembled and passed to
 * an upper level protocol, described by the information from the basic abort
 * @dmabuf. It checks whether such a pending context exists at the upper level
 * protocol. If so, it shall clean up the pending context.
 *
 * Return
 * true -- if there is matching pending context of the sequence cleaned
 *	   at ulp;
 * false -- if there is no matching pending context of the sequence present
 *	    at ulp.
 **/
static bool
lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	int handled;

	/* Accepting abort at ulp with SLI4 only */
	if (phba->sli_rev < LPFC_SLI_REV4)
		return false;

	/* Register all caring upper level protocols to attend abort */
	handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
	if (handled)
		return true;

	return false;
}
/**
 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
 * @phba: Pointer to HBA context object.
 * @cmd_iocbq: pointer to the command iocbq structure.
 * @rsp_iocbq: pointer to the response iocbq structure.
 *
 * This function handles the sequence abort response iocb command complete
 * event. It properly releases the memory allocated to the sequence abort
 * accept iocb.
 **/
static void
lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
			     struct lpfc_iocbq *cmd_iocbq,
			     struct lpfc_iocbq *rsp_iocbq)
{
	struct lpfc_nodelist *ndlp;

	if (cmd_iocbq) {
		ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
		lpfc_nlp_put(ndlp);
		lpfc_nlp_not_used(ndlp);
		lpfc_sli_release_iocbq(phba, cmd_iocbq);
	}

	/* Failure means BLS ABORT RSP did not get delivered to remote node */
	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3154 BLS ABORT RSP failed, data: x%x/x%x\n",
				rsp_iocbq->iocb.ulpStatus,
				rsp_iocbq->iocb.un.ulpWord[4]);
}
/**
 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
 * @phba: Pointer to HBA context object.
 * @xri: xri id in transaction.
 *
 * This function validates the xri maps to the known range of XRIs allocated
 * and used by the driver.
 **/
static uint16_t
lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
		      uint16_t xri)
{
	uint16_t i;

	for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
		if (xri == phba->sli4_hba.xri_ids[i])
			return i;
	}
	return NO_XRI;
}
/**
 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
 * @vport: pointer to a virtual port.
 * @fc_hdr: pointer to a FC frame header.
 * @aborted: whether the partially assembled receive sequence was aborted.
 *
 * This function sends a basic response to a previous unsol sequence abort
 * event after aborting the sequence handling.
 **/
void
lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
			struct fc_frame_header *fc_hdr, bool aborted)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *ctiocb = NULL;
	struct lpfc_nodelist *ndlp;
	uint16_t oxid, rxid, xri, lxri;
	uint32_t sid, fctl;
	IOCB_t *icmd;
	int rc;

	if (!lpfc_is_link_up(phba))
		return;

	sid = sli4_sid_from_fc_hdr(fc_hdr);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr->fh_rx_id);

	ndlp = lpfc_findnode_did(vport, sid);
	if (!ndlp) {
		ndlp = lpfc_nlp_init(vport, sid);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
					 "1268 Failed to allocate ndlp for "
					 "oxid:x%x SID:x%x\n", oxid, sid);
			return;
		}
		/* Put ndlp onto pport node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
					 "3275 Failed to activate ndlp found "
					 "for oxid:x%x SID:x%x\n", oxid, sid);
			return;
		}
	}

	/* Allocate buffer for rsp iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb)
		return;

	/* Extract the F_CTL field from FC_HDR */
	fctl = sli4_fctl_from_fc_hdr(fc_hdr);

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.bdeSize = 0;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;

	/* Fill in the rest of iocb fields */
	icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
	icmd->ulpBdeCount = 0;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	ctiocb->context1 = lpfc_nlp_get(ndlp);

	ctiocb->vport = phba->pport;
	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
	ctiocb->sli4_lxritag = NO_XRI;
	ctiocb->sli4_xritag = NO_XRI;

	if (fctl & FC_FC_EX_CTX)
		/* Exchange responder sent the abort so we
		 * own the oxid.
		 */
		xri = oxid;
	else
		xri = rxid;
	lxri = lpfc_sli4_xri_inrange(phba, xri);
	if (lxri != NO_XRI)
		lpfc_set_rrq_active(phba, ndlp, lxri,
				    (xri == oxid) ? rxid : oxid, 0);
	/* For BA_ABTS from exchange responder, if the logical xri with
	 * the oxid maps to the FCP XRI range, the port no longer has
	 * that exchange context, send a BLS_RJT. Override the IOCB for
	 * a BA_RJT.
	 */
	if ((fctl & FC_FC_EX_CTX) &&
	    (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
	}

	/* If BA_ABTS failed to abort a partially assembled receive sequence,
	 * the driver no longer has that exchange, send a BLS_RJT. Override
	 * the IOCB for a BA_RJT.
	 */
	if (aborted == false) {
		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
	}

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS sent by responder to CT exchange, construction
		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
		 * field and RX_ID from ABTS for RX_ID field.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
	} else {
		/* ABTS sent by initiator to CT exchange, construction
		 * of BA_ACC will need to allocate a new XRI as for the
		 * XRI_TAG field.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
	}
	bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
	bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);

	/* Xmit CT abts response on exchange <xid> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
			 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2925 Failed to issue CT ABTS RSP x%x on "
				 "xri x%x, Data x%x\n",
				 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
				 phba->link_state);
		lpfc_nlp_put(ndlp);
		ctiocb->context1 = NULL;
		lpfc_sli_release_iocbq(phba, ctiocb);
	}
}
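/*
 * Editorial summary of the response choice above: the driver answers the
 * ABTS with BA_ACC only when it still owned the exchange and the abort of
 * the partial sequence succeeded; if the xri is outside the range it owns,
 * or the abort found nothing to clean up (aborted == false), the IOCB is
 * rewritten as a BA_RJT with reason FC_BA_RJT_UNABLE / FC_BA_RJT_INV_XID.
 */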
/**
 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
 * @vport: Pointer to the vport on which this sequence was received
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 * receive sequence is only partially assembled by the driver, it shall abort
 * the partially assembled frames for the sequence. Otherwise, if the
 * unsolicited receive sequence has been completely assembled and passed to
 * the Upper Layer Protocol (ULP), it then marks the per oxid status for the
 * unsolicited sequence as aborted. After that, it will issue a basic
 * accept to accept the abort.
 **/
static void
lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
			     struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	struct fc_frame_header fc_hdr;
	uint32_t fctl;
	bool aborted;

	/* Make a copy of fc_hdr before the dmabuf being released */
	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS by responder to exchange, no cleanup needed */
		aborted = true;
	} else {
		/* ABTS by initiator to exchange, need to do cleanup */
		aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
		if (aborted == false)
			aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
	}
	lpfc_in_buf_free(phba, &dmabuf->dbuf);

	if (phba->nvmet_support) {
		lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
		return;
	}

	/* Respond with BA_ACC or BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
}
/**
 * lpfc_seq_complete - Indicates if a sequence is complete
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function checks the sequence, starting with the frame described by
 * @dmabuf, to see if all the frames associated with this sequence are present.
 * The frames associated with this sequence are linked to the @dmabuf using the
 * dbuf list. This function looks for three major things. 1) That the first
 * frame has a sequence count of zero. 2) There is a frame with the last frame
 * of sequence bit set. 3) That there are no holes in the sequence count. The
 * function will return 1 when the sequence is complete, otherwise it will
 * return 0.
 **/
static int
lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *hdr;
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *seq_dmabuf;
	uint32_t fctl;
	int seq_count = 0;

	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* make sure first frame of sequence has a sequence count of zero */
	if (hdr->fh_seq_cnt != seq_count)
		return 0;
	fctl = (hdr->fh_f_ctl[0] << 16 |
		hdr->fh_f_ctl[1] << 8 |
		hdr->fh_f_ctl[2]);
	/* If last frame of sequence we can return success. */
	if (fctl & FC_FC_END_SEQ)
		return 1;
	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
		/* If there is a hole in the sequence count then fail. */
		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
			return 0;
		fctl = (hdr->fh_f_ctl[0] << 16 |
			hdr->fh_f_ctl[1] << 8 |
			hdr->fh_f_ctl[2]);
		/* If last frame of sequence we can return success. */
		if (fctl & FC_FC_END_SEQ)
			return 1;
	}
	return 0;
}
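/*
 * Worked example (editorial): a three-frame sequence with SEQ_CNT 0, 1, 2
 * and FC_FC_END_SEQ set in frame 2 returns 1 on the third check above; if
 * frame 1 is still missing, the ++seq_count comparison sees 1 != 2 at the
 * hole check and the routine returns 0 so assembly continues.
 */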
/**
 * lpfc_prep_seq - Prep sequence for ULP processing
 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function takes a sequence, described by a list of frames, and creates
 * a list of iocbq structures to describe the sequence. This iocbq list will be
 * used to issue to the generic unsolicited sequence handler. This routine
 * returns a pointer to the first iocbq in the list. If the function is unable
 * to allocate an iocbq then it throws out the received frames that were not
 * able to be described and returns a pointer to the first iocbq. If unable to
 * allocate any iocbqs (including the first) this function will return NULL.
 **/
static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
{
	struct hbq_dmabuf *hbq_buf;
	struct lpfc_dmabuf *d_buf, *n_buf;
	struct lpfc_iocbq *first_iocbq, *iocbq;
	struct fc_frame_header *fc_hdr;
	uint32_t sid;
	uint32_t len, tot_len;
	struct ulp_bde64 *pbde;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	/* remove from receive buffer list */
	list_del_init(&seq_dmabuf->hbuf.list);
	lpfc_update_rcv_time_stamp(vport);
	/* get the Remote Port's SID */
	sid = sli4_sid_from_fc_hdr(fc_hdr);
	tot_len = 0;
	/* Get an iocbq struct to fill in. */
	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
	if (first_iocbq) {
		/* Initialize the first IOCB. */
		first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
		first_iocbq->vport = vport;

		/* Check FC Header to see what TYPE of frame we are rcv'ing */
		if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
			first_iocbq->iocb.un.rcvels.parmRo =
				sli4_did_from_fc_hdr(fc_hdr);
			first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
		} else
			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
		first_iocbq->iocb.ulpContext = NO_XRI;
		first_iocbq->iocb.unsli3.rcvsli3.ox_id =
			be16_to_cpu(fc_hdr->fh_ox_id);
		/* iocbq is prepped for internal consumption. Physical vpi. */
		first_iocbq->iocb.unsli3.rcvsli3.vpi =
			vport->phba->vpi_ids[vport->vpi];
		/* put the first buffer into the first IOCBq */
		tot_len = bf_get(lpfc_rcqe_length,
				 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);

		first_iocbq->context2 = &seq_dmabuf->dbuf;
		first_iocbq->context3 = NULL;
		first_iocbq->iocb.ulpBdeCount = 1;
		if (tot_len > LPFC_DATA_BUF_SIZE)
			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
				LPFC_DATA_BUF_SIZE;
		else
			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;

		first_iocbq->iocb.un.rcvels.remoteID = sid;

		first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
	}
	iocbq = first_iocbq;
	/*
	 * Each IOCBq can have two Buffers assigned, so go through the list
	 * of buffers for this sequence and save two buffers in each IOCBq
	 */
	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
		if (!iocbq) {
			lpfc_in_buf_free(vport->phba, d_buf);
			continue;
		}
		if (!iocbq->context3) {
			iocbq->context3 = d_buf;
			iocbq->iocb.ulpBdeCount++;
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				     &hbq_buf->cq_event.cqe.rcqe_cmpl);
			pbde = (struct ulp_bde64 *)
				&iocbq->iocb.unsli3.sli3Words[4];
			if (len > LPFC_DATA_BUF_SIZE)
				pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
			else
				pbde->tus.f.bdeSize = len;

			iocbq->iocb.unsli3.rcvsli3.acc_len += len;
			tot_len += len;
		} else {
			iocbq = lpfc_sli_get_iocbq(vport->phba);
			if (!iocbq) {
				if (first_iocbq) {
					first_iocbq->iocb.ulpStatus =
						IOSTAT_FCP_RSP_ERROR;
					first_iocbq->iocb.un.ulpWord[4] =
						IOERR_NO_RESOURCES;
				}
				lpfc_in_buf_free(vport->phba, d_buf);
				continue;
			}
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				     &hbq_buf->cq_event.cqe.rcqe_cmpl);
			iocbq->context2 = d_buf;
			iocbq->context3 = NULL;
			iocbq->iocb.ulpBdeCount = 1;
			if (len > LPFC_DATA_BUF_SIZE)
				iocbq->iocb.un.cont64[0].tus.f.bdeSize =
					LPFC_DATA_BUF_SIZE;
			else
				iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;

			tot_len += len;
			iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;

			iocbq->iocb.un.rcvels.remoteID = sid;
			list_add_tail(&iocbq->list, &first_iocbq->list);
		}
	}
	/* Free the sequence's header buffer */
	lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);

	return first_iocbq;
}
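/*
 * Layout note (editorial): each iocbq built above describes at most two
 * frame buffers -- the first in context2 with its BDE in un.cont64[0],
 * the second in context3 with its BDE written into unsli3.sli3Words[4] --
 * while unsli3.rcvsli3.acc_len accumulates the total sequence length.
 */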
static void
lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
			  struct hbq_dmabuf *seq_dmabuf)
{
	struct fc_frame_header *fc_hdr;
	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
	struct lpfc_hba *phba = vport->phba;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
	if (!iocbq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2707 Ring %d handler: Failed to allocate "
				"iocb Rctl x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
		return;
	}
	if (!lpfc_complete_unsol_iocb(phba,
				      phba->sli4_hba.els_wq->pring,
				      iocbq, fc_hdr->fh_r_ctl,
				      fc_hdr->fh_type))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2540 Ring %d handler: unexpected Rctl "
				"x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);

	/* Free iocb created in lpfc_prep_seq */
	list_for_each_entry_safe(curr_iocb, next_iocb,
				 &iocbq->list, list) {
		list_del_init(&curr_iocb->list);
		lpfc_sli_release_iocbq(phba, curr_iocb);
	}
	lpfc_sli_release_iocbq(phba, iocbq);
}
static void
lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_dmabuf *pcmd = cmdiocb->context2;

	if (pcmd && pcmd->virt)
		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, cmdiocb);
	lpfc_drain_txq(phba);
}
static void
lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
			      struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *fc_hdr;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq = NULL;
	union lpfc_wqe *wqe;
	struct lpfc_dmabuf *pcmd = NULL;
	uint32_t frame_len;
	int rc;
	unsigned long iflags;

	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);

	/* Send the received frame back */
	iocbq = lpfc_sli_get_iocbq(phba);
	if (!iocbq) {
		/* Queue cq event and wakeup worker thread to process it */
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&dmabuf->cq_event.list,
			      &phba->sli4_hba.sp_queue_event);
		phba->hba_flag |= HBA_SP_QUEUE_EVT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_worker_wake_up(phba);
		return;
	}

	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
					    &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* copyin the payload */
	memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);

	/* fill in BDE's for command */
	iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
	iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
	iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
	iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;

	iocbq->context2 = pcmd;
	iocbq->vport = vport;
	iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
	iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;

	/*
	 * Setup rest of the iocb as though it were a WQE
	 * Build the SEND_FRAME WQE
	 */
	wqe = (union lpfc_wqe *)&iocbq->iocb;

	wqe->send_frame.frame_len = frame_len;
	wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
	wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
	wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
	wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
	wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
	wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));

	iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
	iocbq->iocb.ulpLe = 1;
	iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
	if (rc == IOCB_ERROR)
		goto exit;

	lpfc_in_buf_free(phba, &dmabuf->dbuf);
	return;

exit:
	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"2023 Unable to process MDS loopback frame\n");
	if (pcmd && pcmd->virt)
		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	if (iocbq)
		lpfc_sli_release_iocbq(phba, iocbq);
	lpfc_in_buf_free(phba, &dmabuf->dbuf);
}
/**
 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
 * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to a dmabuf that describes the received frame.
 *
 * This function is called with no lock held. This function processes all
 * the received buffers and gives it to upper layers when a received buffer
 * indicates that it is the final frame in the sequence. The interrupt
 * service routine processes received buffers at interrupt contexts.
 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
 * appropriate receive function when the final frame in a sequence is received.
 **/
void
lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
				 struct hbq_dmabuf *dmabuf)
{
	struct hbq_dmabuf *seq_dmabuf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_vport *vport;
	uint32_t fcfi;
	uint32_t did;

	/* Process each received buffer */
	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;

	if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
	    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
		vport = phba->pport;
		/* Handle MDS Loopback frames */
		lpfc_sli4_handle_mds_loopback(vport, dmabuf);
		return;
	}

	/* check to see if this a valid type of frame */
	if (lpfc_fc_frame_check(phba, fc_hdr)) {
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}

	if ((bf_get(lpfc_cqe_code,
		    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
		fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
			      &dmabuf->cq_event.cqe.rcqe_cmpl);
	else
		fcfi = bf_get(lpfc_rcqe_fcf_id,
			      &dmabuf->cq_event.cqe.rcqe_cmpl);

	if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
		vport = phba->pport;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2023 MDS Loopback %d bytes\n",
				bf_get(lpfc_rcqe_length,
				       &dmabuf->cq_event.cqe.rcqe_cmpl));
		/* Handle MDS Loopback frames */
		lpfc_sli4_handle_mds_loopback(vport, dmabuf);
		return;
	}

	/* d_id this frame is directed to */
	did = sli4_did_from_fc_hdr(fc_hdr);

	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
	if (!vport) {
		/* throw out the frame */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}

	/* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
	if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
	    (did != Fabric_DID)) {
		/*
		 * Throw out the frame if we are not pt2pt.
		 * The pt2pt protocol allows for discovery frames
		 * to be received without a registered VPI.
		 */
		if (!(vport->fc_flag & FC_PT2PT) ||
		    (phba->link_state == LPFC_HBA_READY)) {
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
			return;
		}
	}

	/* Handle the basic abort sequence (BA_ABTS) event */
	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
		return;
	}

	/* Link this frame */
	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
	if (!seq_dmabuf) {
		/* unable to add frame to vport - throw it out */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	/* If not last frame in sequence continue processing frames. */
	if (!lpfc_seq_complete(seq_dmabuf))
		return;

	/* Send the complete sequence to the upper layer protocol */
	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
}
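/*
 * Receive-path overview (editorial): a buffer surfaced by the RQ is
 * validated (lpfc_fc_frame_check), mapped to a vport via its fcfi/vfi/did,
 * special-cased for MDS diagnostics and BA_ABTS, linked into a pending
 * sequence (lpfc_fc_frame_add), and only when lpfc_seq_complete() reports
 * the final frame is the whole sequence handed to the ULP.
 */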
/**
 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * HBA consistent with the SLI-4 interface spec. This routine
 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
 *
 * This routine does not require any locks. Its usage is expected
 * to be driver load or reset recovery when processing is sequential.
 *
 * Return codes
 *	0 - successful
 *	-EIO - The mailbox failed to complete successfully.
 *	When this error occurs, the driver is not guaranteed
 *	to have any rpi regions posted to the device and
 *	must either attempt to repost the regions or take a
 *	fatal error.
 **/
int
lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_page;
	uint32_t rc = 0;
	uint16_t lrpi = 0;

	/* SLI4 ports that support extents do not require RPI headers. */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		goto exit;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		/*
		 * Assign the rpi headers a physical rpi only if the driver
		 * has not initialized those resources. A port reset only
		 * needs the headers posted.
		 */
		if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
		    LPFC_RPI_RSRC_RDY)
			rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];

		rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2008 Error %d posting all rpi "
					"headers\n", rc);
			rc = -EIO;
			break;
		}
	}

exit:
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
	       LPFC_RPI_RSRC_RDY);
	return rc;
}
/**
 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 * @rpi_page: pointer to the rpi memory region.
 *
 * This routine is invoked to post a single rpi header to the
 * HBA consistent with the SLI-4 interface spec. This memory region
 * maps up to 64 rpi context regions.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
	uint32_t rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* SLI4 ports that support extents do not require RPI headers. */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return rc;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	/* The port is notified of the header region via a mailbox command. */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2001 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	/* Post all rpi memory regions to the port. */
	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
			 sizeof(struct lpfc_sli4_cfg_mhdr),
			 LPFC_SLI4_MBX_EMBED);

	/* Post the physical rpi to the port for this rpi header. */
	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
	       rpi_page->start_rpi);
	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
	       hdr_tmpl, rpi_page->page_count);

	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2514 POST_RPI_HDR mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	} else {
		/*
		 * The next_rpi stores the next logical module-64 rpi value
		 * used to post physical rpis in subsequent rpi postings.
		 */
		spin_lock_irq(&phba->hbalock);
		phba->sli4_hba.next_rpi = rpi_page->next_rpi;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}
/**
 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the next available rpi from the
 * driver's logical rpi bitmask, posting an additional rpi header page
 * to the port if the remaining rpi resources run low.
 *
 * Returns
 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
 **/
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
	int rpi;
	uint16_t max_rpi, rpi_limit;
	uint16_t rpi_remaining, lrpi = 0;
	struct lpfc_rpi_hdr *rpi_hdr;
	unsigned long iflag;

	/*
	 * Fetch the next logical rpi. Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	rpi_limit = phba->sli4_hba.next_rpi;

	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
	if (rpi >= rpi_limit)
		rpi = LPFC_RPI_ALLOC_ERROR;
	else {
		set_bit(rpi, phba->sli4_hba.rpi_bmask);
		phba->sli4_hba.max_cfg_param.rpi_used++;
		phba->sli4_hba.rpi_count++;
	}
	lpfc_printf_log(phba, KERN_INFO,
			LOG_NODE | LOG_DISCOVERY,
			"0001 Allocated rpi:x%x max:x%x lim:x%x\n",
			(int) rpi, max_rpi, rpi_limit);

	/*
	 * Don't try to allocate more rpi header regions if the device limit
	 * has been exhausted.
	 */
	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
	    (phba->sli4_hba.rpi_count >= max_rpi)) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * RPI header postings are not required for SLI4 ports capable of
	 * extents.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * If the driver is running low on rpi resources, allocate another
	 * page now. Note that the next_rpi value is used because
	 * it represents how many are actually in use whereas max_rpi notes
	 * how many are supported max by the device.
	 */
	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
		if (!rpi_hdr) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2002 Error Could not grow rpi "
					"count\n");
		} else {
			lrpi = rpi_hdr->start_rpi;
			rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
		}
	}

	return rpi;
}
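/*
 * Usage sketch (illustrative, not a call site in this file): discovery
 * code would typically pair the allocator with the release helper below:
 *
 *	rpi = lpfc_sli4_alloc_rpi(phba);
 *	if (rpi == LPFC_RPI_ALLOC_ERROR)
 *		return -ENOMEM;		(hypothetical caller's error path)
 *	...
 *	lpfc_sli4_free_rpi(phba, rpi);
 */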
/**
 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
static void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	/*
	 * if the rpi value indicates a prior unreg has already
	 * been done, skip the unreg.
	 */
	if (rpi == LPFC_RPI_ALLOC_ERROR)
		return;

	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
		phba->sli4_hba.rpi_count--;
		phba->sli4_hba.max_cfg_param.rpi_used--;
	} else {
		lpfc_printf_log(phba, KERN_INFO,
				LOG_NODE | LOG_DISCOVERY,
				"2016 rpi %x not inuse\n",
				rpi);
	}
}
/**
 * lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_rpi(phba, rpi);
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove the memory region that
 * provided rpi via a bitmask.
 **/
void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.rpi_bmask);
	kfree(phba->sli4_hba.rpi_ids);
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
}
/**
 * lpfc_sli4_resume_rpi - Resume an rpi for a remote node
 * @ndlp: pointer to the lpfc nodelist whose rpi is to be resumed.
 * @cmpl: optional completion handler for the RESUME_RPI mailbox command.
 * @arg: caller context returned through the mailbox ctx_buf.
 *
 * This routine is invoked to issue a RESUME_RPI mailbox command for the
 * rpi associated with @ndlp, invoking @cmpl (if supplied) on completion.
 **/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
		     void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_hba *phba = ndlp->phba;
	int rc;

	/* The port is notified of the rpi resume via a mailbox command. */
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Construct the RESUME_RPI mailbox command for this node. */
	lpfc_resume_rpi(mboxq, ndlp);
	if (cmpl) {
		mboxq->mbox_cmpl = cmpl;
		mboxq->ctx_buf = arg;
		mboxq->ctx_ndlp = ndlp;
	} else
		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mboxq->vport = ndlp->vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2010 Resume RPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
/**
 * lpfc_sli4_init_vpi - Initialize a vpi with the port
 * @vport: Pointer to the vport for which the vpi is being initialized
 *
 * This routine is invoked to activate a vpi with the port.
 *
 * Returns:
 *	0 - successful
 *	-Evalue otherwise
 **/
int
lpfc_sli4_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc = 0;
	int retval = MBX_SUCCESS;
	uint32_t mbox_tmo;
	struct lpfc_hba *phba = vport->phba;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	lpfc_init_vpi(phba, mboxq, vport->vpi);
	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
				 "2022 INIT VPI Mailbox failed "
				 "status %d, mbxStatus x%x\n", rc,
				 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		retval = -EIO;
	}
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, vport->phba->mbox_mem_pool);

	return retval;
}
/**
 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: Pointer to mailbox object.
 *
 * This routine is the completion handler for the ADD_FCF_RECORD mailbox
 * command issued by lpfc_sli4_add_fcf_record(). It checks the mailbox
 * subheader status and releases the nonembedded mailbox resources.
 **/
static void
lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	void *virt_addr;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;

	virt_addr = mboxq->sge_array->addr[0];
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	if ((shdr_status || shdr_add_status) &&
	    (shdr_status != STATUS_FCF_IN_USE))
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2558 ADD_FCF_RECORD mailbox failed with "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);

	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}
/**
 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the initialized fcf record to add.
 *
 * This routine is invoked to manually add a single FCF record. The caller
 * must pass a completely initialized FCF_Record. This routine takes
 * care of the nonembedded mailbox operations.
 **/
int
lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
{
	int rc = 0;
	LPFC_MBOXQ_t *mboxq;
	uint8_t *bytep;
	void *virt_addr;
	struct lpfc_mbx_sge sge;
	uint32_t alloc_len, req_len;
	uint32_t fcfindex;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2009 Failed to allocate mbox for ADD_FCF cmd\n");
		return -ENOMEM;
	}

	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
		  sizeof(uint32_t);

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
				     req_len, LPFC_SLI4_MBX_NEMBED);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2523 Allocated DMA memory size (x%x) is "
				"less than the requested DMA memory "
				"size (x%x)\n", alloc_len, req_len);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -ENOMEM;
	}

	/*
	 * Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	virt_addr = mboxq->sge_array->addr[0];
	/*
	 * Configure the FCF record for FCFI 0. This is the driver's
	 * hardcoded default and gets used in nonFIP mode.
	 */
	fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
	lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));

	/*
	 * Copy the fcf_index and the FCF Record Data. The data starts after
	 * the FCoE header plus word10. The data copy needs to be endian
	 * correct.
	 */
	bytep += sizeof(uint32_t);
	lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2515 ADD_FCF_RECORD mailbox failed with "
				"status 0x%x\n", rc);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		rc = -EIO;
	} else
		rc = 0;

	return rc;
}
/**
 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record to write the default data.
 * @fcf_index: FCF table entry index.
 *
 * This routine is invoked to build the driver's default FCF record. The
 * values used are hardcoded. This routine handles memory initialization.
 **/
void
lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
				struct fcf_record *fcf_record,
				uint16_t fcf_index)
{
	memset(fcf_record, 0, sizeof(struct fcf_record));
	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
	       LPFC_FCF_FPMA | LPFC_FCF_SPMA);
	/* Set the VLAN bit map */
	if (phba->valid_vlan) {
		fcf_record->vlan_bitmap[phba->vlan_id / 8]
			= 1 << (phba->vlan_id % 8);
	}
}
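/*
 * Usage sketch (illustrative, the actual call sites live outside this
 * file): FIP-less setup would typically build the default record and then
 * post it via the nonembedded mailbox path above, e.g.
 *
 *	struct fcf_record fcf_record;
 *
 *	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, fcf_index);
 *	rc = lpfc_sli4_add_fcf_record(phba, &fcf_record);
 */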
/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to scan the entire FCF table by reading FCF
 * record and processing it one at a time starting from the @fcf_index
 * for initial FCF discovery or fast FCF failover rediscovery.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
int
lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
	phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2000 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_scan;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_scan;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag |= FCF_TS_INPROG;
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else {
		/* Reset eligible FCF count for new scan */
		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
			phba->fcf.eligible_fcf_cnt = 0;
		error = 0;
	}
fail_fcf_scan:
	if (error) {
		if (mboxq)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		/* FCF scan failed, clear FCF_TS_INPROG flag */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_TS_INPROG;
		spin_unlock_irq(&phba->hbalock);
	}
	return error;
}
/**
 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index
 * and to use it for FLOGI roundrobin FCF failover.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
int
lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2763 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}
/**
 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index to
 * determine whether it's eligible for the FLOGI roundrobin failover list.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
int
lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2758 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}
/**
 * lpfc_check_next_fcf_pri_level
 * @phba: pointer to the lpfc_hba struct for this port.
 *
 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
 * routine when the rr_bmask is empty. The FCF indices are put into the
 * rr_bmask based on their priority level. Starting from the highest priority
 * to the lowest. The most likely FCF candidate will be in the highest
 * priority group. When this routine is called it searches the fcf_pri list
 * for the next lowest priority group and repopulates the rr_bmask with only
 * those fcf indexes.
 *
 * Returns:
 * 1 = success, 0 = failure.
 **/
static int
lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
{
	uint16_t next_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	int rc;
	int ret = 0;

	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
				    LPFC_SLI4_FCF_TBL_INDX_MAX);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3060 Last IDX %d\n", last_index);

	/* Verify the priority list has 2 or more entries */
	spin_lock_irq(&phba->hbalock);
	if (list_empty(&phba->fcf.fcf_pri_list) ||
	    list_is_singular(&phba->fcf.fcf_pri_list)) {
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"3061 Last IDX %d\n", last_index);
		return 0; /* Empty rr list */
	}
	spin_unlock_irq(&phba->hbalock);

	next_fcf_pri = 0;
	/*
	 * Clear the rr_bmask and set all of the bits that are at this
	 * priority.
	 */
	memset(phba->fcf.fcf_rr_bmask, 0,
	       sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
			continue;
		/*
		 * the 1st priority that has not FLOGI failed
		 * will be the highest.
		 */
		if (!next_fcf_pri)
			next_fcf_pri = fcf_pri->fcf_rec.priority;
		spin_unlock_irq(&phba->hbalock);
		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
			rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
			if (rc)
				return 0;
		}
		spin_lock_irq(&phba->hbalock);
	}
	/*
	 * if next_fcf_pri was not set above and the list is not empty then
	 * we have failed flogis on all of them. So reset flogi failed
	 * and start at the beginning.
	 */
	if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
			/*
			 * the 1st priority that has not FLOGI failed
			 * will be the highest.
			 */
			if (!next_fcf_pri)
				next_fcf_pri = fcf_pri->fcf_rec.priority;
			spin_unlock_irq(&phba->hbalock);
			if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
				rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
				if (rc)
					return 0;
			}
			spin_lock_irq(&phba->hbalock);
		}
	} else
		ret = 1;
	spin_unlock_irq(&phba->hbalock);

	return ret;
}
18959 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
18960 * @phba: pointer to lpfc hba data structure.
18962 * This routine is to get the next eligible FCF record index in a round
18963 * robin fashion. If the next eligible FCF record index equals to the
18964 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
18965 * shall be returned, otherwise, the next eligible FCF record's index
18966 * shall be returned.
18969 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
18971 uint16_t next_fcf_index;
18974 /* Search start from next bit of currently registered FCF index */
18975 next_fcf_index = phba->fcf.current_rec.fcf_indx;
18978 /* Determine the next fcf index to check */
18979 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
18980 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18981 LPFC_SLI4_FCF_TBL_INDX_MAX,
18984 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
18985 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18987 * If we have wrapped then we need to clear the bits that
18988 * have been tested so that we can detect when we should
18989 * change the priority level.
18991 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18992 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
18996 /* Check roundrobin failover list empty condition */
18997 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
18998 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
19000 * If the next FCF index is not found, check if there are lower
19001 * priority level FCFs in the fcf_priority list.
19002 * Set up the rr_bmask with all of the available FCF bits
19003 * at that level and continue the selection process.
19005 if (lpfc_check_next_fcf_pri_level(phba))
19006 goto initial_priority;
19007 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
19008 "2844 No roundrobin failover FCF available\n");
19010 return LPFC_FCOE_FCF_NEXT_NONE;
19013 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
19014 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
19015 LPFC_FCF_FLOGI_FAILED) {
19016 if (list_is_singular(&phba->fcf.fcf_pri_list))
19017 return LPFC_FCOE_FCF_NEXT_NONE;
19019 goto next_priority;
19022 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19023 "2845 Get next roundrobin failover FCF (x%x)\n",
19026 return next_fcf_index;
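/*
 * Illustrative sketch (compiled out; not part of the driver): the routine
 * above is the classic "scan a bitmask starting one past the current slot,
 * wrap around once" round-robin pattern. A simplified model with hypothetical
 * names, assuming a Linux-style find_next_bit():
 */
#if 0
static int rr_next(const unsigned long *bmask, int nbits, int cur)
{
	/* scan from the slot after the current one */
	int next = find_next_bit(bmask, nbits, (cur + 1) % nbits);

	/* wrap around once and rescan from bit 0 */
	if (next >= nbits)
		next = find_next_bit(bmask, nbits, 0);

	/* nothing eligible, or we came back to where we started */
	if (next >= nbits || next == cur)
		return -1;
	return next;
}
#endif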
19030 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
19031 * @phba: pointer to lpfc hba data structure.
19033 * This routine sets the FCF record index in to the eligible bmask for
19034 * roundrobin failover search. It checks to make sure that the index
19035 * does not go beyond the range of the driver allocated bmask dimension
19036 * before setting the bit.
19038 * Returns 0 if the index bit successfully set, otherwise, it returns
19042 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
19044 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19045 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19046 "2610 FCF (x%x) reached driver's book "
19047 "keeping dimension:x%x\n",
19048 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19051 /* Set the eligible FCF record index bmask */
19052 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
19054 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19055 "2790 Set FCF (x%x) to roundrobin FCF failover "
19056 "bmask\n", fcf_index);
19062 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
19063 * @phba: pointer to lpfc hba data structure.
19065 * This routine clears the FCF record index from the eligible bmask for
19066 * roundrobin failover search. It checks to make sure that the index
19067 * does not go beyond the range of the driver allocated bmask dimension
19068 * before clearing the bit.
19071 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
19073 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
19074 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19075 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19076 "2762 FCF (x%x) reached driver's book "
19077 "keeping dimension:x%x\n",
19078 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19081 /* Clear the eligible FCF record index bmask */
19082 spin_lock_irq(&phba->hbalock);
19083 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
19085 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
19086 list_del_init(&fcf_pri->list);
19090 spin_unlock_irq(&phba->hbalock);
19091 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
19093 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19094 "2791 Clear FCF (x%x) from roundrobin failover "
19095 "bmask\n", fcf_index);
19099 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
19100 * @phba: pointer to lpfc hba data structure.
19102 * This routine is the completion routine for the rediscover FCF table mailbox
19103 * command. If the mailbox command failed, the last-resort failover path is
19104 * taken; on success, the FCF rediscovery wait timer is started.
19107 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
19109 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19110 uint32_t shdr_status, shdr_add_status;
19112 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19114 shdr_status = bf_get(lpfc_mbox_hdr_status,
19115 &redisc_fcf->header.cfg_shdr.response);
19116 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19117 &redisc_fcf->header.cfg_shdr.response);
19118 if (shdr_status || shdr_add_status) {
19119 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19120 "2746 Requesting for FCF rediscovery failed "
19121 "status x%x add_status x%x\n",
19122 shdr_status, shdr_add_status);
19123 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
19124 spin_lock_irq(&phba->hbalock);
19125 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
19126 spin_unlock_irq(&phba->hbalock);
19128 * CVL event triggered FCF rediscover request failed;
19129 * as a last resort, retry the currently registered FCF entry.
19131 lpfc_retry_pport_discovery(phba);
19133 spin_lock_irq(&phba->hbalock);
19134 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
19135 spin_unlock_irq(&phba->hbalock);
19137 * DEAD FCF event triggered FCF rediscover request
19138 * failed; as a last resort, fail over by treating it
19139 * as a link down to FCF registration.
19141 lpfc_sli4_fcf_dead_failthrough(phba);
19144 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19145 "2775 Start FCF rediscover quiescent timer\n");
19147 * Start FCF rediscovery wait timer for pending FCF
19148 * before rescanning the FCF record table.
19150 lpfc_fcf_redisc_wait_start_timer(phba);
19153 mempool_free(mbox, phba->mbox_mem_pool);
19157 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
19158 * @phba: pointer to lpfc hba data structure.
19160 * This routine is invoked to request rediscovery of the entire FCF table
19164 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
19166 LPFC_MBOXQ_t *mbox;
19167 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19170 /* Cancel retry delay timers to all vports before FCF rediscover */
19171 lpfc_cancel_all_vport_retry_delay_timer(phba);
19173 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19175 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19176 "2745 Failed to allocate mbox for "
19177 "requesting FCF rediscover.\n");
19181 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
19182 sizeof(struct lpfc_sli4_cfg_mhdr));
19183 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
19184 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
19185 length, LPFC_SLI4_MBX_EMBED);
19187 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19188 /* Set count to 0 for invalidating the entire FCF database */
19189 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
19191 /* Issue the mailbox command asynchronously */
19192 mbox->vport = phba->pport;
19193 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
19194 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
19196 if (rc == MBX_NOT_FINISHED) {
19197 mempool_free(mbox, phba->mbox_mem_pool);
19204 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
19205 * @phba: pointer to lpfc hba data structure.
19207 * This function is the failover routine as a last resort to the FCF DEAD
19208 * event when driver failed to perform fast FCF failover.
19211 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
19213 uint32_t link_state;
19216 * Last resort as FCF DEAD event failover will treat this as
19217 * a link down, but save the link state because we don't want
19218 * it to be changed to Link Down unless it is already down.
19220 link_state = phba->link_state;
19221 lpfc_linkdown(phba);
19222 phba->link_state = link_state;
19224 /* Unregister FCF if no devices connected to it */
19225 lpfc_unregister_unused_fcf(phba);
19229 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
19230 * @phba: pointer to lpfc hba data structure.
19231 * @rgn23_data: pointer to configure region 23 data.
19233 * This function gets SLI3 port configuration region 23 data through the memory
19234 * dump mailbox command. When it successfully retrieves data, the size of the
19235 * data will be returned; otherwise, 0 will be returned.
19238 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19240 LPFC_MBOXQ_t *pmb = NULL;
19242 uint32_t offset = 0;
19248 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19250 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19251 "2600 failed to allocate mailbox memory\n");
19257 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
19258 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
19260 if (rc != MBX_SUCCESS) {
19261 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19262 "2601 failed to read config "
19263 "region 23, rc 0x%x Status 0x%x\n",
19264 rc, mb->mbxStatus);
19265 mb->un.varDmp.word_cnt = 0;
19268 * dump mem may return a zero count when finished, or we may
19269 * have hit a mailbox error; either way we are done.
19271 if (mb->un.varDmp.word_cnt == 0)
19273 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
19274 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
19276 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
19277 rgn23_data + offset,
19278 mb->un.varDmp.word_cnt);
19279 offset += mb->un.varDmp.word_cnt;
19280 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
19282 mempool_free(pmb, phba->mbox_mem_pool);
19287 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
19288 * @phba: pointer to lpfc hba data structure.
19289 * @rgn23_data: pointer to configure region 23 data.
19291 * This function gets SLI4 port configuration region 23 data through the memory
19292 * dump mailbox command. When it successfully retrieves data, the size of the
19293 * data will be returned; otherwise, 0 will be returned.
19296 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19298 LPFC_MBOXQ_t *mboxq = NULL;
19299 struct lpfc_dmabuf *mp = NULL;
19300 struct lpfc_mqe *mqe;
19301 uint32_t data_length = 0;
19307 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19309 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19310 "3105 failed to allocate mailbox memory\n");
19314 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
19316 mqe = &mboxq->u.mqe;
19317 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
19318 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19321 data_length = mqe->un.mb_words[5];
19322 if (data_length == 0)
19324 if (data_length > DMP_RGN23_SIZE) {
19328 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
19330 mempool_free(mboxq, phba->mbox_mem_pool);
19332 lpfc_mbuf_free(phba, mp->virt, mp->phys);
19335 return data_length;
19339 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
19340 * @phba: pointer to lpfc hba data structure.
19342 * This function reads region 23 and parses the TLVs for port status to
19343 * decide if the user disabled the port. If a TLV indicates the
19344 * port is disabled, the hba_flag is set accordingly.
19347 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
19349 uint8_t *rgn23_data = NULL;
19350 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
19351 uint32_t offset = 0;
19353 /* Get adapter Region 23 data */
19354 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
19358 if (phba->sli_rev < LPFC_SLI_REV4)
19359 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
19361 if_type = bf_get(lpfc_sli_intf_if_type,
19362 &phba->sli4_hba.sli_intf);
19363 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
19365 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
19371 /* Check the region signature first */
19372 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
19373 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19374 "2619 Config region 23 has bad signature\n");
19379 /* Check the data structure version */
19380 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
19381 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19382 "2620 Config region 23 has bad version\n");
19387 /* Parse TLV entries in the region */
19388 while (offset < data_size) {
19389 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
19392 * If the TLV is not a driver-specific TLV, or the driver id is
19393 * not the Linux driver id, skip the record.
19395 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
19396 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
19397 (rgn23_data[offset + 3] != 0)) {
19398 offset += rgn23_data[offset + 1] * 4 + 4;
19402 /* Driver found a driver specific TLV in the config region */
19403 sub_tlv_len = rgn23_data[offset + 1] * 4;
19408 * Search for configured port state sub-TLV.
19410 while ((offset < data_size) &&
19411 (tlv_offset < sub_tlv_len)) {
19412 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
19417 if (rgn23_data[offset] != PORT_STE_TYPE) {
19418 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
19419 offset += rgn23_data[offset + 1] * 4 + 4;
19423 /* This HBA contains PORT_STE configured */
19424 if (!rgn23_data[offset + 2])
19425 phba->hba_flag |= LINK_DISABLED;
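/*
 * Illustrative sketch (compiled out; not part of the driver): each region 23
 * record starts with a type byte at data[off] and a length-in-words byte at
 * data[off + 1], with record-specific bytes from data[off + 2] onward, so a
 * record spans (len * 4 + 4) bytes -- the stride used in the walk above.
 * Hypothetical, simplified walker:
 */
#if 0
static void walk_region23(uint8_t *data, uint32_t size)
{
	uint32_t off = 0;

	while (off < size) {
		if (data[off] == LPFC_REGION23_LAST_REC)
			break;
		/* type = data[off], record-specific bytes begin at data[off + 2] */
		off += data[off + 1] * 4 + 4;
	}
}
#endif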
19437 * lpfc_wr_object - write an object to the firmware
19438 * @phba: HBA structure that indicates port to create a queue on.
19439 * @dmabuf_list: list of dmabufs to write to the port.
19440 * @size: the total byte value of the objects to write to the port.
19441 * @offset: the current offset to be used to start the transfer.
19443 * This routine will create a wr_object mailbox command to send to the port.
19444 * The mailbox command will be constructed using the dma buffers described in
19445 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
19446 * BDEs as the embedded mailbox can support. The @offset variable will be
19447 * used to indicate the starting offset of the transfer and will also return
19448 * the offset after the write object mailbox has completed. @size is used to
19449 * determine the end of the object and whether the eof bit should be set.
19451 * Return 0 if successful; @offset will contain the new offset to use
19452 * for the next write.
19453 * Return a negative value for error cases.
19456 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
19457 uint32_t size, uint32_t *offset)
19459 struct lpfc_mbx_wr_object *wr_object;
19460 LPFC_MBOXQ_t *mbox;
19462 uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
19464 struct lpfc_dmabuf *dmabuf;
19465 uint32_t written = 0;
19466 bool check_change_status = false;
19468 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19472 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
19473 LPFC_MBOX_OPCODE_WRITE_OBJECT,
19474 sizeof(struct lpfc_mbx_wr_object) -
19475 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
19477 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
19478 wr_object->u.request.write_offset = *offset;
19479 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
19480 wr_object->u.request.object_name[0] =
19481 cpu_to_le32(wr_object->u.request.object_name[0]);
19482 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
19483 list_for_each_entry(dmabuf, dmabuf_list, list) {
19484 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
19486 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
19487 wr_object->u.request.bde[i].addrHigh =
19488 putPaddrHigh(dmabuf->phys);
19489 if (written + SLI4_PAGE_SIZE >= size) {
19490 wr_object->u.request.bde[i].tus.f.bdeSize =
19492 written += (size - written);
19493 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
19494 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
19495 check_change_status = true;
19497 wr_object->u.request.bde[i].tus.f.bdeSize =
19499 written += SLI4_PAGE_SIZE;
19503 wr_object->u.request.bde_count = i;
19504 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
19505 if (!phba->sli4_hba.intr_enable)
19506 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
19508 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
19509 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
19511 /* The IOCTL status is embedded in the mailbox subheader. */
19512 shdr_status = bf_get(lpfc_mbox_hdr_status,
19513 &wr_object->header.cfg_shdr.response);
19514 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19515 &wr_object->header.cfg_shdr.response);
19516 if (check_change_status) {
19517 shdr_change_status = bf_get(lpfc_wr_object_change_status,
19518 &wr_object->u.response);
19520 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
19521 shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
19522 shdr_csf = bf_get(lpfc_wr_object_csf,
19523 &wr_object->u.response);
19525 shdr_change_status =
19526 LPFC_CHANGE_STATUS_PCI_RESET;
19529 switch (shdr_change_status) {
19530 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
19531 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19532 "3198 Firmware write complete: System "
19533 "reboot required to instantiate\n");
19535 case (LPFC_CHANGE_STATUS_FW_RESET):
19536 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19537 "3199 Firmware write complete: Firmware"
19538 " reset required to instantiate\n");
19540 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
19541 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19542 "3200 Firmware write complete: Port "
19543 "Migration or PCI Reset required to "
19546 case (LPFC_CHANGE_STATUS_PCI_RESET):
19547 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19548 "3201 Firmware write complete: PCI "
19549 "Reset required to instantiate\n");
19555 if (rc != MBX_TIMEOUT)
19556 mempool_free(mbox, phba->mbox_mem_pool);
19557 if (shdr_status || shdr_add_status || rc) {
19558 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19559 "3025 Write Object mailbox failed with "
19560 "status x%x add_status x%x, mbx status x%x\n",
19561 shdr_status, shdr_add_status, rc);
19563 *offset = shdr_add_status;
19565 *offset += wr_object->u.response.actual_write_length;
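/*
 * Illustrative sketch (compiled out; not part of the driver): a caller
 * typically drives lpfc_wr_object() in a loop, re-issuing the mailbox with
 * the updated @offset until the whole object has been written; the routine
 * sets the eof bit on the final chunk itself. Hypothetical usage model;
 * building @dmabuf_list for each iteration and error reporting are elided:
 */
#if 0
static int write_object_all(struct lpfc_hba *phba,
			    struct list_head *dmabuf_list, uint32_t size)
{
	uint32_t offset = 0;
	int rc;

	while (offset < size) {
		/* each call consumes as many BDEs as the embedded
		 * mailbox holds and advances offset on success
		 */
		rc = lpfc_wr_object(phba, dmabuf_list, size, &offset);
		if (rc)
			return rc;
	}
	return 0;
}
#endif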
19570 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
19571 * @vport: pointer to vport data structure.
19573 * This function iterates through the mailboxq and cleans up all REG_LOGIN
19574 * and REG_VPI mailbox commands associated with the vport. This function
19575 * is called when the driver wants to restart discovery of the vport due to
19576 * a Clear Virtual Link event.
19579 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
19581 struct lpfc_hba *phba = vport->phba;
19582 LPFC_MBOXQ_t *mb, *nextmb;
19583 struct lpfc_dmabuf *mp;
19584 struct lpfc_nodelist *ndlp;
19585 struct lpfc_nodelist *act_mbx_ndlp = NULL;
19586 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
19587 LIST_HEAD(mbox_cmd_list);
19588 uint8_t restart_loop;
19590 /* Clean up internally queued mailbox commands with the vport */
19591 spin_lock_irq(&phba->hbalock);
19592 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
19593 if (mb->vport != vport)
19596 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19597 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19600 list_del(&mb->list);
19601 list_add_tail(&mb->list, &mbox_cmd_list);
19603 /* Clean up active mailbox command with the vport */
19604 mb = phba->sli.mbox_active;
19605 if (mb && (mb->vport == vport)) {
19606 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
19607 (mb->u.mb.mbxCommand == MBX_REG_VPI))
19608 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19609 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19610 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19611 /* Put reference count for delayed processing */
19612 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
19613 /* Unregister the RPI when mailbox complete */
19614 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19617 /* Cleanup any mailbox completions which are not yet processed */
19620 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
19622 * If this mailbox is already processed, or it is
19623 * for another vport, ignore it.
19625 if ((mb->vport != vport) ||
19626 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
19629 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19630 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19633 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19634 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19635 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19636 /* Unregister the RPI when mailbox complete */
19637 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19639 spin_unlock_irq(&phba->hbalock);
19640 spin_lock(shost->host_lock);
19641 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19642 spin_unlock(shost->host_lock);
19643 spin_lock_irq(&phba->hbalock);
19647 } while (restart_loop);
19649 spin_unlock_irq(&phba->hbalock);
19651 /* Release the cleaned-up mailbox commands */
19652 while (!list_empty(&mbox_cmd_list)) {
19653 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
19654 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19655 mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
19657 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
19660 mb->ctx_buf = NULL;
19661 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19662 mb->ctx_ndlp = NULL;
19664 spin_lock(shost->host_lock);
19665 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19666 spin_unlock(shost->host_lock);
19667 lpfc_nlp_put(ndlp);
19670 mempool_free(mb, phba->mbox_mem_pool);
19673 /* Release the ndlp with the cleaned-up active mailbox command */
19674 if (act_mbx_ndlp) {
19675 spin_lock(shost->host_lock);
19676 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19677 spin_unlock(shost->host_lock);
19678 lpfc_nlp_put(act_mbx_ndlp);
19683 * lpfc_drain_txq - Drain the txq
19684 * @phba: Pointer to HBA context object.
19686 * This function attempts to submit IOCBs on the txq
19687 * to the adapter. For SLI4 adapters, the txq contains
19688 * ELS IOCBs that have been deferred because there
19689 * are no SGLs. This congestion can occur with large
19690 * vport counts during node discovery.
19694 lpfc_drain_txq(struct lpfc_hba *phba)
19696 LIST_HEAD(completions);
19697 struct lpfc_sli_ring *pring;
19698 struct lpfc_iocbq *piocbq = NULL;
19699 unsigned long iflags = 0;
19700 char *fail_msg = NULL;
19701 struct lpfc_sglq *sglq;
19702 union lpfc_wqe128 wqe;
19703 uint32_t txq_cnt = 0;
19704 struct lpfc_queue *wq;
19706 if (phba->link_flag & LS_MDS_LOOPBACK) {
19707 /* MDS WQEs are posted only to the first WQ */
19708 wq = phba->sli4_hba.hdwq[0].io_wq;
19713 wq = phba->sli4_hba.els_wq;
19716 pring = lpfc_phba_elsring(phba);
19719 if (unlikely(!pring) || list_empty(&pring->txq))
19722 spin_lock_irqsave(&pring->ring_lock, iflags);
19723 list_for_each_entry(piocbq, &pring->txq, list) {
19727 if (txq_cnt > pring->txq_max)
19728 pring->txq_max = txq_cnt;
19730 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19732 while (!list_empty(&pring->txq)) {
19733 spin_lock_irqsave(&pring->ring_lock, iflags);
19735 piocbq = lpfc_sli_ringtx_get(phba, pring);
19737 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19738 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19739 "2823 txq empty and txq_cnt is %d\n ",
19743 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
19745 __lpfc_sli_ringtx_put(phba, pring, piocbq);
19746 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19751 /* The xri and iocb resources secured,
19752 * attempt to issue request
19754 piocbq->sli4_lxritag = sglq->sli4_lxritag;
19755 piocbq->sli4_xritag = sglq->sli4_xritag;
19756 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
19757 fail_msg = "to convert bpl to sgl";
19758 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
19759 fail_msg = "to convert iocb to wqe";
19760 else if (lpfc_sli4_wq_put(wq, &wqe))
19761 fail_msg = " - Wq is full";
19763 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
19766 /* Failed means we can't issue and need to cancel */
19767 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19768 "2822 IOCB failed %s iotag 0x%x "
19771 piocbq->iotag, piocbq->sli4_xritag);
19772 list_add_tail(&piocbq->list, &completions);
19774 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19777 /* Cancel all the IOCBs that cannot be issued */
19778 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
19779 IOERR_SLI_ABORTED);
19785 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
19786 * @phba: Pointer to HBA context object.
19787 * @pwqe: Pointer to command WQE.
19788 * @sglq: Pointer to the scatter gather queue object.
19790 * This routine converts the bpl or bde that is in the WQE
19791 * to a sgl list for the sli4 hardware. The physical address
19792 * of the bpl/bde is converted back to a virtual address.
19793 * If the WQE contains a BPL then the list of BDEs is
19794 * converted to sli4_sge entries. If the WQE contains a single
19795 * BDE then it is converted to a single sli4_sge.
19796 * The WQE is still in cpu endianness so the contents of
19797 * the bpl can be used without byte swapping.
19799 * Returns valid XRI = Success, NO_XRI = Failure.
19802 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
19803 struct lpfc_sglq *sglq)
19805 uint16_t xritag = NO_XRI;
19806 struct ulp_bde64 *bpl = NULL;
19807 struct ulp_bde64 bde;
19808 struct sli4_sge *sgl = NULL;
19809 struct lpfc_dmabuf *dmabuf;
19810 union lpfc_wqe128 *wqe;
19813 uint32_t offset = 0; /* accumulated offset in the sg request list */
19814 int inbound = 0; /* number of sg reply entries inbound from firmware */
19817 if (!pwqeq || !sglq)
19820 sgl = (struct sli4_sge *)sglq->sgl;
19822 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
19824 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
19825 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
19826 return sglq->sli4_xritag;
19827 numBdes = pwqeq->rsvd2;
19829 /* The addrHigh and addrLow fields within the WQE
19830 * have not been byteswapped yet so there is no
19831 * need to swap them back.
19833 if (pwqeq->context3)
19834 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
19838 bpl = (struct ulp_bde64 *)dmabuf->virt;
19842 for (i = 0; i < numBdes; i++) {
19843 /* Should already be byte swapped. */
19844 sgl->addr_hi = bpl->addrHigh;
19845 sgl->addr_lo = bpl->addrLow;
19847 sgl->word2 = le32_to_cpu(sgl->word2);
19848 if ((i+1) == numBdes)
19849 bf_set(lpfc_sli4_sge_last, sgl, 1);
19851 bf_set(lpfc_sli4_sge_last, sgl, 0);
19852 /* swap the size field back to the cpu so we
19853 * can assign it to the sgl.
19855 bde.tus.w = le32_to_cpu(bpl->tus.w);
19856 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
19857 /* The offsets in the sgl need to be accumulated
19858 * separately for the request and reply lists.
19859 * The request is always first, the reply follows.
19862 case CMD_GEN_REQUEST64_WQE:
19863 /* add up the reply sg entries */
19864 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
19866 /* first inbound? reset the offset */
19869 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19870 bf_set(lpfc_sli4_sge_type, sgl,
19871 LPFC_SGE_TYPE_DATA);
19872 offset += bde.tus.f.bdeSize;
19874 case CMD_FCP_TRSP64_WQE:
19875 bf_set(lpfc_sli4_sge_offset, sgl, 0);
19876 bf_set(lpfc_sli4_sge_type, sgl,
19877 LPFC_SGE_TYPE_DATA);
19879 case CMD_FCP_TSEND64_WQE:
19880 case CMD_FCP_TRECEIVE64_WQE:
19881 bf_set(lpfc_sli4_sge_type, sgl,
19882 bpl->tus.f.bdeFlags);
19886 offset += bde.tus.f.bdeSize;
19887 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19890 sgl->word2 = cpu_to_le32(sgl->word2);
19894 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
19895 /* The addrHigh and addrLow fields of the BDE have not
19896 * been byteswapped yet so they need to be swapped
19897 * before putting them in the sgl.
19899 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
19900 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
19901 sgl->word2 = le32_to_cpu(sgl->word2);
19902 bf_set(lpfc_sli4_sge_last, sgl, 1);
19903 sgl->word2 = cpu_to_le32(sgl->word2);
19904 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
19906 return sglq->sli4_xritag;
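/*
 * Illustrative sketch (compiled out; not part of the driver): word2 of an
 * sli4_sge lives in little-endian DMA memory, so the conversions above are a
 * read-modify-write: bring the word to CPU order, update the bit-fields,
 * then convert it back before the hardware sees it. The same pattern,
 * isolated with a hypothetical helper name:
 */
#if 0
static void sge_mark_last(struct sli4_sge *sgl, int last)
{
	sgl->word2 = le32_to_cpu(sgl->word2);	/* DMA (LE) -> CPU order */
	bf_set(lpfc_sli4_sge_last, sgl, last);	/* field update in CPU order */
	sgl->word2 = cpu_to_le32(sgl->word2);	/* back to DMA order */
}
#endif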
19910 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
19911 * @phba: Pointer to HBA context object.
19912 * @qp: Pointer to the SLI4 hardware queue the WQE is destined for.
19913 * @pwqe: Pointer to command WQE.
19916 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
19917 struct lpfc_iocbq *pwqe)
19919 union lpfc_wqe128 *wqe = &pwqe->wqe;
19920 struct lpfc_nvmet_rcv_ctx *ctxp;
19921 struct lpfc_queue *wq;
19922 struct lpfc_sglq *sglq;
19923 struct lpfc_sli_ring *pring;
19924 unsigned long iflags;
19927 /* NVME_LS and NVME_LS ABTS requests. */
19928 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
19929 pring = phba->sli4_hba.nvmels_wq->pring;
19930 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19932 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
19934 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19937 pwqe->sli4_lxritag = sglq->sli4_lxritag;
19938 pwqe->sli4_xritag = sglq->sli4_xritag;
19939 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
19940 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19943 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19944 pwqe->sli4_xritag);
19945 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
19947 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19951 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19952 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19954 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
19958 /* NVME_FCREQ and NVME_ABTS requests */
19959 if (pwqe->iocb_flag & LPFC_IO_NVME) {
19960 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
19964 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
19966 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19968 ret = lpfc_sli4_wq_put(wq, wqe);
19970 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19973 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19974 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19976 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
19980 /* NVMET requests */
19981 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
19982 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
19986 ctxp = pwqe->context2;
19987 sglq = ctxp->ctxbuf->sglq;
19988 if (pwqe->sli4_xritag == NO_XRI) {
19989 pwqe->sli4_lxritag = sglq->sli4_lxritag;
19990 pwqe->sli4_xritag = sglq->sli4_xritag;
19992 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19993 pwqe->sli4_xritag);
19994 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
19996 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19998 ret = lpfc_sli4_wq_put(wq, wqe);
20000 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20003 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20004 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20006 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20012 #ifdef LPFC_MXP_STAT
20014 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
20015 * @phba: pointer to lpfc hba data structure.
20016 * @hwqid: belong to which HWQ.
20018 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
20019 * 15 seconds after a test case starts running.
20021 * The user should call lpfc_debugfs_multixripools_write before running a test
20022 * case to clear stat_snapshot_taken. Then the user starts a test case. While
20023 * the test case is running, stat_snapshot_taken is incremented by 1 each time
20024 * this routine is called from the heartbeat timer. When stat_snapshot_taken is
20025 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
20027 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
20029 struct lpfc_sli4_hdw_queue *qp;
20030 struct lpfc_multixri_pool *multixri_pool;
20031 struct lpfc_pvt_pool *pvt_pool;
20032 struct lpfc_pbl_pool *pbl_pool;
20035 qp = &phba->sli4_hba.hdwq[hwqid];
20036 multixri_pool = qp->p_multixri_pool;
20037 if (!multixri_pool)
20040 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
20041 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20042 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20043 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20045 multixri_pool->stat_pbl_count = pbl_pool->count;
20046 multixri_pool->stat_pvt_count = pvt_pool->count;
20047 multixri_pool->stat_busy_count = txcmplq_cnt;
20050 multixri_pool->stat_snapshot_taken++;
20055 * lpfc_adjust_pvt_pool_count - Adjust private pool count
20056 * @phba: pointer to lpfc hba data structure.
20057 * @hwqid: belong to which HWQ.
20059 * This routine moves some XRIs from the private to the public pool when the
20060 * private pool is not busy.
20062 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
20064 struct lpfc_multixri_pool *multixri_pool;
20066 u32 prev_io_req_count;
20068 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20069 if (!multixri_pool)
20071 io_req_count = multixri_pool->io_req_count;
20072 prev_io_req_count = multixri_pool->prev_io_req_count;
20074 if (prev_io_req_count != io_req_count) {
20075 /* Private pool is busy */
20076 multixri_pool->prev_io_req_count = io_req_count;
20078 /* Private pool is not busy.
20079 * Move XRIs from private to public pool.
20081 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
20086 * lpfc_adjust_high_watermark - Adjust high watermark
20087 * @phba: pointer to lpfc hba data structure.
20088 * @hwqid: belong to which HWQ.
20090 * This routine sets the high watermark to the number of outstanding XRIs,
20091 * but makes sure the new value is between xri_limit/2 and xri_limit.
20093 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
20101 struct lpfc_multixri_pool *multixri_pool;
20102 struct lpfc_sli4_hdw_queue *qp;
20104 qp = &phba->sli4_hba.hdwq[hwqid];
20105 multixri_pool = qp->p_multixri_pool;
20106 if (!multixri_pool)
20108 xri_limit = multixri_pool->xri_limit;
20110 watermark_max = xri_limit;
20111 watermark_min = xri_limit / 2;
20113 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20114 abts_io_bufs = qp->abts_scsi_io_bufs;
20115 abts_io_bufs += qp->abts_nvme_io_bufs;
20117 new_watermark = txcmplq_cnt + abts_io_bufs;
20118 new_watermark = min(watermark_max, new_watermark);
20119 new_watermark = max(watermark_min, new_watermark);
20120 multixri_pool->pvt_pool.high_watermark = new_watermark;
20122 #ifdef LPFC_MXP_STAT
20123 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
20129 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
20130 * @phba: pointer to lpfc hba data structure.
20131 * @hwqid: belong to which HWQ.
20133 * This routine is called from the heartbeat timer when pvt_pool is idle.
20134 * All free XRIs are moved from the private to the public pool on hwqid in 2 steps.
20135 * The first step moves (all - low_watermark) XRIs.
20136 * The second step moves the rest of the XRIs.
20138 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
20140 struct lpfc_pbl_pool *pbl_pool;
20141 struct lpfc_pvt_pool *pvt_pool;
20142 struct lpfc_sli4_hdw_queue *qp;
20143 struct lpfc_io_buf *lpfc_ncmd;
20144 struct lpfc_io_buf *lpfc_ncmd_next;
20145 unsigned long iflag;
20146 struct list_head tmp_list;
20149 qp = &phba->sli4_hba.hdwq[hwqid];
20150 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20151 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20154 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
20155 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
20157 if (pvt_pool->count > pvt_pool->low_watermark) {
20158 /* Step 1: move (all - low_watermark) from pvt_pool
20162 /* Move low watermark of bufs from pvt_pool to tmp_list */
20163 INIT_LIST_HEAD(&tmp_list);
20164 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20165 &pvt_pool->list, list) {
20166 list_move_tail(&lpfc_ncmd->list, &tmp_list);
20168 if (tmp_count >= pvt_pool->low_watermark)
20172 /* Move all bufs from pvt_pool to pbl_pool */
20173 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20175 /* Move all bufs from tmp_list to pvt_pool */
20176 list_splice(&tmp_list, &pvt_pool->list);
20178 pbl_pool->count += (pvt_pool->count - tmp_count);
20179 pvt_pool->count = tmp_count;
20181 /* Step 2: move the rest from pvt_pool to pbl_pool */
20182 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20183 pbl_pool->count += pvt_pool->count;
20184 pvt_pool->count = 0;
20187 spin_unlock(&pvt_pool->lock);
20188 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20192 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20193 * @phba: pointer to lpfc hba data structure
20194 * @pbl_pool: specified public free XRI pool
20195 * @pvt_pool: specified private free XRI pool
20196 * @count: number of XRIs to move
20198 * This routine tries to move some free common bufs from the specified pbl_pool
20199 * to the specified pvt_pool. It might move fewer than count XRIs if there are
20200 * not enough in the public pool.
20203 * true - if XRIs are successfully moved from the specified pbl_pool to the
20204 * specified pvt_pool
20205 * false - if the specified pbl_pool is empty or locked by someone else
20208 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20209 struct lpfc_pbl_pool *pbl_pool,
20210 struct lpfc_pvt_pool *pvt_pool, u32 count)
20212 struct lpfc_io_buf *lpfc_ncmd;
20213 struct lpfc_io_buf *lpfc_ncmd_next;
20214 unsigned long iflag;
20217 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
20219 if (pbl_pool->count) {
20220 /* Move a batch of XRIs from public to private pool */
20221 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
20222 list_for_each_entry_safe(lpfc_ncmd,
20226 list_move_tail(&lpfc_ncmd->list,
20235 spin_unlock(&pvt_pool->lock);
20236 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20239 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
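/*
 * Illustrative sketch (compiled out; not part of the driver): taking the
 * donor pbl_pool lock with spin_trylock_irqsave() instead of spinning means
 * a borrower never blocks on a pool that another CPU is busy with; it just
 * reports failure and the caller moves on to the next HWQ. The nested
 * pvt_pool lock is safe because every path here takes pbl before pvt.
 * Hypothetical, simplified model:
 */
#if 0
static bool try_borrow(spinlock_t *donor, spinlock_t *taker)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(donor, flags))
		return false;	/* donor busy: try another pool instead */

	spin_lock(taker);	/* consistent donor-then-taker ordering */
	/* ... move a batch of entries from the donor list to the taker list ... */
	spin_unlock(taker);
	spin_unlock_irqrestore(donor, flags);
	return true;
}
#endif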
20246 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20247 * @phba: pointer to lpfc hba data structure.
20248 * @hwqid: belong to which HWQ.
20249 * @count: number of XRIs to move
20251 * This routine tries to find some free common bufs in one of the public pools
20252 * using a round-robin method. The search always starts from the local hwqid,
20253 * then from the HWQ found last time (rrb_next_hwqid). Once a non-empty public
20254 * pool is found, a batch of free common bufs is moved to the private pool on hwqid.
20255 * It might move fewer than count XRIs if there are not enough in the public pools.
20257 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
20259 struct lpfc_multixri_pool *multixri_pool;
20260 struct lpfc_multixri_pool *next_multixri_pool;
20261 struct lpfc_pvt_pool *pvt_pool;
20262 struct lpfc_pbl_pool *pbl_pool;
20263 struct lpfc_sli4_hdw_queue *qp;
20268 qp = &phba->sli4_hba.hdwq[hwqid];
20269 multixri_pool = qp->p_multixri_pool;
20270 pvt_pool = &multixri_pool->pvt_pool;
20271 pbl_pool = &multixri_pool->pbl_pool;
20273 /* Check if local pbl_pool is available */
20274 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
20276 #ifdef LPFC_MXP_STAT
20277 multixri_pool->local_pbl_hit_count++;
20282 hwq_count = phba->cfg_hdw_queue;
20284 /* Get the next hwqid which was found last time */
20285 next_hwqid = multixri_pool->rrb_next_hwqid;
20288 /* Go to next hwq */
20289 next_hwqid = (next_hwqid + 1) % hwq_count;
20291 next_multixri_pool =
20292 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
20293 pbl_pool = &next_multixri_pool->pbl_pool;
20295 /* Check if the public free xri pool is available */
20296 ret = _lpfc_move_xri_pbl_to_pvt(
20297 phba, qp, pbl_pool, pvt_pool, count);
20299 /* Exit while-loop if success or all hwqid are checked */
20300 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
20302 /* Starting point for the next time */
20303 multixri_pool->rrb_next_hwqid = next_hwqid;
20306 /* stats: all public pools are empty*/
20307 multixri_pool->pbl_empty_count++;
20310 #ifdef LPFC_MXP_STAT
20312 if (next_hwqid == hwqid)
20313 multixri_pool->local_pbl_hit_count++;
20315 multixri_pool->other_pbl_hit_count++;
20321 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
20322 * @phba: pointer to lpfc hba data structure.
20323 * @hwqid: belong to which HWQ.
20325 * This routine gets a batch of XRIs from the pbl_pool if the pvt_pool count
20326 * is below its low watermark.
20328 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
20330 struct lpfc_multixri_pool *multixri_pool;
20331 struct lpfc_pvt_pool *pvt_pool;
20333 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20334 pvt_pool = &multixri_pool->pvt_pool;
20336 if (pvt_pool->count < pvt_pool->low_watermark)
20337 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20341 * lpfc_release_io_buf - Return one IO buf back to free pool
20342 * @phba: pointer to lpfc hba data structure.
20343 * @lpfc_ncmd: IO buf to be returned.
20344 * @qp: belong to which HWQ.
20346 * This routine returns one IO buf back to free pool. If this is an urgent IO,
20347 * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1,
20348 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
20349 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
20350 * lpfc_io_buf_list_put.
20352 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
20353 struct lpfc_sli4_hdw_queue *qp)
20355 unsigned long iflag;
20356 struct lpfc_pbl_pool *pbl_pool;
20357 struct lpfc_pvt_pool *pvt_pool;
20358 struct lpfc_epd_pool *epd_pool;
20364 /* MUST zero fields if buffer is reused by another protocol */
20365 lpfc_ncmd->nvmeCmd = NULL;
20366 lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
20367 lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
20369 if (phba->cfg_xpsgl && !phba->nvmet_support &&
20370 !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
20371 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
20373 if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
20374 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
20376 if (phba->cfg_xri_rebalancing) {
20377 if (lpfc_ncmd->expedite) {
20378 /* Return to expedite pool */
20379 epd_pool = &phba->epd_pool;
20380 spin_lock_irqsave(&epd_pool->lock, iflag);
20381 list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
20383 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20387 /* Avoid invalid access if an IO sneaks in and is being rejected
20388 * just _after_ xri pools are destroyed in lpfc_offline.
20389 * Nothing much can be done at this point.
20391 if (!qp->p_multixri_pool)
20394 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20395 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20397 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20398 abts_io_bufs = qp->abts_scsi_io_bufs;
20399 abts_io_bufs += qp->abts_nvme_io_bufs;
20401 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
20402 xri_limit = qp->p_multixri_pool->xri_limit;
20404 #ifdef LPFC_MXP_STAT
20405 if (xri_owned <= xri_limit)
20406 qp->p_multixri_pool->below_limit_count++;
20408 qp->p_multixri_pool->above_limit_count++;
20411 /* XRI goes to either public or private free xri pool
20412 * based on watermark and xri_limit
20414 if ((pvt_pool->count < pvt_pool->low_watermark) ||
20415 (xri_owned < xri_limit &&
20416 pvt_pool->count < pvt_pool->high_watermark)) {
20417 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
20418 qp, free_pvt_pool);
20419 list_add_tail(&lpfc_ncmd->list,
20422 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20424 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
20425 qp, free_pub_pool);
20426 list_add_tail(&lpfc_ncmd->list,
20429 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20432 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
20434 list_add_tail(&lpfc_ncmd->list,
20435 &qp->lpfc_io_buf_list_put);
20437 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
20443 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
20444 * @phba: pointer to lpfc hba data structure.
20445 * @pvt_pool: pointer to private pool data structure.
20446 * @ndlp: pointer to lpfc nodelist data structure.
20448 * This routine tries to get one free IO buf from private pool.
20451 * pointer to one free IO buf - if private pool is not empty
20452 * NULL - if private pool is empty
20454 static struct lpfc_io_buf *
20455 lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
20456 struct lpfc_sli4_hdw_queue *qp,
20457 struct lpfc_pvt_pool *pvt_pool,
20458 struct lpfc_nodelist *ndlp)
20460 struct lpfc_io_buf *lpfc_ncmd;
20461 struct lpfc_io_buf *lpfc_ncmd_next;
20462 unsigned long iflag;
20464 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
20465 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20466 &pvt_pool->list, list) {
20467 if (lpfc_test_rrq_active(
20468 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
20470 list_del(&lpfc_ncmd->list);
20472 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20475 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20481 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
20482 * @phba: pointer to lpfc hba data structure.
20484 * This routine tries to get one free IO buf from expedite pool.
20487 * pointer to one free IO buf - if expedite pool is not empty
20488 * NULL - if expedite pool is empty
20490 static struct lpfc_io_buf *
20491 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
20493 struct lpfc_io_buf *lpfc_ncmd;
20494 struct lpfc_io_buf *lpfc_ncmd_next;
20495 unsigned long iflag;
20496 struct lpfc_epd_pool *epd_pool;
20498 epd_pool = &phba->epd_pool;
20501 spin_lock_irqsave(&epd_pool->lock, iflag);
20502 if (epd_pool->count > 0) {
20503 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20504 &epd_pool->list, list) {
20505 list_del(&lpfc_ncmd->list);
20510 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20516 * lpfc_get_io_buf_from_multixri_pools - Get one free IO bufs
20517 * @phba: pointer to lpfc hba data structure.
20518 * @ndlp: pointer to lpfc nodelist data structure.
20519 * @hwqid: belong to which HWQ
20520 * @expedite: 1 means this request is urgent.
20522 * This routine will do the following actions and then return a pointer to
20523 * one free IO buf:
20525 * 1. If the private free xri pool is empty, move some XRIs from the public
20526 *    to the private pool.
20527 * 2. Get one XRI from the private free xri pool.
20528 * 3. If we fail to get one from pvt_pool and this is an expedite request,
20529 * get one free xri from expedite pool.
20531 * Note: ndlp is only used on SCSI side for RRQ testing.
20532 * The caller should pass NULL for ndlp on NVME side.
20535 * pointer to one free IO buf - if private pool is not empty
20536 * NULL - if private pool is empty
20538 static struct lpfc_io_buf *
20539 lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
20540 struct lpfc_nodelist *ndlp,
20541 int hwqid, int expedite)
20543 struct lpfc_sli4_hdw_queue *qp;
20544 struct lpfc_multixri_pool *multixri_pool;
20545 struct lpfc_pvt_pool *pvt_pool;
20546 struct lpfc_io_buf *lpfc_ncmd;
20548 qp = &phba->sli4_hba.hdwq[hwqid];
20550 multixri_pool = qp->p_multixri_pool;
20551 pvt_pool = &multixri_pool->pvt_pool;
20552 multixri_pool->io_req_count++;
20554 /* If pvt_pool is empty, move some XRIs from public to private pool */
20555 if (pvt_pool->count == 0)
20556 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20558 /* Get one XRI from private free xri pool */
20559 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
20562 lpfc_ncmd->hdwq = qp;
20563 lpfc_ncmd->hdwq_no = hwqid;
20564 } else if (expedite) {
20565 /* If we fail to get one from pvt_pool and this is an expedite
20566 * request, get one free xri from expedite pool.
20568 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
20574 static inline struct lpfc_io_buf *
20575 lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
20577 struct lpfc_sli4_hdw_queue *qp;
20578 struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
20580 qp = &phba->sli4_hba.hdwq[idx];
20581 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
20582 &qp->lpfc_io_buf_list_get, list) {
20583 if (lpfc_test_rrq_active(phba, ndlp,
20584 lpfc_cmd->cur_iocbq.sli4_lxritag))
20587 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
20590 list_del_init(&lpfc_cmd->list);
20592 lpfc_cmd->hdwq = qp;
20593 lpfc_cmd->hdwq_no = idx;
20600 * lpfc_get_io_buf - Get one IO buffer from free pool
20601 * @phba: The HBA for which this call is being executed.
20602 * @ndlp: pointer to lpfc nodelist data structure.
20603 * @hwqid: belong to which HWQ
20604 * @expedite: 1 means this request is urgent.
20606 * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1,
20607 * it removes an IO buffer from the multiXRI pools; if cfg_xri_rebalancing==0,
20608 * it removes an IO buffer from the head of the @hdwq io_buf_list and returns it to the caller.
20610 * Note: ndlp is only used on SCSI side for RRQ testing.
20611 * The caller should pass NULL for ndlp on NVME side.
20615 * Pointer to lpfc_io_buf - Success
20617 struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
20618 struct lpfc_nodelist *ndlp,
20619 u32 hwqid, int expedite)
20621 struct lpfc_sli4_hdw_queue *qp;
20622 unsigned long iflag;
20623 struct lpfc_io_buf *lpfc_cmd;
20625 qp = &phba->sli4_hba.hdwq[hwqid];
20628 if (phba->cfg_xri_rebalancing)
20629 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
20630 phba, ndlp, hwqid, expedite);
20632 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
20633 qp, alloc_xri_get);
20634 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
20635 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20637 lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
20638 qp, alloc_xri_put);
20639 list_splice(&qp->lpfc_io_buf_list_put,
20640 &qp->lpfc_io_buf_list_get);
20641 qp->get_io_bufs += qp->put_io_bufs;
20642 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
20643 qp->put_io_bufs = 0;
20644 spin_unlock(&qp->io_buf_list_put_lock);
20645 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
20647 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20649 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
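/*
 * Illustrative sketch (compiled out; not part of the driver): the get/put
 * pair of lists above is a two-list scheme -- consumers drain a "get" list
 * under one lock while producers append to a "put" list under another, and
 * only when the get list runs low is the put list spliced over in one O(1)
 * operation. Hypothetical, simplified model:
 */
#if 0
struct two_list_pool {
	spinlock_t get_lock;
	spinlock_t put_lock;
	struct list_head get_list;
	struct list_head put_list;
};

static struct list_head *two_list_pop(struct two_list_pool *p)
{
	struct list_head *entry = NULL;

	spin_lock(&p->get_lock);
	if (list_empty(&p->get_list)) {
		/* refill from the producer side in one splice */
		spin_lock(&p->put_lock);
		list_splice_init(&p->put_list, &p->get_list);
		spin_unlock(&p->put_lock);
	}
	if (!list_empty(&p->get_list)) {
		entry = p->get_list.next;
		list_del_init(entry);
	}
	spin_unlock(&p->get_lock);
	return entry;
}
#endif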
20656 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
20657 * @phba: The HBA for which this call is being executed.
20658 * @lpfc_buf: IO buf structure to append the SGL chunk
20660 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
20661 * and will allocate an SGL chunk if the pool is empty.
20665 * Pointer to sli4_hybrid_sgl - Success
20667 struct sli4_hybrid_sgl *
20668 lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
20670 struct sli4_hybrid_sgl *list_entry = NULL;
20671 struct sli4_hybrid_sgl *tmp = NULL;
20672 struct sli4_hybrid_sgl *allocated_sgl = NULL;
20673 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20674 struct list_head *buf_list = &hdwq->sgl_list;
20675 unsigned long iflags;
20677 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20679 if (likely(!list_empty(buf_list))) {
20680 /* break off 1 chunk from the sgl_list */
20681 list_for_each_entry_safe(list_entry, tmp,
20682 buf_list, list_node) {
20683 list_move_tail(&list_entry->list_node,
20684 &lpfc_buf->dma_sgl_xtra_list);
20688 /* allocate more */
20689 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20690 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
20691 cpu_to_node(hdwq->io_wq->chann));
20693 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20694 "8353 error kmalloc memory for HDWQ "
20696 lpfc_buf->hdwq_no, __func__);
20700 tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
20701 GFP_ATOMIC, &tmp->dma_phys_sgl);
20702 if (!tmp->dma_sgl) {
20703 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20704 "8354 error pool_alloc memory for HDWQ "
20706 lpfc_buf->hdwq_no, __func__);
20711 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20712 list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
20715 allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
20716 struct sli4_hybrid_sgl,
20719 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20721 return allocated_sgl;
20725 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
20726 * @phba: The HBA for which this call is being executed.
20727 * @lpfc_buf: IO buf structure with the SGL chunk
20729 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
20736 lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
20739 struct sli4_hybrid_sgl *list_entry = NULL;
20740 struct sli4_hybrid_sgl *tmp = NULL;
20741 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20742 struct list_head *buf_list = &hdwq->sgl_list;
20743 unsigned long iflags;
20745 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20747 if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
20748 list_for_each_entry_safe(list_entry, tmp,
20749 &lpfc_buf->dma_sgl_xtra_list,
20751 list_move_tail(&list_entry->list_node,
20758 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20763 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
20764 * @phba: phba object
20765 * @hdwq: hdwq to cleanup sgl buff resources on
20767 * This routine frees all SGL chunks of hdwq SGL chunk pool.
20773 lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
20774 struct lpfc_sli4_hdw_queue *hdwq)
20776 struct list_head *buf_list = &hdwq->sgl_list;
20777 struct sli4_hybrid_sgl *list_entry = NULL;
20778 struct sli4_hybrid_sgl *tmp = NULL;
20779 unsigned long iflags;
20781 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20783 /* Free sgl pool */
20784 list_for_each_entry_safe(list_entry, tmp,
20785 buf_list, list_node) {
20786 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
20787 list_entry->dma_sgl,
20788 list_entry->dma_phys_sgl);
20789 list_del(&list_entry->list_node);
20793 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20797 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
20798 * @phba: The HBA for which this call is being executed.
20799 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
20801 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
20802 * and will allocate a CMD/RSP buffer if the pool is empty.
20806 * Pointer to fcp_cmd_rsp_buf - Success
20808 struct fcp_cmd_rsp_buf *
20809 lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
20810 struct lpfc_io_buf *lpfc_buf)
20812 struct fcp_cmd_rsp_buf *list_entry = NULL;
20813 struct fcp_cmd_rsp_buf *tmp = NULL;
20814 struct fcp_cmd_rsp_buf *allocated_buf = NULL;
20815 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20816 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
20817 unsigned long iflags;
20819 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20821 if (likely(!list_empty(buf_list))) {
20822 /* break off 1 chunk from the list */
20823 list_for_each_entry_safe(list_entry, tmp,
20826 list_move_tail(&list_entry->list_node,
20827 &lpfc_buf->dma_cmd_rsp_list);
20831 /* allocate more */
20832 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20833 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
20834 cpu_to_node(hdwq->io_wq->chann));
20836 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20837 "8355 error kmalloc memory for HDWQ "
20839 lpfc_buf->hdwq_no, __func__);
20843 tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
20845 &tmp->fcp_cmd_rsp_dma_handle);
20847 if (!tmp->fcp_cmnd) {
20848 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20849 "8356 error pool_alloc memory for HDWQ "
20851 lpfc_buf->hdwq_no, __func__);
20856 tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
20857 sizeof(struct fcp_cmnd));
20859 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20860 list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
20863 allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
20864 struct fcp_cmd_rsp_buf,
20867 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20869 return allocated_buf;
20873 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
20874 * @phba: The HBA for which this call is being executed.
20875 * @lpfc_buf: IO buf structure with the CMD/RSP buf
20877 * This routine puts one CMD/RSP buffer back into the hdwq's CMD/RSP pool.
20884 lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
20885 struct lpfc_io_buf *lpfc_buf)
20888 struct fcp_cmd_rsp_buf *list_entry = NULL;
20889 struct fcp_cmd_rsp_buf *tmp = NULL;
20890 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20891 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
20892 unsigned long iflags;
20894 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20896 if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
20897 list_for_each_entry_safe(list_entry, tmp,
20898 &lpfc_buf->dma_cmd_rsp_list,
20900 list_move_tail(&list_entry->list_node,
20907 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20912 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
20913 * @phba: phba object
20914 * @hdwq: hdwq to cleanup cmd rsp buff resources on
20916 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
20922 lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
20923 struct lpfc_sli4_hdw_queue *hdwq)
20925 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
20926 struct fcp_cmd_rsp_buf *list_entry = NULL;
20927 struct fcp_cmd_rsp_buf *tmp = NULL;
20928 unsigned long iflags;
20930 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20932 /* Free cmd_rsp buf pool */
20933 list_for_each_entry_safe(list_entry, tmp,
20936 dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
20937 list_entry->fcp_cmnd,
20938 list_entry->fcp_cmd_rsp_dma_handle);
20939 list_del(&list_entry->list_node);
20943 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);