/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for *
 * Fibre Channel Host Bus Adapters. *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
 * EMULEX and SLI are trademarks of Emulex. *
 * www.broadcom.com *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
 * *
 * This program is free software; you can redistribute it and/or *
 * modify it under the terms of version 2 of the GNU General *
 * Public License as published by the Free Software Foundation. *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for *
 * more details, a copy of which can be found in the file COPYING *
 * included with this package. *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/nvme-fc-driver.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;
/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_queue *eq,
				     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}
#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of words required to be copied.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * a lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
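/*
 * On 64-bit little-endian hosts the SLI4 queue entries are already in the
 * adapter's byte order, so they can be copied verbatim in 64-bit chunks.
 * All other configurations fall back to lpfc_sli_pcimem_bcopy(), which
 * copies one 32-bit word at a time and byte-swaps each word when host and
 * SLI endianness differ.
 */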
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
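	/*
	 * Note: the ring is considered full when advancing host_index would
	 * land on hba_index; one WQE slot is always left unused so that a
	 * full ring can be distinguished from an empty one.
	 */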
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
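	/*
	 * Direct Packet Push (DPP, SLI interface type 6): in addition to the
	 * normal copy into the WQ ring above, the WQE is written through a
	 * dedicated register aperture (taking advantage of write combining)
	 * so the adapter does not have to fetch it from host memory first.
	 */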
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();
	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}
/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return
 * NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return eqe;
}
/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}
static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			struct lpfc_eqe *eqe)
{
	if (!phba->sli4_hba.pc_sli4_params.eqav)
		bf_set_le32(lpfc_eqe_valid, eqe, 0);

	eq->host_index = ((eq->host_index + 1) % eq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}
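/*
 * Two consume schemes coexist above: without autovalid (eqav) support the
 * host clears each EQE's valid bit as it consumes it; with autovalid the
 * bits are left alone and the host instead flips the expected polarity
 * (qe_valid) each time the index wraps, so "valid" alternates between 1
 * and 0 on successive passes over the ring.
 */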
static void
lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe;
	uint32_t count = 0;

	/* walk all the EQ entries and drop on the floor */
	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		count++;
		eqe = lpfc_sli4_eq_get(eq);
	}

	/* Clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM);
}
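/*
 * Process entries on an EQ. The queue_claimed flag, taken atomically with
 * cmpxchg below, guarantees that only one context works the EQ at a time;
 * consumed entries are acknowledged to the HBA in notify_interval sized
 * batches, and processing yields once max_proc_limit entries have been
 * handled in a single call.
 */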
static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);

		consumed++;
		if (!(++count % eq->max_proc_limit))
			break;

		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	/* Track the max number of EQEs processed in 1 intr */
	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	eq->queue_claimed = 0;

rearm_and_exit:
	/* Always clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);

	return count;
}
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return
 * NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	cqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return cqe;
}
static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	if (!phba->sli4_hba.pc_sli4_params.cqav)
		bf_set_le32(lpfc_cqe_valid, cqe, 0);

	cq->host_index = ((cq->host_index + 1) % cq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}
/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the header queue.
 * @drqe: The Data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the rqe was copied to if
 * successful. If the queues are full this function will return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
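	/*
	 * The header and data RQs are paired one-to-one: each received frame
	 * consumes the header buffer and the data buffer at the same index,
	 * which is why the two put indexes must advance in lockstep.
	 */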
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}
/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}
/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}
/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object, else it returns NULL.
 **/
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}
/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}
/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}
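/*
 * RRQ background: after an exchange is aborted, its XRI may not be reused
 * with the same remote port until either RATOV expires or the remote port
 * accepts a Reinstate Recovery Qualifier (RRQ) ELS. The routines below
 * track such quarantined XRIs in a per-node bitmap
 * (ndlp->active_rrqs_xri_bitmap) and on the phba->active_rrq_list.
 */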
/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}
/**
 * lpfc_handle_rrq_active - Checks if an RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function checks whether each active RRQ's stop_time (RATOV from
 * when the RRQ was set active) has been reached. If it has and the
 * send_rrq flag is set then it will call lpfc_send_rrq. If the send_rrq
 * flag is not set then it will just call the routine to clear the rrq
 * and free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}
/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}
/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If ndlp is NULL remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}
/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 if the RRQ was activated for this xri,
 *         < 0 on no memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
			     msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not empty
 * then it is successful, it returns a pointer to the newly allocated sglq
 * object, else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring = NULL;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
		pring = phba->sli4_hba.nvmels_wq->pring;
	else
		pring = lpfc_phba_elsring(phba);

	lockdep_assert_held(&pring->ring_lock);

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}
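	/*
	 * Walk the free list: if the sglq at the head still has an RRQ
	 * outstanding against this ndlp's DID, rotate it to the tail and try
	 * the next one. Coming back around to start_sglq means every free
	 * XRI is RRQ-blocked for this DID, so the allocation fails.
	 */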
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}
/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns a pointer to the newly
 * allocated sglq object, else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}
/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	lockdep_assert_held(&phba->hbalock);

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		pring = phba->sli4_hba.els_wq->pring;
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->ndlp = NULL;
			sglq->state = SGL_FREED;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}
/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set in the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (pmb == NULL)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}
/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * The driver calls this function with the hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		lockdep_assert_held(&pring->ring_lock);
	else
		lockdep_assert_held(&phba->hbalock);

	BUG_ON(!piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
	pring->txcmplq_cnt++;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}
/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get the next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns the first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}
/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * the iocb slot returned by the function is not guaranteed to be available.
 * The function returns a pointer to the next available iocb slot if there
 * is an available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		     pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
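		/*
		 * The adapter DMAs its consumer index into the port group
		 * page (pgp) in host memory; the cached local_getidx is
		 * refreshed from it only when the ring appears full, so the
		 * shared location is not reread on every submission.
		 */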
		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}
/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
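		/*
		 * The hbalock is dropped across the GFP_KERNEL allocation
		 * (which may sleep); after it is re-acquired, the lookup
		 * length is rechecked below in case another thread grew the
		 * table while the lock was released.
		 */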
		new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}
/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	lockdep_assert_held(&phba->hbalock);
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;


	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform
 * firmware that there is pending work to be done for this ring and requests
 * an interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}
/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}
/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when driver
 * detects space available in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */

	if (lpfc_is_link_up(phba) &&
	    (!list_empty(&pring->txq)) &&
	    (pring->ringno != LPFC_FCP_RING ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}
1916 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1917 * @phba: Pointer to HBA context object.
1918 * @hbqno: HBQ number.
1920 * This function is called with hbalock held to get the next
1921 * available slot for the given HBQ. If a free slot is available
1922 * for the HBQ, it returns a pointer to the next available
1923 * HBQ entry; otherwise it returns NULL.
1925 static struct lpfc_hbq_entry *
1926 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1928 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1930 lockdep_assert_held(&phba->hbalock);
1932 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1933 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1934 hbqp->next_hbqPutIdx = 0;
1936 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1937 uint32_t raw_index = phba->hbq_get[hbqno];
1938 uint32_t getidx = le32_to_cpu(raw_index);
1940 hbqp->local_hbqGetIdx = getidx;
1942 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1943 lpfc_printf_log(phba, KERN_ERR,
1944 LOG_SLI | LOG_VPORT,
1945 "1802 HBQ %d: local_hbqGetIdx "
1946 "%u is > than hbqp->entry_count %u\n",
1947 hbqno, hbqp->local_hbqGetIdx,
1948 hbqp->entry_count);
1950 phba->link_state = LPFC_HBA_ERROR;
1951 return NULL;
1952 }
1954 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1955 return NULL;
1956 }
1958 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1959 hbqp->hbqPutIdx;
1960 }
1963 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
1964 * @phba: Pointer to HBA context object.
1966 * This function is called with no lock held to free all the
1967 * hbq buffers while uninitializing the SLI interface. It also
1968 * frees the HBQ buffers returned by the firmware but not yet
1969 * processed by the upper layers.
1972 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1974 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1975 struct hbq_dmabuf *hbq_buf;
1976 unsigned long flags;
1977 int i, hbq_count;
1979 hbq_count = lpfc_sli_hbq_count();
1980 /* Return all memory used by all HBQs */
1981 spin_lock_irqsave(&phba->hbalock, flags);
1982 for (i = 0; i < hbq_count; ++i) {
1983 list_for_each_entry_safe(dmabuf, next_dmabuf,
1984 &phba->hbqs[i].hbq_buffer_list, list) {
1985 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1986 list_del(&hbq_buf->dbuf.list);
1987 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1989 phba->hbqs[i].buffer_count = 0;
1992 /* Mark the HBQs not in use */
1993 phba->hbq_in_use = 0;
1994 spin_unlock_irqrestore(&phba->hbalock, flags);
1995 }
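/*
 * Illustrative note (not driver code): this routine is intended to run
 * once per HBA on the teardown path, e.g.
 *
 *	lpfc_sli_hbqbuf_free_all(phba);
 *
 * Taking the hbalock internally and clearing hbq_in_use under that lock
 * lets it serialize against any late completions that still reference
 * the hbq_buffer_list.
 */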
1998 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
1999 * @phba: Pointer to HBA context object.
2000 * @hbqno: HBQ number.
2001 * @hbq_buf: Pointer to HBQ buffer.
2003 * This function is called with the hbalock held to post an
2004 * hbq buffer to the firmware. If the function finds an empty
2005 * slot in the HBQ, it will post the buffer. The function returns
2006 * zero if it successfully posts the buffer; otherwise it returns
2007 * an error.
2010 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2011 struct hbq_dmabuf *hbq_buf)
2013 lockdep_assert_held(&phba->hbalock);
2014 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2018 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2019 * @phba: Pointer to HBA context object.
2020 * @hbqno: HBQ number.
2021 * @hbq_buf: Pointer to HBQ buffer.
2023 * This function is called with the hbalock held to post an hbq buffer to the
2024 * firmware. If the function finds an empty slot in the HBQ, it will post the
2025 * buffer and place it on the hbq_buffer_list. The function returns zero if
2026 * it successfully posts the buffer; otherwise it returns an error.
2029 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2030 struct hbq_dmabuf *hbq_buf)
2032 struct lpfc_hbq_entry *hbqe;
2033 dma_addr_t physaddr = hbq_buf->dbuf.phys;
2035 lockdep_assert_held(&phba->hbalock);
2036 /* Get next HBQ entry slot to use */
2037 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2038 if (hbqe) {
2039 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2041 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2042 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
2043 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2044 hbqe->bde.tus.f.bdeFlags = 0;
2045 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2046 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2048 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2049 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2051 readl(phba->hbq_put + hbqno);
2052 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2053 return 0;
2054 } else
2055 return -ENOMEM;
2056 }
2059 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2060 * @phba: Pointer to HBA context object.
2061 * @hbqno: HBQ number.
2062 * @hbq_buf: Pointer to HBQ buffer.
2064 * This function is called with the hbalock held to post an RQE to the SLI4
2065 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2066 * the hbq_buffer_list and return zero, otherwise it will return an error.
2069 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2070 struct hbq_dmabuf *hbq_buf)
2071 {
2072 int rc;
2073 struct lpfc_rqe hrqe;
2074 struct lpfc_rqe drqe;
2075 struct lpfc_queue *hrq;
2076 struct lpfc_queue *drq;
2078 if (hbqno != LPFC_ELS_HBQ)
2079 return 1;
2080 hrq = phba->sli4_hba.hdr_rq;
2081 drq = phba->sli4_hba.dat_rq;
2083 lockdep_assert_held(&phba->hbalock);
2084 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2085 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2086 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2087 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2088 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2089 if (rc < 0)
2090 return rc;
2091 hbq_buf->tag = (rc | (hbqno << 16));
2092 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2093 return 0;
2094 }
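/*
 * Worked example (informational only): on success lpfc_sli4_rq_put()
 * returns the RQE index, and the tag assignment above packs that index
 * together with the HBQ number. Taking hbqno == 0 purely for
 * illustration, with an index of 5:
 *
 *	tag = 5 | (0 << 16);	low 16 bits: index, upper bits: hbqno
 *	hbqno = tag >> 16;	recovers the HBQ number later
 *
 * The same ">> 16" decode is used when a buffer comes back, e.g. in
 * lpfc_sli_hbqbuf_find() below.
 */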
2096 /* HBQ for ELS and CT traffic. */
2097 static struct lpfc_hbq_init lpfc_els_hbq = {
2102 .ring_mask = (1 << LPFC_ELS_RING),
2106 };
2108 /* Array of HBQs */
2109 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2110 &lpfc_els_hbq,
2111 };
2114 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2115 * @phba: Pointer to HBA context object.
2116 * @hbqno: HBQ number.
2117 * @count: Number of HBQ buffers to be posted.
2119 * This function is called with no lock held to post more hbq buffers to the
2120 * given HBQ. The function returns the number of HBQ buffers successfully
2121 * posted.
2124 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2126 uint32_t i, posted = 0;
2127 unsigned long flags;
2128 struct hbq_dmabuf *hbq_buffer;
2129 LIST_HEAD(hbq_buf_list);
2130 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2131 return 0;
2133 if ((phba->hbqs[hbqno].buffer_count + count) >
2134 lpfc_hbq_defs[hbqno]->entry_count)
2135 count = lpfc_hbq_defs[hbqno]->entry_count -
2136 phba->hbqs[hbqno].buffer_count;
2139 /* Allocate HBQ entries */
2140 for (i = 0; i < count; i++) {
2141 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2142 if (!hbq_buffer)
2143 break;
2144 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2145 }
2146 /* Check whether HBQ is still in use */
2147 spin_lock_irqsave(&phba->hbalock, flags);
2148 if (!phba->hbq_in_use)
2149 goto err;
2150 while (!list_empty(&hbq_buf_list)) {
2151 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2152 dbuf.list);
2153 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2154 (hbqno << 16));
2155 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2156 phba->hbqs[hbqno].buffer_count++;
2157 posted++;
2158 } else
2159 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2160 }
2161 spin_unlock_irqrestore(&phba->hbalock, flags);
2162 return posted;
2163 err:
2164 spin_unlock_irqrestore(&phba->hbalock, flags);
2165 while (!list_empty(&hbq_buf_list)) {
2166 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2167 dbuf.list);
2168 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2169 }
2170 return 0;
2171 }
2174 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2175 * @phba: Pointer to HBA context object.
2176 * @qno: HBQ queue number.
2177 *
2178 * This function posts more buffers to the HBQ. This function
2179 * is called with no lock held. The function returns the number of HBQ entries
2180 * successfully allocated.
2183 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2185 if (phba->sli_rev == LPFC_SLI_REV4)
2186 return 0;
2187 else
2188 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2189 lpfc_hbq_defs[qno]->add_count);
2190 }
2193 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2194 * @phba: Pointer to HBA context object.
2195 * @qno: HBQ queue number.
2197 * This function is called from SLI initialization code path with
2198 * no lock held to post initial HBQ buffers to firmware. The
2199 * function returns the number of HBQ entries successfully allocated.
2202 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2204 if (phba->sli_rev == LPFC_SLI_REV4)
2205 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2206 lpfc_hbq_defs[qno]->entry_count);
2207 else
2208 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2209 lpfc_hbq_defs[qno]->init_count);
2210 }
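/*
 * Usage sketch (hypothetical call sites, not driver code): the two
 * wrappers above differ only in how many buffers they ask
 * lpfc_sli_hbqbuf_fill_hbqs() to post:
 *
 *	posted = lpfc_sli_hbqbuf_init_hbqs(phba, qno);	initial bring-up
 *	posted = lpfc_sli_hbqbuf_add_hbqs(phba, qno);	replenish path
 *
 * Both return the number of buffers actually posted, which can be less
 * than requested if allocation fails or the HBQ is already full.
 */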
2213 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2214 * @rb_list: Pointer to the hbq buffer list to remove the first buffer from.
2217 * This function removes the first hbq buffer on an hbq list and returns a
2218 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2220 static struct hbq_dmabuf *
2221 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2223 struct lpfc_dmabuf *d_buf;
2225 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2226 if (!d_buf)
2227 return NULL;
2228 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2229 }
2232 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2233 * @phba: Pointer to HBA context object.
2234 * @hrq: Pointer to the receive queue to remove the first buffer from.
2236 * This function removes the first RQ buffer on an RQ buffer list and returns a
2237 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2239 static struct rqb_dmabuf *
2240 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2242 struct lpfc_dmabuf *h_buf;
2243 struct lpfc_rqb *rqbp;
2245 rqbp = hrq->rqbp;
2246 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2247 struct lpfc_dmabuf, list);
2248 if (!h_buf)
2249 return NULL;
2250 rqbp->buffer_count--;
2251 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2252 }
2255 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2256 * @phba: Pointer to HBA context object.
2257 * @tag: Tag of the hbq buffer.
2259 * This function searches for the hbq buffer associated with the given tag in
2260 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2261 * otherwise it returns NULL.
2263 static struct hbq_dmabuf *
2264 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2266 struct lpfc_dmabuf *d_buf;
2267 struct hbq_dmabuf *hbq_buf;
2268 uint32_t hbqno;
2270 hbqno = tag >> 16;
2271 if (hbqno >= LPFC_MAX_HBQS)
2272 return NULL;
2274 spin_lock_irq(&phba->hbalock);
2275 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2276 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2277 if (hbq_buf->tag == tag) {
2278 spin_unlock_irq(&phba->hbalock);
2279 return hbq_buf;
2280 }
2281 }
2282 spin_unlock_irq(&phba->hbalock);
2283 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
2284 "1803 Bad hbq tag. Data: x%x x%x\n",
2285 tag, phba->hbqs[tag >> 16].buffer_count);
2286 return NULL;
2287 }
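/*
 * Worked example (informational only): a tag of 0x0001000a decodes as
 * hbqno = 0x0001000a >> 16 = 1 with buffer index 0x000a, so the search
 * above walks hbqs[1].hbq_buffer_list comparing the full 32-bit tag.
 */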
2290 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2291 * @phba: Pointer to HBA context object.
2292 * @hbq_buffer: Pointer to HBQ buffer.
2294 * This function is called with the hbalock held. This function gives back
2295 * the hbq buffer to firmware. If the HBQ does not have space to
2296 * post the buffer, it will free the buffer.
2299 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2300 {
2301 uint32_t hbqno;
2303 if (hbq_buffer) {
2304 hbqno = hbq_buffer->tag >> 16;
2305 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2306 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2307 }
2308 }
2311 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2312 * @mbxCommand: mailbox command code.
2314 * This function is called by the mailbox event handler function to verify
2315 * that the completed mailbox command is a legitimate mailbox command. If the
2316 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2317 * and the mailbox event handler will take the HBA offline.
2320 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2321 {
2322 uint8_t ret;
2324 switch (mbxCommand) {
2328 case MBX_WRITE_VPARMS:
2329 case MBX_RUN_BIU_DIAG:
2332 case MBX_CONFIG_LINK:
2333 case MBX_CONFIG_RING:
2334 case MBX_RESET_RING:
2335 case MBX_READ_CONFIG:
2336 case MBX_READ_RCONFIG:
2337 case MBX_READ_SPARM:
2338 case MBX_READ_STATUS:
2342 case MBX_READ_LNK_STAT:
2344 case MBX_UNREG_LOGIN:
2346 case MBX_DUMP_MEMORY:
2347 case MBX_DUMP_CONTEXT:
2350 case MBX_UPDATE_CFG:
2352 case MBX_DEL_LD_ENTRY:
2353 case MBX_RUN_PROGRAM:
2355 case MBX_SET_VARIABLE:
2356 case MBX_UNREG_D_ID:
2357 case MBX_KILL_BOARD:
2358 case MBX_CONFIG_FARP:
2361 case MBX_RUN_BIU_DIAG64:
2362 case MBX_CONFIG_PORT:
2363 case MBX_READ_SPARM64:
2364 case MBX_READ_RPI64:
2365 case MBX_REG_LOGIN64:
2366 case MBX_READ_TOPOLOGY:
2369 case MBX_LOAD_EXP_ROM:
2370 case MBX_ASYNCEVT_ENABLE:
2374 case MBX_PORT_CAPABILITIES:
2375 case MBX_PORT_IOV_CONTROL:
2376 case MBX_SLI4_CONFIG:
2377 case MBX_SLI4_REQ_FTRS:
2379 case MBX_UNREG_FCFI:
2384 case MBX_RESUME_RPI:
2385 case MBX_READ_EVENT_LOG_STATUS:
2386 case MBX_READ_EVENT_LOG:
2387 case MBX_SECURITY_MGMT:
2389 case MBX_ACCESS_VDATA:
2390 ret = mbxCommand;
2391 break;
2392 default:
2393 ret = MBX_SHUTDOWN;
2394 break;
2395 }
2396 return ret;
2397 }
2400 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2401 * @phba: Pointer to HBA context object.
2402 * @pmboxq: Pointer to mailbox command.
2404 * This is completion handler function for mailbox commands issued from
2405 * lpfc_sli_issue_mbox_wait function. This function is called by the
2406 * mailbox event handler function with no lock held. This function
2407 * will wake up the thread waiting on the completion pointed to by
2408 * context3 of the mailbox.
2411 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2413 unsigned long drvr_flag;
2414 struct completion *pmbox_done;
2417 * If pmbox_done is empty, the driver thread gave up waiting and
2418 * continued running.
2420 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2421 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2422 pmbox_done = (struct completion *)pmboxq->context3;
2423 if (pmbox_done)
2424 complete(pmbox_done);
2425 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2426 return;
2427 }
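/*
 * Illustrative sketch (hypothetical waiter, not driver code): the
 * issuing side pairs with this handler roughly as follows, using
 * context3 to carry the completion object:
 *
 *	DECLARE_COMPLETION_ONSTACK(mbox_done);
 *
 *	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 *	pmboxq->context3 = &mbox_done;
 *	(issue the mailbox with MBX_NOWAIT)
 *	wait_for_completion_timeout(&mbox_done, timeout);
 *	pmboxq->context3 = NULL;
 *
 * Clearing context3 under the hbalock is what makes the "gave up
 * waiting" case described above safe against a late completion.
 */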
2431 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2432 * @phba: Pointer to HBA context object.
2433 * @pmb: Pointer to mailbox object.
2435 * This function is the default mailbox completion handler. It
2436 * frees the memory resources associated with the completed mailbox
2437 * command. If the completed command is a REG_LOGIN mailbox command,
2438 * this function will issue a UNREG_LOGIN to re-claim the RPI.
2441 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2443 struct lpfc_vport *vport = pmb->vport;
2444 struct lpfc_dmabuf *mp;
2445 struct lpfc_nodelist *ndlp;
2446 struct Scsi_Host *shost;
2447 uint16_t rpi, vpi;
2448 int rc;
2450 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
2451 if (mp) {
2453 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2454 kfree(mp);
2455 }
2457 /*
2458 * If a REG_LOGIN succeeded after the node was destroyed or the node
2459 * is in re-discovery, the driver needs to clean up the RPI.
2460 */
2461 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2462 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2463 !pmb->u.mb.mbxStatus) {
2464 rpi = pmb->u.mb.un.varWords[0];
2465 vpi = pmb->u.mb.un.varRegLogin.vpi;
2466 lpfc_unreg_login(phba, vpi, rpi, pmb);
2467 pmb->vport = vport;
2468 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2469 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2470 if (rc != MBX_NOT_FINISHED)
2471 return;
2472 }
2474 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2475 !(phba->pport->load_flag & FC_UNLOADING) &&
2476 !pmb->u.mb.mbxStatus) {
2477 shost = lpfc_shost_from_vport(vport);
2478 spin_lock_irq(shost->host_lock);
2479 vport->vpi_state |= LPFC_VPI_REGISTERED;
2480 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2481 spin_unlock_irq(shost->host_lock);
2484 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2485 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2486 lpfc_nlp_put(ndlp);
2487 pmb->ctx_buf = NULL;
2488 pmb->ctx_ndlp = NULL;
2489 }
2491 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2492 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2494 /* Check to see if there are any deferred events to process */
2495 if (ndlp) {
2496 lpfc_printf_vlog(
2497 vport,
2498 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2499 "1438 UNREG cmpl deferred mbox x%x "
2500 "on NPort x%x Data: x%x x%x %p\n",
2501 ndlp->nlp_rpi, ndlp->nlp_DID,
2502 ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
2504 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2505 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2506 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2507 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2508 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2509 } else {
2510 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2511 lpfc_sli4_free_rpi(vport->phba,
2512 ndlp->nlp_rpi);
2513 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2514 }
2515 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2516 }
2517 pmb->ctx_ndlp = NULL;
2518 }
2519 }
2521 /* Check security permission status on INIT_LINK mailbox command */
2522 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2523 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2524 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2525 "2860 SLI authentication is required "
2526 "for INIT_LINK but has not done yet\n");
2528 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2529 lpfc_sli4_mbox_cmd_free(phba, pmb);
2530 else
2531 mempool_free(pmb, phba->mbox_mem_pool);
2532 }
2534 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2535 * @phba: Pointer to HBA context object.
2536 * @pmb: Pointer to mailbox object.
2538 * This function is the unreg rpi mailbox completion handler. It
2539 * frees the memory resources associated with the completed mailbox
2540 * command. An additional reference is put on the ndlp to prevent
2541 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2542 * the unreg mailbox command completes; this routine releases that
2543 * reference once the completion has been handled.
2547 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2549 struct lpfc_vport *vport = pmb->vport;
2550 struct lpfc_nodelist *ndlp;
2552 ndlp = pmb->ctx_ndlp;
2553 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2554 if (phba->sli_rev == LPFC_SLI_REV4 &&
2555 (bf_get(lpfc_sli_intf_if_type,
2556 &phba->sli4_hba.sli_intf) >=
2557 LPFC_SLI_INTF_IF_TYPE_2)) {
2558 if (ndlp) {
2559 lpfc_printf_vlog(
2560 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2561 "0010 UNREG_LOGIN vpi:%x "
2562 "rpi:%x DID:%x defer x%x flg x%x "
2564 vport->vpi, ndlp->nlp_rpi,
2565 ndlp->nlp_DID, ndlp->nlp_defer_did,
2567 ndlp->nlp_usg_map, ndlp);
2568 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2571 /* Check to see if there are any deferred
2572 * events to process
2573 */
2574 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2575 (ndlp->nlp_defer_did !=
2576 NLP_EVT_NOTHING_PENDING)) {
2578 vport, KERN_INFO, LOG_DISCOVERY,
2579 "4111 UNREG cmpl deferred "
2581 "NPort x%x Data: x%x %p\n",
2582 ndlp->nlp_rpi, ndlp->nlp_DID,
2583 ndlp->nlp_defer_did, ndlp);
2584 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2585 ndlp->nlp_defer_did =
2586 NLP_EVT_NOTHING_PENDING;
2587 lpfc_issue_els_plogi(
2588 vport, ndlp->nlp_DID, 0);
2589 } else {
2590 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2591 lpfc_sli4_free_rpi(vport->phba,
2592 ndlp->nlp_rpi);
2593 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2594 }
2597 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2598 }
2599 }
2600 }
2601 }
2603 mempool_free(pmb, phba->mbox_mem_pool);
2604 }
2607 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2608 * @phba: Pointer to HBA context object.
2610 * This function is called with no lock held. This function processes all
2611 * the completed mailbox commands and gives them to the upper layers. The interrupt
2612 * service routine processes mailbox completion interrupt and adds completed
2613 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2614 * The worker thread calls lpfc_sli_handle_mb_event, which will return the
2615 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
2616 * function returns the mailbox commands to the upper layer by calling the
2617 * completion handler function of each mailbox.
2620 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2621 {
2622 MAILBOX_t *pmbox;
2623 LPFC_MBOXQ_t *pmb;
2624 int rc;
2625 LIST_HEAD(cmplq);
2627 phba->sli.slistat.mbox_event++;
2629 /* Get all completed mailbox buffers into the cmplq */
2630 spin_lock_irq(&phba->hbalock);
2631 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2632 spin_unlock_irq(&phba->hbalock);
2634 /* Get a Mailbox buffer to setup mailbox commands for callback */
2635 do {
2636 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2637 if (pmb == NULL)
2638 break;
2640 pmbox = &pmb->u.mb;
2642 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2643 if (pmb->vport) {
2644 lpfc_debugfs_disc_trc(pmb->vport,
2645 LPFC_DISC_TRC_MBOX_VPORT,
2646 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2647 (uint32_t)pmbox->mbxCommand,
2648 pmbox->un.varWords[0],
2649 pmbox->un.varWords[1]);
2650 } else {
2652 lpfc_debugfs_disc_trc(phba->pport,
2653 LPFC_DISC_TRC_MBOX,
2654 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2655 (uint32_t)pmbox->mbxCommand,
2656 pmbox->un.varWords[0],
2657 pmbox->un.varWords[1]);
2658 }
2659 }
2661 /*
2662 * It is a fatal error if an unknown mbox command completes.
2663 */
2664 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2665 MBX_SHUTDOWN) {
2666 /* Unknown mailbox command compl */
2667 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2668 "(%d):0323 Unknown Mailbox command "
2669 "x%x (x%x/x%x) Cmpl\n",
2670 pmb->vport ? pmb->vport->vpi : 0,
2671 pmbox->mbxCommand,
2672 lpfc_sli_config_mbox_subsys_get(phba,
2673 pmb),
2674 lpfc_sli_config_mbox_opcode_get(phba,
2675 pmb));
2676 phba->link_state = LPFC_HBA_ERROR;
2677 phba->work_hs = HS_FFER3;
2678 lpfc_handle_eratt(phba);
2679 continue;
2680 }
2682 if (pmbox->mbxStatus) {
2683 phba->sli.slistat.mbox_stat_err++;
2684 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2685 /* Mbox cmd cmpl error - RETRYing */
2686 lpfc_printf_log(phba, KERN_INFO,
2688 "(%d):0305 Mbox cmd cmpl "
2689 "error - RETRYing Data: x%x "
2690 "(x%x/x%x) x%x x%x x%x\n",
2691 pmb->vport ? pmb->vport->vpi : 0,
2692 pmbox->mbxCommand,
2693 lpfc_sli_config_mbox_subsys_get(phba,
2694 pmb),
2695 lpfc_sli_config_mbox_opcode_get(phba,
2696 pmb),
2697 pmbox->mbxStatus,
2698 pmbox->un.varWords[0],
2699 pmb->vport->port_state);
2700 pmbox->mbxStatus = 0;
2701 pmbox->mbxOwner = OWN_HOST;
2702 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2703 if (rc != MBX_NOT_FINISHED)
2704 continue;
2705 }
2706 }
2708 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2709 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2710 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2711 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2713 pmb->vport ? pmb->vport->vpi : 0,
2715 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2716 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2718 *((uint32_t *) pmbox),
2719 pmbox->un.varWords[0],
2720 pmbox->un.varWords[1],
2721 pmbox->un.varWords[2],
2722 pmbox->un.varWords[3],
2723 pmbox->un.varWords[4],
2724 pmbox->un.varWords[5],
2725 pmbox->un.varWords[6],
2726 pmbox->un.varWords[7],
2727 pmbox->un.varWords[8],
2728 pmbox->un.varWords[9],
2729 pmbox->un.varWords[10]);
2731 if (pmb->mbox_cmpl)
2732 pmb->mbox_cmpl(phba, pmb);
2733 } while (1);
2734 return 0;
2735 }
2738 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2739 * @phba: Pointer to HBA context object.
2740 * @pring: Pointer to driver SLI ring object.
2741 * @tag: buffer tag.
2743 * This function is called with no lock held. When QUE_BUFTAG_BIT bit
2744 * is set in the tag the buffer is posted for a particular exchange,
2745 * the function will return the buffer without replacing the buffer.
2746 * If the buffer is for unsolicited ELS or CT traffic, this function
2747 * returns the buffer and also posts another buffer to the firmware.
2749 static struct lpfc_dmabuf *
2750 lpfc_sli_get_buff(struct lpfc_hba *phba,
2751 struct lpfc_sli_ring *pring,
2752 uint32_t tag)
2753 {
2754 struct hbq_dmabuf *hbq_entry;
2756 if (tag & QUE_BUFTAG_BIT)
2757 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2758 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2759 if (!hbq_entry)
2760 return NULL;
2761 return &hbq_entry->dbuf;
2762 }
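/*
 * Worked example (informational only): the tag's QUE_BUFTAG_BIT picks
 * the lookup path above; for instance a tag of (QUE_BUFTAG_BIT | 0x12)
 * is resolved through the ring's tagged-buffer list, while a plain HBQ
 * tag such as 0x00010003 is resolved through lpfc_sli_hbqbuf_find().
 */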
2765 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2766 * @phba: Pointer to HBA context object.
2767 * @pring: Pointer to driver SLI ring object.
2768 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2769 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2770 * @fch_type: the type for the first frame of the sequence.
2772 * This function is called with no lock held. This function uses the r_ctl and
2773 * type of the received sequence to find the correct callback function to call
2774 * to process the sequence.
2777 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2778 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2779 uint32_t fch_type)
2780 {
2781 int i;
2783 switch (fch_type) {
2784 case FC_TYPE_NVME:
2785 lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
2786 return 1;
2787 default:
2788 break;
2789 }
2791 /* unSolicited Responses */
2792 if (pring->prt[0].profile) {
2793 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2794 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2795 saveq);
2796 return 1;
2797 }
2798 /* We must search, based on rctl / type
2799 for the right routine */
2800 for (i = 0; i < pring->num_mask; i++) {
2801 if ((pring->prt[i].rctl == fch_r_ctl) &&
2802 (pring->prt[i].type == fch_type)) {
2803 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2804 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2805 (phba, pring, saveq);
2806 return 1;
2807 }
2808 }
2809 return 0;
2810 }
2813 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
2814 * @phba: Pointer to HBA context object.
2815 * @pring: Pointer to driver SLI ring object.
2816 * @saveq: Pointer to the unsolicited iocb.
2818 * This function is called with no lock held by the ring event handler
2819 * when there is an unsolicited iocb posted to the response ring by the
2820 * firmware. This function gets the buffer associated with the iocbs
2821 * and calls the event handler for the ring. This function handles both
2822 * qring buffers and hbq buffers.
2823 * When the function returns 1 the caller can free the iocb object otherwise
2824 * upper layer functions will free the iocb objects.
2827 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2828 struct lpfc_iocbq *saveq)
2829 {
2830 IOCB_t *irsp;
2831 WORD5 *w5p;
2832 uint32_t Rctl, Type;
2833 struct lpfc_iocbq *iocbq;
2834 struct lpfc_dmabuf *dmzbuf;
2836 irsp = &(saveq->iocb);
2838 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2839 if (pring->lpfc_sli_rcv_async_status)
2840 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2841 else
2842 lpfc_printf_log(phba,
2843 KERN_WARNING,
2844 LOG_SLI,
2845 "0316 Ring %d handler: unexpected "
2846 "ASYNC_STATUS iocb received evt_code "
2847 "0x%x\n",
2848 pring->ringno,
2849 irsp->un.asyncstat.evt_code);
2850 return 1;
2851 }
2853 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2854 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2855 if (irsp->ulpBdeCount > 0) {
2856 dmzbuf = lpfc_sli_get_buff(phba, pring,
2857 irsp->un.ulpWord[3]);
2858 lpfc_in_buf_free(phba, dmzbuf);
2859 }
2861 if (irsp->ulpBdeCount > 1) {
2862 dmzbuf = lpfc_sli_get_buff(phba, pring,
2863 irsp->unsli3.sli3Words[3]);
2864 lpfc_in_buf_free(phba, dmzbuf);
2865 }
2867 if (irsp->ulpBdeCount > 2) {
2868 dmzbuf = lpfc_sli_get_buff(phba, pring,
2869 irsp->unsli3.sli3Words[7]);
2870 lpfc_in_buf_free(phba, dmzbuf);
2871 }
2873 return 1;
2874 }
2876 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2877 if (irsp->ulpBdeCount != 0) {
2878 saveq->context2 = lpfc_sli_get_buff(phba, pring,
2879 irsp->un.ulpWord[3]);
2880 if (!saveq->context2)
2881 lpfc_printf_log(phba,
2882 KERN_ERR,
2883 LOG_SLI,
2884 "0341 Ring %d Cannot find buffer for "
2885 "an unsolicited iocb. tag 0x%x\n",
2886 pring->ringno,
2887 irsp->un.ulpWord[3]);
2888 }
2889 if (irsp->ulpBdeCount == 2) {
2890 saveq->context3 = lpfc_sli_get_buff(phba, pring,
2891 irsp->unsli3.sli3Words[7]);
2892 if (!saveq->context3)
2893 lpfc_printf_log(phba,
2894 KERN_ERR,
2895 LOG_SLI,
2896 "0342 Ring %d Cannot find buffer for an"
2897 " unsolicited iocb. tag 0x%x\n",
2898 pring->ringno,
2899 irsp->unsli3.sli3Words[7]);
2900 }
2901 list_for_each_entry(iocbq, &saveq->list, list) {
2902 irsp = &(iocbq->iocb);
2903 if (irsp->ulpBdeCount != 0) {
2904 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2905 irsp->un.ulpWord[3]);
2906 if (!iocbq->context2)
2907 lpfc_printf_log(phba,
2908 KERN_ERR,
2909 LOG_SLI,
2910 "0343 Ring %d Cannot find "
2911 "buffer for an unsolicited iocb"
2912 ". tag 0x%x\n", pring->ringno,
2913 irsp->un.ulpWord[3]);
2914 }
2915 if (irsp->ulpBdeCount == 2) {
2916 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2917 irsp->unsli3.sli3Words[7]);
2918 if (!iocbq->context3)
2919 lpfc_printf_log(phba,
2920 KERN_ERR,
2921 LOG_SLI,
2922 "0344 Ring %d Cannot find "
2923 "buffer for an unsolicited "
2924 "iocb. tag 0x%x\n",
2925 pring->ringno,
2926 irsp->unsli3.sli3Words[7]);
2927 }
2928 }
2929 }
2930 if (irsp->ulpBdeCount != 0 &&
2931 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2932 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2933 int found = 0;
2935 /* search continue save q for same XRI */
2936 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2937 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2938 saveq->iocb.unsli3.rcvsli3.ox_id) {
2939 list_add_tail(&saveq->list, &iocbq->list);
2940 found = 1;
2941 break;
2942 }
2943 }
2944 if (!found)
2945 list_add_tail(&saveq->clist,
2946 &pring->iocb_continue_saveq);
2947 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2948 list_del_init(&iocbq->clist);
2949 saveq = iocbq;
2950 irsp = &(saveq->iocb);
2951 } else
2952 return 0;
2953 }
2954 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2955 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2956 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2957 Rctl = FC_RCTL_ELS_REQ;
2958 Type = FC_TYPE_ELS;
2959 } else {
2960 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2961 Rctl = w5p->hcsw.Rctl;
2962 Type = w5p->hcsw.Type;
2964 /* Firmware Workaround */
2965 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2966 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2967 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2968 Rctl = FC_RCTL_ELS_REQ;
2969 Type = FC_TYPE_ELS;
2970 w5p->hcsw.Rctl = Rctl;
2971 w5p->hcsw.Type = Type;
2972 }
2973 }
2975 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
2976 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2977 "0313 Ring %d handler: unexpected Rctl x%x "
2978 "Type x%x received\n",
2979 pring->ringno, Rctl, Type);
2981 return 1;
2982 }
2985 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
2986 * @phba: Pointer to HBA context object.
2987 * @pring: Pointer to driver SLI ring object.
2988 * @prspiocb: Pointer to response iocb object.
2990 * This function looks up the iocb_lookup table to get the command iocb
2991 * corresponding to the given response iocb using the iotag of the
2992 * response iocb. The driver calls this function with the hbalock held
2993 * for SLI3 ports or the ring lock held for SLI4 ports.
2994 * This function returns the command iocb object if it finds the command
2995 * iocb else returns NULL.
2997 static struct lpfc_iocbq *
2998 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2999 struct lpfc_sli_ring *pring,
3000 struct lpfc_iocbq *prspiocb)
3002 struct lpfc_iocbq *cmd_iocb = NULL;
3003 uint16_t iotag;
3004 spinlock_t *temp_lock = NULL;
3005 unsigned long iflag = 0;
3007 if (phba->sli_rev == LPFC_SLI_REV4)
3008 temp_lock = &pring->ring_lock;
3009 else
3010 temp_lock = &phba->hbalock;
3012 spin_lock_irqsave(temp_lock, iflag);
3013 iotag = prspiocb->iocb.ulpIoTag;
3015 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3016 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3017 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3018 /* remove from txcmpl queue list */
3019 list_del_init(&cmd_iocb->list);
3020 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3021 pring->txcmplq_cnt--;
3022 spin_unlock_irqrestore(temp_lock, iflag);
3023 return cmd_iocb;
3024 }
3025 }
3027 spin_unlock_irqrestore(temp_lock, iflag);
3028 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3029 "0317 iotag x%x is out of "
3030 "range: max iotag x%x wd0 x%x\n",
3031 iotag, phba->sli.last_iotag,
3032 *(((uint32_t *) &prspiocb->iocb) + 7));
3033 return NULL;
3034 }
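/*
 * Illustrative sketch (not driver code): iocbq_lookup is a flat array
 * indexed by iotag, so the lookup above is O(1):
 *
 *	if (iotag != 0 && iotag <= phba->sli.last_iotag)
 *		cmd_iocb = phba->sli.iocbq_lookup[iotag];
 *
 * The LPFC_IO_ON_TXCMPLQ test then guards against a stale response
 * whose iotag already addresses a recycled command iocb.
 */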
3037 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3038 * @phba: Pointer to HBA context object.
3039 * @pring: Pointer to driver SLI ring object.
3040 * @iotag: IOCB tag.
3042 * This function looks up the iocb_lookup table to get the command iocb
3043 * corresponding to the given iotag. The driver calls this function with
3044 * the ring lock held because this function is an SLI4 port only helper.
3045 * This function returns the command iocb object if it finds the command
3046 * iocb else returns NULL.
3048 static struct lpfc_iocbq *
3049 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3050 struct lpfc_sli_ring *pring, uint16_t iotag)
3052 struct lpfc_iocbq *cmd_iocb = NULL;
3053 spinlock_t *temp_lock = NULL;
3054 unsigned long iflag = 0;
3056 if (phba->sli_rev == LPFC_SLI_REV4)
3057 temp_lock = &pring->ring_lock;
3058 else
3059 temp_lock = &phba->hbalock;
3061 spin_lock_irqsave(temp_lock, iflag);
3062 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3063 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3064 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3065 /* remove from txcmpl queue list */
3066 list_del_init(&cmd_iocb->list);
3067 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3068 pring->txcmplq_cnt--;
3069 spin_unlock_irqrestore(temp_lock, iflag);
3070 return cmd_iocb;
3071 }
3072 }
3074 spin_unlock_irqrestore(temp_lock, iflag);
3075 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3076 "0372 iotag x%x lookup error: max iotag (x%x) "
3078 iotag, phba->sli.last_iotag,
3079 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3084 * lpfc_sli_process_sol_iocb - process solicited iocb completion
3085 * @phba: Pointer to HBA context object.
3086 * @pring: Pointer to driver SLI ring object.
3087 * @saveq: Pointer to the response iocb to be processed.
3089 * This function is called by the ring event handler for non-fcp
3090 * rings when there is a new response iocb in the response ring.
3091 * The caller is not required to hold any locks. This function
3092 * gets the command iocb associated with the response iocb and
3093 * calls the completion handler for the command iocb. If there
3094 * is no completion handler, the function will free the resources
3095 * associated with command iocb. If the response iocb is for
3096 * an already aborted command iocb, the status of the completion
3097 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3098 * This function always returns 1.
3101 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3102 struct lpfc_iocbq *saveq)
3103 {
3104 struct lpfc_iocbq *cmdiocbp;
3105 int rc = 1;
3106 unsigned long iflag;
3108 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3109 if (cmdiocbp) {
3110 if (cmdiocbp->iocb_cmpl) {
3111 /*
3112 * If an ELS command failed send an event to mgmt
3113 * application.
3114 */
3115 if (saveq->iocb.ulpStatus &&
3116 (pring->ringno == LPFC_ELS_RING) &&
3117 (cmdiocbp->iocb.ulpCommand ==
3118 CMD_ELS_REQUEST64_CR))
3119 lpfc_send_els_failure_event(phba,
3120 cmdiocbp, saveq);
3122 /*
3123 * Post all ELS completions to the worker thread.
3124 * All others are passed to the completion callback.
3125 */
3126 if (pring->ringno == LPFC_ELS_RING) {
3127 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3128 (cmdiocbp->iocb_flag &
3129 LPFC_DRIVER_ABORTED)) {
3130 spin_lock_irqsave(&phba->hbalock,
3131 iflag);
3132 cmdiocbp->iocb_flag &=
3133 ~LPFC_DRIVER_ABORTED;
3134 spin_unlock_irqrestore(&phba->hbalock,
3135 iflag);
3136 saveq->iocb.ulpStatus =
3137 IOSTAT_LOCAL_REJECT;
3138 saveq->iocb.un.ulpWord[4] =
3139 IOERR_SLI_ABORTED;
3141 /* Firmware could still be in progress
3142 * of DMAing payload, so don't free data
3143 * buffer till after a hbeat.
3144 */
3145 spin_lock_irqsave(&phba->hbalock,
3146 iflag);
3147 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
3148 spin_unlock_irqrestore(&phba->hbalock,
3149 iflag);
3150 }
3151 if (phba->sli_rev == LPFC_SLI_REV4) {
3152 if (saveq->iocb_flag &
3153 LPFC_EXCHANGE_BUSY) {
3154 /* Set cmdiocb flag for the
3155 * exchange busy so sgl (xri)
3156 * will not be released until
3157 * the abort xri is received
3158 * from the HBA.
3159 */
3160 spin_lock_irqsave(
3161 &phba->hbalock, iflag);
3162 cmdiocbp->iocb_flag |=
3163 LPFC_EXCHANGE_BUSY;
3164 spin_unlock_irqrestore(
3165 &phba->hbalock, iflag);
3166 }
3167 if (cmdiocbp->iocb_flag &
3168 LPFC_DRIVER_ABORTED) {
3169 /*
3170 * Clear LPFC_DRIVER_ABORTED
3171 * bit in case it was driver
3172 * initiated abort.
3173 */
3174 spin_lock_irqsave(
3175 &phba->hbalock, iflag);
3176 cmdiocbp->iocb_flag &=
3177 ~LPFC_DRIVER_ABORTED;
3178 spin_unlock_irqrestore(
3179 &phba->hbalock, iflag);
3180 cmdiocbp->iocb.ulpStatus =
3181 IOSTAT_LOCAL_REJECT;
3182 cmdiocbp->iocb.un.ulpWord[4] =
3183 IOERR_ABORT_REQUESTED;
3184 /*
3185 * For SLI4, irsiocb contains
3186 * NO_XRI in sli_xritag, it
3187 * shall not affect releasing
3188 * sgl (xri) process.
3189 */
3190 saveq->iocb.ulpStatus =
3191 IOSTAT_LOCAL_REJECT;
3192 saveq->iocb.un.ulpWord[4] =
3193 IOERR_SLI_ABORTED;
3194 spin_lock_irqsave(
3195 &phba->hbalock, iflag);
3196 saveq->iocb_flag |=
3197 LPFC_DELAY_MEM_FREE;
3198 spin_unlock_irqrestore(
3199 &phba->hbalock, iflag);
3200 }
3201 }
3202 }
3203 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
3204 } else
3205 lpfc_sli_release_iocbq(phba, cmdiocbp);
3206 } else {
3207 /*
3208 * Unknown initiating command based on the response iotag.
3209 * This could be the case on the ELS ring because of
3210 * lpfc_els_abort().
3211 */
3212 if (pring->ringno != LPFC_ELS_RING) {
3213 /*
3214 * Ring <ringno> handler: unexpected completion IoTag
3215 * <IoTag>
3216 */
3217 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3218 "0322 Ring %d handler: "
3219 "unexpected completion IoTag x%x "
3220 "Data: x%x x%x x%x x%x\n",
3222 saveq->iocb.ulpIoTag,
3223 saveq->iocb.ulpStatus,
3224 saveq->iocb.un.ulpWord[4],
3225 saveq->iocb.ulpCommand,
3226 saveq->iocb.ulpContext);
3227 }
3228 }
3230 return rc;
3231 }
3234 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3235 * @phba: Pointer to HBA context object.
3236 * @pring: Pointer to driver SLI ring object.
3238 * This function is called from the iocb ring event handlers when
3239 * put pointer is ahead of the get pointer for a ring. This function signals
3240 * an error attention condition to the worker thread and the worker
3241 * thread will transition the HBA to offline state.
3244 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3245 {
3246 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3247 /*
3248 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3249 * rsp ring <portRspMax>
3250 */
3251 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3252 "0312 Ring %d handler: portRspPut %d "
3253 "is bigger than rsp ring %d\n",
3254 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3255 pring->sli.sli3.numRiocb);
3257 phba->link_state = LPFC_HBA_ERROR;
3259 /*
3260 * All error attention handlers are posted to
3261 * worker thread
3262 */
3263 phba->work_ha |= HA_ERATT;
3264 phba->work_hs = HS_FFER3;
3266 lpfc_worker_wake_up(phba);
3267 }
3272 * lpfc_poll_eratt - Error attention polling timer timeout handler
3273 * @t: Pointer to the timer list embedded in the HBA context object.
3275 * This function is invoked by the Error Attention polling timer when the
3276 * timer times out. It will check the SLI Error Attention register for
3277 * possible attention events. If so, it will post an Error Attention event
3278 * and wake up worker thread to process it. Otherwise, it will set up the
3279 * Error Attention polling timer for the next poll.
3281 void lpfc_poll_eratt(struct timer_list *t)
3283 struct lpfc_hba *phba;
3284 uint32_t eratt = 0;
3285 uint64_t sli_intr, cnt;
3287 phba = from_timer(phba, t, eratt_poll);
3289 /* Here we will also keep track of interrupts per sec of the hba */
3290 sli_intr = phba->sli.slistat.sli_intr;
3292 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3293 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3294 sli_intr);
3295 else
3296 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3298 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3299 do_div(cnt, phba->eratt_poll_interval);
3300 phba->sli.slistat.sli_ips = cnt;
3302 phba->sli.slistat.sli_prev_intr = sli_intr;
3304 /* Check chip HA register for error event */
3305 eratt = lpfc_sli_check_eratt(phba);
3307 if (eratt)
3308 /* Tell the worker thread there is work to do */
3309 lpfc_worker_wake_up(phba);
3310 else
3311 /* Restart the timer for next eratt poll */
3312 mod_timer(&phba->eratt_poll,
3313 jiffies +
3314 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3315 return;
3316 }
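/*
 * Worked example (informational only): the interrupt counter is an
 * unsigned 64-bit value that may wrap, hence the two-branch delta
 * above. With sli_prev_intr = 0xfffffffffffffffe and sli_intr = 3 the
 * wrap branch computes
 *
 *	cnt = ((uint64_t)(-1) - 0xfffffffffffffffe) + 3 = 4
 *
 * and do_div(cnt, phba->eratt_poll_interval) then yields interrupts
 * per second without relying on native 64-bit division on 32-bit CPUs.
 */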
3320 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3321 * @phba: Pointer to HBA context object.
3322 * @pring: Pointer to driver SLI ring object.
3323 * @mask: Host attention register mask for this ring.
3325 * This function is called from the interrupt context when there is a ring
3326 * event for the fcp ring. The caller does not hold any lock.
3327 * The function processes each response iocb in the response ring until it
3328 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
3329 * LE bit set. The function will call the completion handler of the command iocb
3330 * if the response iocb indicates a completion for a command iocb or it is
3331 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3332 * function if this is an unsolicited iocb.
3333 * This routine presumes LPFC_FCP_RING handling and doesn't bother
3334 * to check it explicitly.
3337 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3338 struct lpfc_sli_ring *pring, uint32_t mask)
3340 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3341 IOCB_t *irsp = NULL;
3342 IOCB_t *entry = NULL;
3343 struct lpfc_iocbq *cmdiocbq = NULL;
3344 struct lpfc_iocbq rspiocbq;
3345 uint32_t status;
3346 uint32_t portRspPut, portRspMax;
3347 int rc = 1;
3348 lpfc_iocb_type type;
3349 unsigned long iflag;
3350 uint32_t rsp_cmpl = 0;
3352 spin_lock_irqsave(&phba->hbalock, iflag);
3353 pring->stats.iocb_event++;
3355 /*
3356 * The next available response entry should never exceed the maximum
3357 * entries. If it does, treat it as an adapter hardware error.
3358 */
3359 portRspMax = pring->sli.sli3.numRiocb;
3360 portRspPut = le32_to_cpu(pgp->rspPutInx);
3361 if (unlikely(portRspPut >= portRspMax)) {
3362 lpfc_sli_rsp_pointers_error(phba, pring);
3363 spin_unlock_irqrestore(&phba->hbalock, iflag);
3364 return 1;
3365 }
3366 if (phba->fcp_ring_in_use) {
3367 spin_unlock_irqrestore(&phba->hbalock, iflag);
3368 return 1;
3369 } else
3370 phba->fcp_ring_in_use = 1;
3373 while (pring->sli.sli3.rspidx != portRspPut) {
3374 /*
3375 * Fetch an entry off the ring and copy it into a local data
3376 * structure. The copy involves a byte-swap since the
3377 * network byte order and pci byte orders are different.
3378 */
3379 entry = lpfc_resp_iocb(phba, pring);
3380 phba->last_completion_time = jiffies;
3382 if (++pring->sli.sli3.rspidx >= portRspMax)
3383 pring->sli.sli3.rspidx = 0;
3385 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3386 (uint32_t *) &rspiocbq.iocb,
3387 phba->iocb_rsp_size);
3388 INIT_LIST_HEAD(&(rspiocbq.list));
3389 irsp = &rspiocbq.iocb;
3391 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3392 pring->stats.iocb_rsp++;
3393 rsp_cmpl++;
3395 if (unlikely(irsp->ulpStatus)) {
3396 /*
3397 * If resource errors reported from HBA, reduce
3398 * queuedepths of the SCSI device.
3399 */
3400 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3401 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3402 IOERR_NO_RESOURCES)) {
3403 spin_unlock_irqrestore(&phba->hbalock, iflag);
3404 phba->lpfc_rampdown_queue_depth(phba);
3405 spin_lock_irqsave(&phba->hbalock, iflag);
3408 /* Rsp ring <ringno> error: IOCB */
3409 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3410 "0336 Rsp Ring %d error: IOCB Data: "
3411 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3413 irsp->un.ulpWord[0],
3414 irsp->un.ulpWord[1],
3415 irsp->un.ulpWord[2],
3416 irsp->un.ulpWord[3],
3417 irsp->un.ulpWord[4],
3418 irsp->un.ulpWord[5],
3419 *(uint32_t *)&irsp->un1,
3420 *((uint32_t *)&irsp->un1 + 1));
3421 }
3423 switch (type) {
3424 case LPFC_ABORT_IOCB:
3425 case LPFC_SOL_IOCB:
3426 /*
3427 * Idle exchange closed via ABTS from port. No iocb
3428 * resources need to be recovered.
3429 */
3430 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3431 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3432 "0333 IOCB cmd 0x%x"
3433 " processed. Skipping"
3439 spin_unlock_irqrestore(&phba->hbalock, iflag);
3440 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3442 spin_lock_irqsave(&phba->hbalock, iflag);
3443 if (unlikely(!cmdiocbq))
3444 break;
3445 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3446 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3447 if (cmdiocbq->iocb_cmpl) {
3448 spin_unlock_irqrestore(&phba->hbalock, iflag);
3449 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3450 &rspiocbq);
3451 spin_lock_irqsave(&phba->hbalock, iflag);
3452 }
3453 break;
3454 case LPFC_UNSOL_IOCB:
3455 spin_unlock_irqrestore(&phba->hbalock, iflag);
3456 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3457 spin_lock_irqsave(&phba->hbalock, iflag);
3458 break;
3459 case LPFC_UNKNOWN_IOCB:
3460 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3461 char adaptermsg[LPFC_MAX_ADPTMSG];
3462 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3463 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3464 MAX_MSG_DATA);
3465 dev_warn(&((phba->pcidev)->dev),
3466 "lpfc%d: %s\n",
3467 phba->brd_no, adaptermsg);
3468 } else {
3469 /* Unknown IOCB command */
3470 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3471 "0334 Unknown IOCB command "
3472 "Data: x%x, x%x x%x x%x x%x\n",
3473 type, irsp->ulpCommand,
3474 irsp->ulpStatus,
3475 irsp->ulpIoTag,
3476 irsp->ulpContext);
3477 }
3478 break;
3479 }
3481 /*
3482 * The response IOCB has been processed. Update the ring
3483 * pointer in SLIM. If the port response put pointer has not
3484 * been updated, sync the pgp->rspPutInx and fetch the new port
3485 * response put pointer.
3486 */
3487 writel(pring->sli.sli3.rspidx,
3488 &phba->host_gp[pring->ringno].rspGetInx);
3490 if (pring->sli.sli3.rspidx == portRspPut)
3491 portRspPut = le32_to_cpu(pgp->rspPutInx);
3492 }
3494 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3495 pring->stats.iocb_rsp_full++;
3496 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3497 writel(status, phba->CAregaddr);
3498 readl(phba->CAregaddr);
3499 }
3500 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3501 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3502 pring->stats.iocb_cmd_empty++;
3504 /* Force update of the local copy of cmdGetInx */
3505 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3506 lpfc_sli_resume_iocb(phba, pring);
3508 if ((pring->lpfc_sli_cmd_available))
3509 (pring->lpfc_sli_cmd_available) (phba, pring);
3510 }
3513 phba->fcp_ring_in_use = 0;
3514 spin_unlock_irqrestore(&phba->hbalock, iflag);
3515 return rc;
3516 }
3519 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3520 * @phba: Pointer to HBA context object.
3521 * @pring: Pointer to driver SLI ring object.
3522 * @rspiocbp: Pointer to driver response IOCB object.
3524 * This function is called from the worker thread when there is a slow-path
3525 * response IOCB to process. This function chains all the response iocbs until
3526 * seeing the iocb with the LE bit set. The function will call
3527 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3528 * completion of a command iocb. The function will call the
3529 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3530 * The function frees the resources or calls the completion handler if this
3531 * iocb is an abort completion. The function returns NULL when the response
3532 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3533 * this function shall chain the iocb on to the iocb_continueq and return the
3534 * response iocb passed in.
3536 static struct lpfc_iocbq *
3537 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3538 struct lpfc_iocbq *rspiocbp)
3540 struct lpfc_iocbq *saveq;
3541 struct lpfc_iocbq *cmdiocbp;
3542 struct lpfc_iocbq *next_iocb;
3543 IOCB_t *irsp = NULL;
3544 uint32_t free_saveq;
3545 uint8_t iocb_cmd_type;
3546 lpfc_iocb_type type;
3547 unsigned long iflag;
3548 int rc;
3550 spin_lock_irqsave(&phba->hbalock, iflag);
3551 /* First add the response iocb to the continueq list */
3552 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3553 pring->iocb_continueq_cnt++;
3555 /* Now, determine whether the list is completed for processing */
3556 irsp = &rspiocbp->iocb;
3557 if (irsp->ulpLe) {
3558 /*
3559 * By default, the driver expects to free all resources
3560 * associated with this iocb completion.
3561 */
3562 free_saveq = 1;
3563 saveq = list_get_first(&pring->iocb_continueq,
3564 struct lpfc_iocbq, list);
3565 irsp = &(saveq->iocb);
3566 list_del_init(&pring->iocb_continueq);
3567 pring->iocb_continueq_cnt = 0;
3569 pring->stats.iocb_rsp++;
3571 /*
3572 * If resource errors reported from HBA, reduce
3573 * queuedepths of the SCSI device.
3574 */
3575 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3576 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3577 IOERR_NO_RESOURCES)) {
3578 spin_unlock_irqrestore(&phba->hbalock, iflag);
3579 phba->lpfc_rampdown_queue_depth(phba);
3580 spin_lock_irqsave(&phba->hbalock, iflag);
3583 if (irsp->ulpStatus) {
3584 /* Rsp ring <ringno> error: IOCB */
3585 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3586 "0328 Rsp Ring %d error: "
3591 "x%x x%x x%x x%x\n",
3593 irsp->un.ulpWord[0],
3594 irsp->un.ulpWord[1],
3595 irsp->un.ulpWord[2],
3596 irsp->un.ulpWord[3],
3597 irsp->un.ulpWord[4],
3598 irsp->un.ulpWord[5],
3599 *(((uint32_t *) irsp) + 6),
3600 *(((uint32_t *) irsp) + 7),
3601 *(((uint32_t *) irsp) + 8),
3602 *(((uint32_t *) irsp) + 9),
3603 *(((uint32_t *) irsp) + 10),
3604 *(((uint32_t *) irsp) + 11),
3605 *(((uint32_t *) irsp) + 12),
3606 *(((uint32_t *) irsp) + 13),
3607 *(((uint32_t *) irsp) + 14),
3608 *(((uint32_t *) irsp) + 15));
3611 /*
3612 * Fetch the IOCB command type and call the correct completion
3613 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3614 * get freed back to the lpfc_iocb_list by the discovery
3615 * kernel thread.
3616 */
3617 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3618 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3619 switch (type) {
3620 case LPFC_SOL_IOCB:
3621 spin_unlock_irqrestore(&phba->hbalock, iflag);
3622 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3623 spin_lock_irqsave(&phba->hbalock, iflag);
3624 break;
3626 case LPFC_UNSOL_IOCB:
3627 spin_unlock_irqrestore(&phba->hbalock, iflag);
3628 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3629 spin_lock_irqsave(&phba->hbalock, iflag);
3630 if (!rc)
3631 free_saveq = 0;
3632 break;
3634 case LPFC_ABORT_IOCB:
3635 cmdiocbp = NULL;
3636 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
3637 spin_unlock_irqrestore(&phba->hbalock, iflag);
3638 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3640 spin_lock_irqsave(&phba->hbalock, iflag);
3641 }
3642 if (cmdiocbp) {
3643 /* Call the specified completion routine */
3644 if (cmdiocbp->iocb_cmpl) {
3645 spin_unlock_irqrestore(&phba->hbalock,
3646 iflag);
3647 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3648 saveq);
3649 spin_lock_irqsave(&phba->hbalock,
3650 iflag);
3651 } else
3652 __lpfc_sli_release_iocbq(phba,
3653 cmdiocbp);
3654 }
3655 break;
3657 case LPFC_UNKNOWN_IOCB:
3658 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3659 char adaptermsg[LPFC_MAX_ADPTMSG];
3660 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3661 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3662 MAX_MSG_DATA);
3663 dev_warn(&((phba->pcidev)->dev),
3664 "lpfc%d: %s\n",
3665 phba->brd_no, adaptermsg);
3667 /* Unknown IOCB command */
3668 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3669 "0335 Unknown IOCB "
3670 "command Data: x%x "
3681 list_for_each_entry_safe(rspiocbp, next_iocb,
3682 &saveq->list, list) {
3683 list_del_init(&rspiocbp->list);
3684 __lpfc_sli_release_iocbq(phba, rspiocbp);
3686 __lpfc_sli_release_iocbq(phba, saveq);
3687 }
3688 rspiocbp = NULL;
3689 }
3690 spin_unlock_irqrestore(&phba->hbalock, iflag);
3691 return rspiocbp;
3692 }
3695 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3696 * @phba: Pointer to HBA context object.
3697 * @pring: Pointer to driver SLI ring object.
3698 * @mask: Host attention register mask for this ring.
3700 * This routine wraps the actual slow_ring event process routine from the
3701 * API jump table function pointer from the lpfc_hba struct.
3704 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3705 struct lpfc_sli_ring *pring, uint32_t mask)
3707 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3708 }
3711 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3712 * @phba: Pointer to HBA context object.
3713 * @pring: Pointer to driver SLI ring object.
3714 * @mask: Host attention register mask for this ring.
3716 * This function is called from the worker thread when there is a ring event
3717 * for non-fcp rings. The caller does not hold any lock. The function will
3718 * remove each response iocb in the response ring and call the handle
3719 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3722 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3723 struct lpfc_sli_ring *pring, uint32_t mask)
3725 struct lpfc_pgp *pgp;
3726 IOCB_t *entry;
3727 IOCB_t *irsp = NULL;
3728 struct lpfc_iocbq *rspiocbp = NULL;
3729 uint32_t portRspPut, portRspMax;
3730 unsigned long iflag;
3731 uint32_t status;
3733 pgp = &phba->port_gp[pring->ringno];
3734 spin_lock_irqsave(&phba->hbalock, iflag);
3735 pring->stats.iocb_event++;
3737 /*
3738 * The next available response entry should never exceed the maximum
3739 * entries. If it does, treat it as an adapter hardware error.
3740 */
3741 portRspMax = pring->sli.sli3.numRiocb;
3742 portRspPut = le32_to_cpu(pgp->rspPutInx);
3743 if (portRspPut >= portRspMax) {
3744 /*
3745 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3746 * rsp ring <portRspMax>
3747 */
3748 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3749 "0303 Ring %d handler: portRspPut %d "
3750 "is bigger than rsp ring %d\n",
3751 pring->ringno, portRspPut, portRspMax);
3753 phba->link_state = LPFC_HBA_ERROR;
3754 spin_unlock_irqrestore(&phba->hbalock, iflag);
3756 phba->work_hs = HS_FFER3;
3757 lpfc_handle_eratt(phba);
3759 return;
3760 }
3763 while (pring->sli.sli3.rspidx != portRspPut) {
3764 /*
3765 * Build a completion list and call the appropriate handler.
3766 * The process is to get the next available response iocb, get
3767 * a free iocb from the list, copy the response data into the
3768 * free iocb, insert to the continuation list, and update the
3769 * next response index to slim. This process makes response
3770 * iocb's in the ring available to DMA as fast as possible but
3771 * pays a penalty for a copy operation. Since the iocb is
3772 * only 32 bytes, this penalty is considered small relative to
3773 * the PCI reads for register values and a slim write. When
3774 * the ulpLe field is set, the entire Command has been
3775 * received.
3776 */
3777 entry = lpfc_resp_iocb(phba, pring);
3779 phba->last_completion_time = jiffies;
3780 rspiocbp = __lpfc_sli_get_iocbq(phba);
3781 if (rspiocbp == NULL) {
3782 printk(KERN_ERR "%s: out of buffers! Failing "
3783 "completion.\n", __func__);
3787 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3788 phba->iocb_rsp_size);
3789 irsp = &rspiocbp->iocb;
3791 if (++pring->sli.sli3.rspidx >= portRspMax)
3792 pring->sli.sli3.rspidx = 0;
3794 if (pring->ringno == LPFC_ELS_RING) {
3795 lpfc_debugfs_slow_ring_trc(phba,
3796 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3797 *(((uint32_t *) irsp) + 4),
3798 *(((uint32_t *) irsp) + 6),
3799 *(((uint32_t *) irsp) + 7));
3800 }
3802 writel(pring->sli.sli3.rspidx,
3803 &phba->host_gp[pring->ringno].rspGetInx);
3805 spin_unlock_irqrestore(&phba->hbalock, iflag);
3806 /* Handle the response IOCB */
3807 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3808 spin_lock_irqsave(&phba->hbalock, iflag);
3810 /*
3811 * If the port response put pointer has not been updated, sync
3812 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3813 * response put pointer.
3814 */
3815 if (pring->sli.sli3.rspidx == portRspPut) {
3816 portRspPut = le32_to_cpu(pgp->rspPutInx);
3817 }
3818 } /* while (pring->sli.sli3.rspidx != portRspPut) */
3820 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3821 /* At least one response entry has been freed */
3822 pring->stats.iocb_rsp_full++;
3823 /* SET RxRE_RSP in Chip Att register */
3824 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3825 writel(status, phba->CAregaddr);
3826 readl(phba->CAregaddr); /* flush */
3828 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3829 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3830 pring->stats.iocb_cmd_empty++;
3832 /* Force update of the local copy of cmdGetInx */
3833 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3834 lpfc_sli_resume_iocb(phba, pring);
3836 if ((pring->lpfc_sli_cmd_available))
3837 (pring->lpfc_sli_cmd_available) (phba, pring);
3838 }
3841 spin_unlock_irqrestore(&phba->hbalock, iflag);
3842 }
3846 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3847 * @phba: Pointer to HBA context object.
3848 * @pring: Pointer to driver SLI ring object.
3849 * @mask: Host attention register mask for this ring.
3851 * This function is called from the worker thread when there is a pending
3852 * ELS response iocb on the driver internal slow-path response iocb worker
3853 * queue. The caller does not hold any lock. The function will remove each
3854 * response iocb from the response worker queue and call the handle
3855 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3858 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3859 struct lpfc_sli_ring *pring, uint32_t mask)
3861 struct lpfc_iocbq *irspiocbq;
3862 struct hbq_dmabuf *dmabuf;
3863 struct lpfc_cq_event *cq_event;
3864 unsigned long iflag;
3865 int count = 0;
3867 spin_lock_irqsave(&phba->hbalock, iflag);
3868 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3869 spin_unlock_irqrestore(&phba->hbalock, iflag);
3870 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3871 /* Get the response iocb from the head of work queue */
3872 spin_lock_irqsave(&phba->hbalock, iflag);
3873 list_remove_head(&phba->sli4_hba.sp_queue_event,
3874 cq_event, struct lpfc_cq_event, list);
3875 spin_unlock_irqrestore(&phba->hbalock, iflag);
3877 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3878 case CQE_CODE_COMPL_WQE:
3879 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3880 cq_event);
3881 /* Translate ELS WCQE to response IOCBQ */
3882 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3883 irspiocbq);
3884 if (irspiocbq)
3885 lpfc_sli_sp_handle_rspiocb(phba, pring,
3886 irspiocbq);
3887 break;
3889 case CQE_CODE_RECEIVE:
3890 case CQE_CODE_RECEIVE_V1:
3891 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3892 cq_event);
3893 lpfc_sli4_handle_received_buffer(phba, dmabuf);
3894 break;
3895 default:
3896 break;
3897 }
3900 /* Limit the number of events to 64 to avoid soft lockups */
3901 if (count++ > 64)
3902 break;
3903 }
3904 }
3907 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
3908 * @phba: Pointer to HBA context object.
3909 * @pring: Pointer to driver SLI ring object.
3911 * This function aborts all iocbs in the given ring and frees all the iocb
3912 * objects in txq. This function issues an abort iocb for all the iocb commands
3913 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3914 * the return of this function. The caller is not required to hold any locks.
3917 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3919 LIST_HEAD(completions);
3920 struct lpfc_iocbq *iocb, *next_iocb;
3922 if (pring->ringno == LPFC_ELS_RING) {
3923 lpfc_fabric_abort_hba(phba);
3924 }
3926 /* Error everything on txq and txcmplq
3927 */
3929 if (phba->sli_rev >= LPFC_SLI_REV4) {
3930 spin_lock_irq(&pring->ring_lock);
3931 list_splice_init(&pring->txq, &completions);
3932 pring->txq_cnt = 0;
3933 spin_unlock_irq(&pring->ring_lock);
3935 spin_lock_irq(&phba->hbalock);
3936 /* Next issue ABTS for everything on the txcmplq */
3937 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3938 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3939 spin_unlock_irq(&phba->hbalock);
3941 spin_lock_irq(&phba->hbalock);
3942 list_splice_init(&pring->txq, &completions);
3945 /* Next issue ABTS for everything on the txcmplq */
3946 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3947 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3948 spin_unlock_irq(&phba->hbalock);
3951 /* Cancel all the IOCBs from the completions list */
3952 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3953 IOERR_SLI_ABORTED);
3954 }
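/*
 * Illustrative note (informational only): the two queues are treated
 * differently above because of ownership. txq iocbs were never handed
 * to the port, so they can be completed locally via
 * lpfc_sli_cancel_iocbs(); txcmplq iocbs are owned by the port, so an
 * abort (ABTS) is requested per iocb and their completions arrive
 * asynchronously after this function returns.
 */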
3957 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3958 * @phba: Pointer to HBA context object.
3961 * This function aborts all iocbs in FCP rings and frees all the iocb
3962 * objects in txq. This function issues an abort iocb for all the iocb commands
3963 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3964 * the return of this function. The caller is not required to hold any locks.
3967 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3969 struct lpfc_sli *psli = &phba->sli;
3970 struct lpfc_sli_ring *pring;
3973 /* Look on all the FCP Rings for the iotag */
3974 if (phba->sli_rev >= LPFC_SLI_REV4) {
3975 for (i = 0; i < phba->cfg_hdw_queue; i++) {
3976 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
3977 lpfc_sli_abort_iocb_ring(phba, pring);
3980 pring = &psli->sli3_ring[LPFC_FCP_RING];
3981 lpfc_sli_abort_iocb_ring(phba, pring);
3986 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
3987 * @phba: Pointer to HBA context object.
3989 * This function flushes all iocbs in the fcp ring and frees all the iocb
3990 * objects in txq and txcmplq. This function will not issue abort iocbs
3991 * for all the iocb commands in txcmplq, they will just be returned with
3992 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
3993 * slot has been permanently disabled.
3996 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
4000 struct lpfc_sli *psli = &phba->sli;
4001 struct lpfc_sli_ring *pring;
4003 struct lpfc_iocbq *piocb, *next_iocb;
4005 spin_lock_irq(&phba->hbalock);
4006 /* Indicate the I/O queues are flushed */
4007 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
4008 spin_unlock_irq(&phba->hbalock);
4010 /* Look on all the FCP Rings for the iotag */
4011 if (phba->sli_rev >= LPFC_SLI_REV4) {
4012 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4013 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
4015 spin_lock_irq(&pring->ring_lock);
4016 /* Retrieve everything on txq */
4017 list_splice_init(&pring->txq, &txq);
4018 list_for_each_entry_safe(piocb, next_iocb,
4019 &pring->txcmplq, list)
4020 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4021 /* Retrieve everything on the txcmplq */
4022 list_splice_init(&pring->txcmplq, &txcmplq);
4024 pring->txcmplq_cnt = 0;
4025 spin_unlock_irq(&pring->ring_lock);
4028 lpfc_sli_cancel_iocbs(phba, &txq,
4029 IOSTAT_LOCAL_REJECT,
4031 /* Flush the txcmplq */
4032 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4033 IOSTAT_LOCAL_REJECT,
4037 pring = &psli->sli3_ring[LPFC_FCP_RING];
4039 spin_lock_irq(&phba->hbalock);
4040 /* Retrieve everything on txq */
4041 list_splice_init(&pring->txq, &txq);
4042 list_for_each_entry_safe(piocb, next_iocb,
4043 &pring->txcmplq, list)
4044 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4045 /* Retrieve everything on the txcmplq */
4046 list_splice_init(&pring->txcmplq, &txcmplq);
4048 pring->txcmplq_cnt = 0;
4049 spin_unlock_irq(&phba->hbalock);
4052 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4054 /* Flush the txcmplq */
4055 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4061 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
4062 * @phba: Pointer to HBA context object.
4064 * This function flushes all wqes in the nvme rings and frees all resources
4065 * in the txcmplq. This function does not issue abort wqes for the IO
4066 * commands in txcmplq; they are simply completed with
4067 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
4068 * slot has been permanently disabled.
4071 lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
4074 struct lpfc_sli_ring *pring;
4076 struct lpfc_iocbq *piocb, *next_iocb;
4078 if ((phba->sli_rev < LPFC_SLI_REV4) ||
4079 !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
4082 /* Hint to other driver operations that a flush is in progress. */
4083 spin_lock_irq(&phba->hbalock);
4084 phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
4085 spin_unlock_irq(&phba->hbalock);
4087 /* Cycle through all NVME rings and complete each IO with
4088 * a local driver reason code. This is a flush, so no abort
4089 * exchanges are sent to the FW.
4091 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4092 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
4094 spin_lock_irq(&pring->ring_lock);
4095 list_for_each_entry_safe(piocb, next_iocb,
4096 &pring->txcmplq, list)
4097 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4098 /* Retrieve everything on the txcmplq */
4099 list_splice_init(&pring->txcmplq, &txcmplq);
4100 pring->txcmplq_cnt = 0;
4101 spin_unlock_irq(&pring->ring_lock);
4103 /* Flush the txcmplq */
4104 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4105 IOSTAT_LOCAL_REJECT,
4111 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4112 * @phba: Pointer to HBA context object.
4113 * @mask: Bit mask to be checked.
4115 * This function reads the host status register and compares
4116 * with the provided bit mask to check if HBA completed
4117 * the restart. This function will wait in a loop for the
4118 * HBA to complete restart. If the HBA does not restart within
4119 * 15 iterations, the function will reset the HBA again. The
4120 * function returns 1 when the HBA fails to restart; otherwise it returns
4121 * zero.
4124 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4130 /* Read the HBA Host Status Register */
4131 if (lpfc_readl(phba->HSregaddr, &status))
4135 * Check status register every 100ms for 5 retries, then every
4136 * 500ms for 5, then every 2.5 sec for 5, then reset board and
4137 * every 2.5 sec for 4.
4138 * Break out of the loop if errors occurred during init.
4140 while (((status & mask) != mask) &&
4141 !(status & HS_FFERM) &&
4153 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4154 lpfc_sli_brdrestart(phba);
4156 /* Read the HBA Host Status Register */
4157 if (lpfc_readl(phba->HSregaddr, &status)) {
4163 /* Check to see if any errors occurred during init */
4164 if ((status & HS_FFERM) || (i >= 20)) {
4165 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4166 "2751 Adapter failed to restart, "
4167 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4169 readl(phba->MBslimaddr + 0xa8),
4170 readl(phba->MBslimaddr + 0xac));
4171 phba->link_state = LPFC_HBA_ERROR;
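/*
 * Illustrative sketch (not part of the driver): the escalating poll
 * schedule described in the comment above -- 100ms for the first 5 polls,
 * 500ms for the next 5, then 2.5s thereafter -- expressed as a helper.
 * Hypothetical name; the real loop open-codes the delays.
 */
static unsigned int example_brdready_delay_ms(int iteration)
{
	if (iteration < 5)
		return 100;	/* first 5 polls: 100ms apart */
	if (iteration < 10)
		return 500;	/* next 5 polls: 500ms apart */
	return 2500;		/* remaining polls: 2.5s apart */
}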
4179 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4180 * @phba: Pointer to HBA context object.
4181 * @mask: Bit mask to be checked.
4183 * This function checks the host status register to check if HBA is
4184 * ready. This function will wait in a loop for the HBA to become ready.
4185 * If the HBA is not ready, the function will reset the HBA PCI
4186 * function again. The function returns 1 when the HBA fails to become
4187 * ready; otherwise it returns zero.
4190 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4195 /* Read the HBA Host Status Register */
4196 status = lpfc_sli4_post_status_check(phba);
4199 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4200 lpfc_sli_brdrestart(phba);
4201 status = lpfc_sli4_post_status_check(phba);
4204 /* Check to see if any errors occurred during init */
4206 phba->link_state = LPFC_HBA_ERROR;
4209 phba->sli4_hba.intr_enable = 0;
4215 * lpfc_sli_brdready - Wrapper func for checking the HBA readiness
4216 * @phba: Pointer to HBA context object.
4217 * @mask: Bit mask to be checked.
4219 * This routine wraps the actual SLI3 or SLI4 HBA readiness check routine,
4220 * invoked through the API jump table function pointer in the lpfc_hba struct.
4223 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4225 return phba->lpfc_sli_brdready(phba, mask);
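/*
 * Illustrative sketch (not part of the driver): the wrapper above relies
 * on a per-HBA function pointer bound during API table setup once the SLI
 * revision is known, roughly as below.  The helper name and shape are
 * hypothetical; lpfc performs this binding in its api-table init code.
 */
static void example_bind_brdready(struct lpfc_hba *phba)
{
	if (phba->sli_rev < LPFC_SLI_REV4)
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
	else
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
}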
4228 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4231 * lpfc_reset_barrier - Make HBA ready for HBA reset
4232 * @phba: Pointer to HBA context object.
4234 * This function is called before resetting an HBA. It is called with
4235 * hbalock held and requests the HBA to quiesce DMAs before a reset.
4237 void lpfc_reset_barrier(struct lpfc_hba *phba)
4239 uint32_t __iomem *resp_buf;
4240 uint32_t __iomem *mbox_buf;
4241 volatile uint32_t mbox;
4242 uint32_t hc_copy, ha_copy, resp_data;
4246 lockdep_assert_held(&phba->hbalock);
4248 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4249 if (hdrtype != 0x80 ||
4250 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4251 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4255 * Tell the other part of the chip to suspend temporarily all
4256 * its DMA activity.
4257 */
4258 resp_buf = phba->MBslimaddr;
4260 /* Disable the error attention */
4261 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4263 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4264 readl(phba->HCregaddr); /* flush */
4265 phba->link_flag |= LS_IGNORE_ERATT;
4267 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4269 if (ha_copy & HA_ERATT) {
4270 /* Clear Chip error bit */
4271 writel(HA_ERATT, phba->HAregaddr);
4272 phba->pport->stopped = 1;
4276 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4277 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4279 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4280 mbox_buf = phba->MBslimaddr;
4281 writel(mbox, mbox_buf);
4283 for (i = 0; i < 50; i++) {
4284 if (lpfc_readl((resp_buf + 1), &resp_data))
4286 if (resp_data != ~(BARRIER_TEST_PATTERN))
4292 if (lpfc_readl((resp_buf + 1), &resp_data))
4294 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4295 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4296 phba->pport->stopped)
4302 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4304 for (i = 0; i < 500; i++) {
4305 if (lpfc_readl(resp_buf, &resp_data))
4307 if (resp_data != mbox)
4316 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4318 if (!(ha_copy & HA_ERATT))
4324 if (readl(phba->HAregaddr) & HA_ERATT) {
4325 writel(HA_ERATT, phba->HAregaddr);
4326 phba->pport->stopped = 1;
4330 phba->link_flag &= ~LS_IGNORE_ERATT;
4331 writel(hc_copy, phba->HCregaddr);
4332 readl(phba->HCregaddr); /* flush */
4336 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4337 * @phba: Pointer to HBA context object.
4339 * This function issues a kill_board mailbox command and waits for
4340 * the error attention interrupt. This function is called for stopping
4341 * the firmware processing. The caller is not required to hold any
4342 * locks. This function calls lpfc_hba_down_post function to free
4343 * any pending commands after the kill. The function returns 1 if it
4344 * fails to kill the board, else it returns 0.
4347 lpfc_sli_brdkill(struct lpfc_hba *phba)
4349 struct lpfc_sli *psli;
4359 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4360 "0329 Kill HBA Data: x%x x%x\n",
4361 phba->pport->port_state, psli->sli_flag);
4363 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4367 /* Disable the error attention */
4368 spin_lock_irq(&phba->hbalock);
4369 if (lpfc_readl(phba->HCregaddr, &status)) {
4370 spin_unlock_irq(&phba->hbalock);
4371 mempool_free(pmb, phba->mbox_mem_pool);
4374 status &= ~HC_ERINT_ENA;
4375 writel(status, phba->HCregaddr);
4376 readl(phba->HCregaddr); /* flush */
4377 phba->link_flag |= LS_IGNORE_ERATT;
4378 spin_unlock_irq(&phba->hbalock);
4380 lpfc_kill_board(phba, pmb);
4381 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4382 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4384 if (retval != MBX_SUCCESS) {
4385 if (retval != MBX_BUSY)
4386 mempool_free(pmb, phba->mbox_mem_pool);
4387 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4388 "2752 KILL_BOARD command failed retval %d\n",
4390 spin_lock_irq(&phba->hbalock);
4391 phba->link_flag &= ~LS_IGNORE_ERATT;
4392 spin_unlock_irq(&phba->hbalock);
4396 spin_lock_irq(&phba->hbalock);
4397 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4398 spin_unlock_irq(&phba->hbalock);
4400 mempool_free(pmb, phba->mbox_mem_pool);
4402 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4403 * attention every 100ms for 3 seconds. If we don't get ERATT after
4404 * 3 seconds we still set HBA_ERROR state because the status of the
4405 * board is now undefined.
4407 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4409 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4411 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4415 del_timer_sync(&psli->mbox_tmo);
4416 if (ha_copy & HA_ERATT) {
4417 writel(HA_ERATT, phba->HAregaddr);
4418 phba->pport->stopped = 1;
4420 spin_lock_irq(&phba->hbalock);
4421 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4422 psli->mbox_active = NULL;
4423 phba->link_flag &= ~LS_IGNORE_ERATT;
4424 spin_unlock_irq(&phba->hbalock);
4426 lpfc_hba_down_post(phba);
4427 phba->link_state = LPFC_HBA_ERROR;
4429 return ha_copy & HA_ERATT ? 0 : 1;
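/*
 * Illustrative sketch (not part of the driver): the 100ms-for-3-seconds
 * error-attention poll performed above after KILL_BOARD, isolated as a
 * helper.  Hypothetical name; returns true if HA_ERATT was observed.
 */
static bool example_wait_for_eratt(struct lpfc_hba *phba)
{
	uint32_t ha_copy;
	int i;

	for (i = 0; i < 30; i++) {	/* 30 polls * 100ms = 3 seconds */
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return false;	/* register read failed */
		if (ha_copy & HA_ERATT)
			return true;
		mdelay(100);
	}
	return false;	/* board state now undefined */
}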
4433 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4434 * @phba: Pointer to HBA context object.
4436 * This function resets the HBA by writing HC_INITFF to the control
4437 * register. After the HBA resets, this function resets all the iocb ring
4438 * indices. This function disables PCI layer parity checking during
4439 * the reset.
4440 * This function returns 0 always.
4441 * The caller is not required to hold any locks.
4444 lpfc_sli_brdreset(struct lpfc_hba *phba)
4446 struct lpfc_sli *psli;
4447 struct lpfc_sli_ring *pring;
4454 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4455 "0325 Reset HBA Data: x%x x%x\n",
4456 (phba->pport) ? phba->pport->port_state : 0,
4459 /* perform board reset */
4460 phba->fc_eventTag = 0;
4461 phba->link_events = 0;
4463 phba->pport->fc_myDID = 0;
4464 phba->pport->fc_prevDID = 0;
4467 /* Turn off parity checking and serr during the physical reset */
4468 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
4471 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4473 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4475 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4477 /* Now toggle INITFF bit in the Host Control Register */
4478 writel(HC_INITFF, phba->HCregaddr);
4480 readl(phba->HCregaddr); /* flush */
4481 writel(0, phba->HCregaddr);
4482 readl(phba->HCregaddr); /* flush */
4484 /* Restore PCI cmd register */
4485 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4487 /* Initialize relevant SLI info */
4488 for (i = 0; i < psli->num_rings; i++) {
4489 pring = &psli->sli3_ring[i];
4491 pring->sli.sli3.rspidx = 0;
4492 pring->sli.sli3.next_cmdidx = 0;
4493 pring->sli.sli3.local_getidx = 0;
4494 pring->sli.sli3.cmdidx = 0;
4495 pring->missbufcnt = 0;
4498 phba->link_state = LPFC_WARM_START;
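/*
 * Illustrative sketch (not part of the driver): the save/mask/restore of
 * the PCI command register around a board reset, as performed above.  The
 * reset step is a placeholder; the helper name is hypothetical.
 */
static void example_reset_with_parity_masked(struct lpfc_hba *phba)
{
	uint16_t cfg_value;

	if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
		return;

	/* Suppress parity/SERR reporting while the reset glitches the bus */
	pci_write_config_word(phba->pcidev, PCI_COMMAND,
			      cfg_value &
			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR));

	/* ... perform the device-specific reset here ... */

	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
}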
4503 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4504 * @phba: Pointer to HBA context object.
4506 * This function resets a SLI4 HBA. This function disables PCI layer parity
4507 * checking while it resets the device. The caller is not required to hold
4510 * This function returns 0 on success and an error code on failure.
4513 lpfc_sli4_brdreset(struct lpfc_hba *phba)
4515 struct lpfc_sli *psli = &phba->sli;
4520 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4521 "0295 Reset HBA Data: x%x x%x x%x\n",
4522 phba->pport->port_state, psli->sli_flag,
4525 /* perform board reset */
4526 phba->fc_eventTag = 0;
4527 phba->link_events = 0;
4528 phba->pport->fc_myDID = 0;
4529 phba->pport->fc_prevDID = 0;
4531 spin_lock_irq(&phba->hbalock);
4532 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4533 phba->fcf.fcf_flag = 0;
4534 spin_unlock_irq(&phba->hbalock);
4536 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4537 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4538 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4542 /* Now physically reset the device */
4543 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4544 "0389 Performing PCI function reset!\n");
4546 /* Turn off parity checking and serr during the physical reset */
4547 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
4548 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4549 "3205 PCI read Config failed\n");
4553 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4554 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4556 /* Perform FCoE PCI function reset before freeing queue memory */
4557 rc = lpfc_pci_function_reset(phba);
4559 /* Restore PCI cmd register */
4560 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4566 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4567 * @phba: Pointer to HBA context object.
4569 * This function is called in the SLI initialization code path to
4570 * restart the HBA. The caller is not required to hold any lock.
4571 * This function writes MBX_RESTART mailbox command to the SLIM and
4572 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4573 * function to free any pending commands. The function enables
4574 * POST only during the first initialization. The function returns zero.
4575 * The function does not guarantee that the MBX_RESTART mailbox
4576 * command completes before this function returns.
4579 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4582 struct lpfc_sli *psli;
4583 volatile uint32_t word0;
4584 void __iomem *to_slim;
4585 uint32_t hba_aer_enabled;
4587 spin_lock_irq(&phba->hbalock);
4589 /* Take PCIe device Advanced Error Reporting (AER) state */
4590 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4595 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4596 "0337 Restart HBA Data: x%x x%x\n",
4597 (phba->pport) ? phba->pport->port_state : 0,
4601 mb = (MAILBOX_t *) &word0;
4602 mb->mbxCommand = MBX_RESTART;
4605 lpfc_reset_barrier(phba);
4607 to_slim = phba->MBslimaddr;
4608 writel(*(uint32_t *) mb, to_slim);
4609 readl(to_slim); /* flush */
4611 /* Only skip post after fc_ffinit is completed */
4612 if (phba->pport && phba->pport->port_state)
4613 word0 = 1; /* This is really setting up word1 */
4615 word0 = 0; /* This is really setting up word1 */
4616 to_slim = phba->MBslimaddr + sizeof (uint32_t);
4617 writel(*(uint32_t *) mb, to_slim);
4618 readl(to_slim); /* flush */
4620 lpfc_sli_brdreset(phba);
4622 phba->pport->stopped = 0;
4623 phba->link_state = LPFC_INIT_START;
4625 spin_unlock_irq(&phba->hbalock);
4627 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4628 psli->stats_start = ktime_get_seconds();
4630 /* Give the INITFF and Post time to settle. */
4633 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4634 if (hba_aer_enabled)
4635 pci_disable_pcie_error_reporting(phba->pcidev);
4637 lpfc_hba_down_post(phba);
4643 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4644 * @phba: Pointer to HBA context object.
4646 * This function is called in the SLI initialization code path to restart
4647 * a SLI4 HBA. The caller is not required to hold any lock.
4648 * At the end of the function, it calls lpfc_hba_down_post function to
4649 * free any pending commands.
4652 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4654 struct lpfc_sli *psli = &phba->sli;
4655 uint32_t hba_aer_enabled;
4659 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4660 "0296 Restart HBA Data: x%x x%x\n",
4661 phba->pport->port_state, psli->sli_flag);
4663 /* Take PCIe device Advanced Error Reporting (AER) state */
4664 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4666 rc = lpfc_sli4_brdreset(phba);
4670 spin_lock_irq(&phba->hbalock);
4671 phba->pport->stopped = 0;
4672 phba->link_state = LPFC_INIT_START;
4674 spin_unlock_irq(&phba->hbalock);
4676 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4677 psli->stats_start = ktime_get_seconds();
4679 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4680 if (hba_aer_enabled)
4681 pci_disable_pcie_error_reporting(phba->pcidev);
4683 lpfc_hba_down_post(phba);
4684 lpfc_sli4_queue_destroy(phba);
4690 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4691 * @phba: Pointer to HBA context object.
4693 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4694 * API jump table function pointer from the lpfc_hba struct.
4697 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4699 return phba->lpfc_sli_brdrestart(phba);
4703 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4704 * @phba: Pointer to HBA context object.
4706 * This function is called after an HBA restart to wait for successful
4707 * restart of the HBA. Successful restart of the HBA is indicated by
4708 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4709 * iterations, the function will restart the HBA again. The function returns
4710 * zero if HBA successfully restarted else returns negative error code.
4713 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4715 uint32_t status, i = 0;
4717 /* Read the HBA Host Status Register */
4718 if (lpfc_readl(phba->HSregaddr, &status))
4721 /* Check status register to see what current state is */
4723 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4725 /* Check every 10ms for 10 retries, then every 100ms for 90
4726 * retries, then every 1 sec for 50 retries, for a total of
4727 * ~60 seconds, before resetting the board again and checking
4728 * every 1 sec for 50 more retries. Up to 60 seconds must be
4729 * allowed before the board is ready for the Falcon FIPS
4730 * zeroization to complete; any board reset in between restarts
4731 * the zeroization and further delays board readiness.
4734 /* Adapter failed to init, timeout, status reg
4736 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4737 "0436 Adapter failed to init, "
4738 "timeout, status reg x%x, "
4739 "FW Data: A8 x%x AC x%x\n", status,
4740 readl(phba->MBslimaddr + 0xa8),
4741 readl(phba->MBslimaddr + 0xac));
4742 phba->link_state = LPFC_HBA_ERROR;
4746 /* Check to see if any errors occurred during init */
4747 if (status & HS_FFERM) {
4748 /* ERROR: During chipset initialization */
4749 /* Adapter failed to init, chipset, status reg
4751 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4752 "0437 Adapter failed to init, "
4753 "chipset, status reg x%x, "
4754 "FW Data: A8 x%x AC x%x\n", status,
4755 readl(phba->MBslimaddr + 0xa8),
4756 readl(phba->MBslimaddr + 0xac));
4757 phba->link_state = LPFC_HBA_ERROR;
4770 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4771 lpfc_sli_brdrestart(phba);
4773 /* Read the HBA Host Status Register */
4774 if (lpfc_readl(phba->HSregaddr, &status))
4778 /* Check to see if any errors occurred during init */
4779 if (status & HS_FFERM) {
4780 /* ERROR: During chipset initialization */
4781 /* Adapter failed to init, chipset, status reg <status> */
4782 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4783 "0438 Adapter failed to init, chipset, "
4785 "FW Data: A8 x%x AC x%x\n", status,
4786 readl(phba->MBslimaddr + 0xa8),
4787 readl(phba->MBslimaddr + 0xac));
4788 phba->link_state = LPFC_HBA_ERROR;
4792 /* Clear all interrupt enable conditions */
4793 writel(0, phba->HCregaddr);
4794 readl(phba->HCregaddr); /* flush */
4796 /* setup host attn register */
4797 writel(0xffffffff, phba->HAregaddr);
4798 readl(phba->HAregaddr); /* flush */
4803 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4805 * This function calculates and returns the number of HBQs required to be
4806 * configured.
4809 lpfc_sli_hbq_count(void)
4811 return ARRAY_SIZE(lpfc_hbq_defs);
4815 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4817 * This function adds the number of hbq entries in every HBQ to get
4818 * the total number of hbq entries required for the HBA and returns
4819 * the result.
4822 lpfc_sli_hbq_entry_count(void)
4824 int hbq_count = lpfc_sli_hbq_count();
4828 for (i = 0; i < hbq_count; ++i)
4829 count += lpfc_hbq_defs[i]->entry_count;
4834 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4836 * This function calculates the amount of memory required for all hbq entries
4837 * to be configured and returns the total memory required.
4840 lpfc_sli_hbq_size(void)
4842 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
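/*
 * Illustrative sketch (not part of the driver): the three sizing helpers
 * above compose as HBQ count -> total entries -> total bytes.  The two
 * entry counts below are hypothetical, purely to show the arithmetic.
 */
static size_t example_hbq_bytes(void)
{
	/* e.g. two HBQs of 256 and 128 entries */
	return (256 + 128) * sizeof(struct lpfc_hbq_entry);
}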
4846 * lpfc_sli_hbq_setup - configure and initialize HBQs
4847 * @phba: Pointer to HBA context object.
4849 * This function is called during the SLI initialization to configure
4850 * all the HBQs and post buffers to the HBQ. The caller is not
4851 * required to hold any locks. This function will return zero if successful
4852 * else it will return negative error code.
4855 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4857 int hbq_count = lpfc_sli_hbq_count();
4861 uint32_t hbq_entry_index;
4863 /* Get a Mailbox buffer to setup mailbox
4864 * commands for HBA initialization
4866 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4873 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4874 phba->link_state = LPFC_INIT_MBX_CMDS;
4875 phba->hbq_in_use = 1;
4877 hbq_entry_index = 0;
4878 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4879 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4880 phba->hbqs[hbqno].hbqPutIdx = 0;
4881 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4882 phba->hbqs[hbqno].entry_count =
4883 lpfc_hbq_defs[hbqno]->entry_count;
4884 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4885 hbq_entry_index, pmb);
4886 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4888 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4889 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4890 mbxStatus <status>, ring <num> */
4892 lpfc_printf_log(phba, KERN_ERR,
4893 LOG_SLI | LOG_VPORT,
4894 "1805 Adapter failed to init. "
4895 "Data: x%x x%x x%x\n",
4897 pmbox->mbxStatus, hbqno);
4899 phba->link_state = LPFC_HBA_ERROR;
4900 mempool_free(pmb, phba->mbox_mem_pool);
4904 phba->hbq_count = hbq_count;
4906 mempool_free(pmb, phba->mbox_mem_pool);
4908 /* Initially populate or replenish the HBQs */
4909 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4910 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4915 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4916 * @phba: Pointer to HBA context object.
4918 * This function is called during the SLI4 initialization to configure
4919 * the ELS HBQ and post receive buffers to it. The caller is not
4920 * required to hold any locks. This function will return zero if successful
4921 * else it will return negative error code.
4924 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4926 phba->hbq_in_use = 1;
4927 phba->hbqs[LPFC_ELS_HBQ].entry_count =
4928 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
4929 phba->hbq_count = 1;
4930 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
4931 /* Initially populate or replenish the HBQs */
4936 * lpfc_sli_config_port - Issue config port mailbox command
4937 * @phba: Pointer to HBA context object.
4938 * @sli_mode: sli mode - 2/3
4940 * This function is called by the sli initialization code path
4941 * to issue config_port mailbox command. This function restarts the
4942 * HBA firmware and issues a config_port mailbox command to configure
4943 * the SLI interface in the sli mode specified by sli_mode
4944 * variable. The caller is not required to hold any locks.
4945 * The function returns 0 if successful, else returns negative error
4946 * code.
4949 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4952 uint32_t resetcount = 0, rc = 0, done = 0;
4954 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4956 phba->link_state = LPFC_HBA_ERROR;
4960 phba->sli_rev = sli_mode;
4961 while (resetcount < 2 && !done) {
4962 spin_lock_irq(&phba->hbalock);
4963 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4964 spin_unlock_irq(&phba->hbalock);
4965 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4966 lpfc_sli_brdrestart(phba);
4967 rc = lpfc_sli_chipset_init(phba);
4971 spin_lock_irq(&phba->hbalock);
4972 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4973 spin_unlock_irq(&phba->hbalock);
4976 /* Call pre CONFIG_PORT mailbox command initialization. A
4977 * value of 0 means the call was successful. Any other
4978 * nonzero value is a failure, but if ERESTART is returned,
4979 * the driver may reset the HBA and try again.
4981 rc = lpfc_config_port_prep(phba);
4982 if (rc == -ERESTART) {
4983 phba->link_state = LPFC_LINK_UNKNOWN;
4988 phba->link_state = LPFC_INIT_MBX_CMDS;
4989 lpfc_config_port(phba, pmb);
4990 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4991 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4992 LPFC_SLI3_HBQ_ENABLED |
4993 LPFC_SLI3_CRP_ENABLED |
4994 LPFC_SLI3_DSS_ENABLED);
4995 if (rc != MBX_SUCCESS) {
4996 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4997 "0442 Adapter failed to init, mbxCmd x%x "
4998 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
4999 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5000 spin_lock_irq(&phba->hbalock);
5001 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5002 spin_unlock_irq(&phba->hbalock);
5005 /* Allow asynchronous mailbox command to go through */
5006 spin_lock_irq(&phba->hbalock);
5007 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5008 spin_unlock_irq(&phba->hbalock);
5011 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5012 (pmb->u.mb.un.varCfgPort.gasabt == 0))
5013 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5014 "3110 Port did not grant ASABT\n");
5019 goto do_prep_failed;
5021 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5022 if (!pmb->u.mb.un.varCfgPort.cMA) {
5024 goto do_prep_failed;
5026 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5027 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5028 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5029 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5030 phba->max_vpi : phba->max_vports;
5034 phba->fips_level = 0;
5035 phba->fips_spec_rev = 0;
5036 if (pmb->u.mb.un.varCfgPort.gdss) {
5037 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
5038 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
5039 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
5040 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5041 "2850 Security Crypto Active. FIPS x%d "
5043 phba->fips_level, phba->fips_spec_rev);
5045 if (pmb->u.mb.un.varCfgPort.sec_err) {
5046 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5047 "2856 Config Port Security Crypto "
5049 pmb->u.mb.un.varCfgPort.sec_err);
5051 if (pmb->u.mb.un.varCfgPort.gerbm)
5052 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5053 if (pmb->u.mb.un.varCfgPort.gcrp)
5054 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5056 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5057 phba->port_gp = phba->mbox->us.s3_pgp.port;
5059 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5060 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5061 phba->cfg_enable_bg = 0;
5062 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5063 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5064 "0443 Adapter did not grant "
5069 phba->hbq_get = NULL;
5070 phba->port_gp = phba->mbox->us.s2.port;
5074 mempool_free(pmb, phba->mbox_mem_pool);
5080 * lpfc_sli_hba_setup - SLI initialization function
5081 * @phba: Pointer to HBA context object.
5083 * This function is the main SLI initialization function. This function
5084 * is called by the HBA initialization code, HBA reset code and HBA
5085 * error attention handler code. Caller is not required to hold any
5086 * locks. This function issues config_port mailbox command to configure
5087 * the SLI, setup iocb rings and HBQ rings. In the end the function
5088 * calls the config_port_post function to issue init_link mailbox
5089 * command and to start the discovery. The function will return zero
5090 * if successful, else it will return negative error code.
5093 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5099 switch (phba->cfg_sli_mode) {
5101 if (phba->cfg_enable_npiv) {
5102 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5103 "1824 NPIV enabled: Override sli_mode "
5104 "parameter (%d) to auto (0).\n",
5105 phba->cfg_sli_mode);
5114 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5115 "1819 Unrecognized sli_mode parameter: %d.\n",
5116 phba->cfg_sli_mode);
5120 phba->fcp_embed_io = 0; /* SLI4 FC support only */
5122 rc = lpfc_sli_config_port(phba, mode);
5124 if (rc && phba->cfg_sli_mode == 3)
5125 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5126 "1820 Unable to select SLI-3. "
5127 "Not supported by adapter.\n");
5128 if (rc && mode != 2)
5129 rc = lpfc_sli_config_port(phba, 2);
5130 else if (rc && mode == 2)
5131 rc = lpfc_sli_config_port(phba, 3);
5133 goto lpfc_sli_hba_setup_error;
5135 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5136 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5137 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5139 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5140 "2709 This device supports "
5141 "Advanced Error Reporting (AER)\n");
5142 spin_lock_irq(&phba->hbalock);
5143 phba->hba_flag |= HBA_AER_ENABLED;
5144 spin_unlock_irq(&phba->hbalock);
5146 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5147 "2708 This device does not support "
5148 "Advanced Error Reporting (AER): %d\n",
5150 phba->cfg_aer_support = 0;
5154 if (phba->sli_rev == 3) {
5155 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5156 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5158 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5159 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5160 phba->sli3_options = 0;
5163 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5164 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5165 phba->sli_rev, phba->max_vpi);
5166 rc = lpfc_sli_ring_map(phba);
5169 goto lpfc_sli_hba_setup_error;
5171 /* Initialize VPIs. */
5172 if (phba->sli_rev == LPFC_SLI_REV3) {
5174 * The VPI bitmask and physical ID array are allocated
5175 * and initialized once only - at driver load. A port
5176 * reset doesn't need to reinitialize this memory.
5178 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5179 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5180 phba->vpi_bmask = kcalloc(longs,
5181 sizeof(unsigned long),
5183 if (!phba->vpi_bmask) {
5185 goto lpfc_sli_hba_setup_error;
5188 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5191 if (!phba->vpi_ids) {
5192 kfree(phba->vpi_bmask);
5194 goto lpfc_sli_hba_setup_error;
5196 for (i = 0; i < phba->max_vpi; i++)
5197 phba->vpi_ids[i] = i;
5202 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5203 rc = lpfc_sli_hbq_setup(phba);
5205 goto lpfc_sli_hba_setup_error;
5207 spin_lock_irq(&phba->hbalock);
5208 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5209 spin_unlock_irq(&phba->hbalock);
5211 rc = lpfc_config_port_post(phba);
5213 goto lpfc_sli_hba_setup_error;
5217 lpfc_sli_hba_setup_error:
5218 phba->link_state = LPFC_HBA_ERROR;
5219 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5220 "0445 Firmware initialization failed\n");
5225 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5226 * @phba: Pointer to HBA context object.
5228 * This function issues a dump mailbox command to read config region
5229 * 23, parses the records in the region, and populates the driver's
5230 * internal data structures.
5233 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5235 LPFC_MBOXQ_t *mboxq;
5236 struct lpfc_dmabuf *mp;
5237 struct lpfc_mqe *mqe;
5238 uint32_t data_length;
5241 /* Program the default value of vlan_id and fc_map */
5242 phba->valid_vlan = 0;
5243 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5244 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5245 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5247 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5251 mqe = &mboxq->u.mqe;
5252 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5254 goto out_free_mboxq;
5257 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5258 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5260 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5261 "(%d):2571 Mailbox cmd x%x Status x%x "
5262 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5263 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5264 "CQ: x%x x%x x%x x%x\n",
5265 mboxq->vport ? mboxq->vport->vpi : 0,
5266 bf_get(lpfc_mqe_command, mqe),
5267 bf_get(lpfc_mqe_status, mqe),
5268 mqe->un.mb_words[0], mqe->un.mb_words[1],
5269 mqe->un.mb_words[2], mqe->un.mb_words[3],
5270 mqe->un.mb_words[4], mqe->un.mb_words[5],
5271 mqe->un.mb_words[6], mqe->un.mb_words[7],
5272 mqe->un.mb_words[8], mqe->un.mb_words[9],
5273 mqe->un.mb_words[10], mqe->un.mb_words[11],
5274 mqe->un.mb_words[12], mqe->un.mb_words[13],
5275 mqe->un.mb_words[14], mqe->un.mb_words[15],
5276 mqe->un.mb_words[16], mqe->un.mb_words[50],
5278 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5279 mboxq->mcqe.trailer);
5282 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5285 goto out_free_mboxq;
5287 data_length = mqe->un.mb_words[5];
5288 if (data_length > DMP_RGN23_SIZE) {
5289 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5292 goto out_free_mboxq;
5295 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5296 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5301 mempool_free(mboxq, phba->mbox_mem_pool);
5306 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5307 * @phba: pointer to lpfc hba data structure.
5308 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5309 * @vpd: pointer to the memory to hold resulting port vpd data.
5310 * @vpd_size: On input, the number of bytes allocated to @vpd.
5311 * On output, the number of data bytes in @vpd.
5313 * This routine executes a READ_REV SLI4 mailbox command. In
5314 * addition, this routine gets the port vpd data.
5318 * -ENOMEM - could not allocate memory.
5321 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5322 uint8_t *vpd, uint32_t *vpd_size)
5326 struct lpfc_dmabuf *dmabuf;
5327 struct lpfc_mqe *mqe;
5329 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5334 * Get a DMA buffer for the vpd data resulting from the READ_REV
5337 dma_size = *vpd_size;
5338 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5339 &dmabuf->phys, GFP_KERNEL);
5340 if (!dmabuf->virt) {
5346 * The SLI4 implementation of READ_REV conflicts at word1,
5347 * bits 31:16 and SLI4 adds vpd functionality not present
5348 * in SLI3. This code corrects the conflicts.
5350 lpfc_read_rev(phba, mboxq);
5351 mqe = &mboxq->u.mqe;
5352 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5353 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5354 mqe->un.read_rev.word1 &= 0x0000FFFF;
5355 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5356 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5358 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5360 dma_free_coherent(&phba->pcidev->dev, dma_size,
5361 dmabuf->virt, dmabuf->phys);
5367 * The available vpd length cannot be bigger than the
5368 * DMA buffer passed to the port. Catch the case where it is
5369 * smaller and update the caller's size accordingly.
5371 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5372 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5374 memcpy(vpd, dmabuf->virt, *vpd_size);
5376 dma_free_coherent(&phba->pcidev->dev, dma_size,
5377 dmabuf->virt, dmabuf->phys);
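/*
 * Illustrative sketch (not part of the driver): the caller-side contract
 * of lpfc_sli4_read_rev().  @vpd_size is in/out: it carries the allocation
 * size down and comes back clamped to the bytes actually returned.  The
 * buffer size and helper name are hypothetical.
 */
static int example_fetch_vpd(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	uint32_t vpd_size = 1024;	/* hypothetical allocation size */
	uint8_t *vpd;
	int rc;

	vpd = kzalloc(vpd_size, GFP_KERNEL);
	if (!vpd)
		return -ENOMEM;
	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
	if (!rc)
		/* vpd_size now holds the number of valid bytes in vpd */
		lpfc_parse_vpd(phba, vpd, vpd_size);
	kfree(vpd);
	return rc;
}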
5383 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5384 * @phba: pointer to lpfc hba data structure.
5386 * This routine retrieves the controller attributes of the SLI4 device
5387 * this PCI function is attached to.
5391 * otherwise - failed to retrieve controller attributes
5394 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5396 LPFC_MBOXQ_t *mboxq;
5397 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5398 struct lpfc_controller_attribute *cntl_attr;
5399 void *virtaddr = NULL;
5400 uint32_t alloclen, reqlen;
5401 uint32_t shdr_status, shdr_add_status;
5402 union lpfc_sli4_cfg_shdr *shdr;
5405 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5409 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5410 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5411 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5412 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5413 LPFC_SLI4_MBX_NEMBED);
5415 if (alloclen < reqlen) {
5416 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5417 "3084 Allocated DMA memory size (%d) is "
5418 "less than the requested DMA memory size "
5419 "(%d)\n", alloclen, reqlen);
5421 goto out_free_mboxq;
5423 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5424 virtaddr = mboxq->sge_array->addr[0];
5425 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5426 shdr = &mbx_cntl_attr->cfg_shdr;
5427 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5428 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5429 if (shdr_status || shdr_add_status || rc) {
5430 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5431 "3085 Mailbox x%x (x%x/x%x) failed, "
5432 "rc:x%x, status:x%x, add_status:x%x\n",
5433 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5434 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5435 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5436 rc, shdr_status, shdr_add_status);
5438 goto out_free_mboxq;
5441 cntl_attr = &mbx_cntl_attr->cntl_attr;
5442 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5443 phba->sli4_hba.lnk_info.lnk_tp =
5444 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5445 phba->sli4_hba.lnk_info.lnk_no =
5446 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5448 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
5449 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
5450 sizeof(phba->BIOSVersion));
5452 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5453 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
5454 phba->sli4_hba.lnk_info.lnk_tp,
5455 phba->sli4_hba.lnk_info.lnk_no,
5458 if (rc != MBX_TIMEOUT) {
5459 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5460 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5462 mempool_free(mboxq, phba->mbox_mem_pool);
5468 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5469 * @phba: pointer to lpfc hba data structure.
5471 * This routine retrieves the physical port name of the SLI4 device
5472 * this PCI function is attached to.
5476 * otherwise - failed to retrieve physical port name
5479 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5481 LPFC_MBOXQ_t *mboxq;
5482 struct lpfc_mbx_get_port_name *get_port_name;
5483 uint32_t shdr_status, shdr_add_status;
5484 union lpfc_sli4_cfg_shdr *shdr;
5485 char cport_name = 0;
5488 /* We assume nothing at this point */
5489 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5490 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5492 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5495 /* obtain link type and link number via READ_CONFIG */
5496 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5497 lpfc_sli4_read_config(phba);
5498 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5499 goto retrieve_ppname;
5501 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5502 rc = lpfc_sli4_get_ctl_attr(phba);
5504 goto out_free_mboxq;
5507 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5508 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5509 sizeof(struct lpfc_mbx_get_port_name) -
5510 sizeof(struct lpfc_sli4_cfg_mhdr),
5511 LPFC_SLI4_MBX_EMBED);
5512 get_port_name = &mboxq->u.mqe.un.get_port_name;
5513 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5514 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5515 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5516 phba->sli4_hba.lnk_info.lnk_tp);
5517 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5518 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5519 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5520 if (shdr_status || shdr_add_status || rc) {
5521 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5522 "3087 Mailbox x%x (x%x/x%x) failed: "
5523 "rc:x%x, status:x%x, add_status:x%x\n",
5524 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5525 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5526 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5527 rc, shdr_status, shdr_add_status);
5529 goto out_free_mboxq;
5531 switch (phba->sli4_hba.lnk_info.lnk_no) {
5532 case LPFC_LINK_NUMBER_0:
5533 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5534 &get_port_name->u.response);
5535 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5537 case LPFC_LINK_NUMBER_1:
5538 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5539 &get_port_name->u.response);
5540 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5542 case LPFC_LINK_NUMBER_2:
5543 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5544 &get_port_name->u.response);
5545 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5547 case LPFC_LINK_NUMBER_3:
5548 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5549 &get_port_name->u.response);
5550 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5556 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5557 phba->Port[0] = cport_name;
5558 phba->Port[1] = '\0';
5559 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5560 "3091 SLI get port name: %s\n", phba->Port);
5564 if (rc != MBX_TIMEOUT) {
5565 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5566 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5568 mempool_free(mboxq, phba->mbox_mem_pool);
5574 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5575 * @phba: pointer to lpfc hba data structure.
5577 * This routine is called to explicitly arm the SLI4 device's completion and
5578 * event queues.
5581 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5584 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
5585 struct lpfc_sli4_hdw_queue *qp;
5586 struct lpfc_queue *eq;
5588 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
5589 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
5590 if (sli4_hba->nvmels_cq)
5591 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
5594 if (sli4_hba->hdwq) {
5595 /* Loop thru all Hardware Queues */
5596 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
5597 qp = &sli4_hba->hdwq[qidx];
5598 /* ARM the corresponding CQ */
5599 sli4_hba->sli4_write_cq_db(phba, qp->fcp_cq, 0,
5601 sli4_hba->sli4_write_cq_db(phba, qp->nvme_cq, 0,
5605 /* Loop thru all IRQ vectors */
5606 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
5607 eq = sli4_hba->hba_eq_hdl[qidx].eq;
5608 /* ARM the corresponding EQ */
5609 sli4_hba->sli4_write_eq_db(phba, eq,
5610 0, LPFC_QUEUE_REARM);
5614 if (phba->nvmet_support) {
5615 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5616 sli4_hba->sli4_write_cq_db(phba,
5617 sli4_hba->nvmet_cqset[qidx], 0,
5624 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5625 * @phba: Pointer to HBA context object.
5626 * @type: The resource extent type.
5627 * @extnt_count: buffer to hold port available extent count.
5628 * @extnt_size: buffer to hold element count per extent.
5630 * This function calls the port and retrieves the number of available
5631 * extents and their size for a particular extent type.
5633 * Returns: 0 if successful. Nonzero otherwise.
5636 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5637 uint16_t *extnt_count, uint16_t *extnt_size)
5642 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5645 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5649 /* Find out how many extents are available for this resource type */
5650 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5651 sizeof(struct lpfc_sli4_cfg_mhdr));
5652 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5653 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5654 length, LPFC_SLI4_MBX_EMBED);
5656 /* Send an extents count of 0 - the GET doesn't use it. */
5657 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5658 LPFC_SLI4_MBX_EMBED);
5664 if (!phba->sli4_hba.intr_enable)
5665 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5667 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5668 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5675 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5676 if (bf_get(lpfc_mbox_hdr_status,
5677 &rsrc_info->header.cfg_shdr.response)) {
5678 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5679 "2930 Failed to get resource extents "
5680 "Status 0x%x Add'l Status 0x%x\n",
5681 bf_get(lpfc_mbox_hdr_status,
5682 &rsrc_info->header.cfg_shdr.response),
5683 bf_get(lpfc_mbox_hdr_add_status,
5684 &rsrc_info->header.cfg_shdr.response));
5689 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5691 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5694 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5695 "3162 Retrieved extents type-%d from port: count:%d, "
5696 "size:%d\n", type, *extnt_count, *extnt_size);
5699 mempool_free(mbox, phba->mbox_mem_pool);
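/*
 * Illustrative sketch (not part of the driver): querying XRI extent
 * geometry with the routine above.  Both outputs are needed to size the
 * bitmask and id array before allocating extents.  Hypothetical name.
 */
static int example_query_xri_extents(struct lpfc_hba *phba)
{
	uint16_t cnt = 0, size = 0;
	int rc;

	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
					    &cnt, &size);
	if (rc)
		return rc;

	/* cnt extents of size ids each => cnt * size XRIs in total */
	return 0;
}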
5704 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5705 * @phba: Pointer to HBA context object.
5706 * @type: The extent type to check.
5708 * This function reads the current available extents from the port and checks
5709 * if the extent count or extent size has changed since the last access.
5710 * Callers use this routine post port reset to understand if there is an
5711 * extent reprovisioning requirement.
5714 * -Error: error indicates problem.
5715 * 1: Extent count or size has changed.
5719 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5721 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5722 uint16_t size_diff, rsrc_ext_size;
5724 struct lpfc_rsrc_blks *rsrc_entry;
5725 struct list_head *rsrc_blk_list = NULL;
5729 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5736 case LPFC_RSC_TYPE_FCOE_RPI:
5737 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5739 case LPFC_RSC_TYPE_FCOE_VPI:
5740 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5742 case LPFC_RSC_TYPE_FCOE_XRI:
5743 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5745 case LPFC_RSC_TYPE_FCOE_VFI:
5746 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5752 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5754 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5758 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5765 * lpfc_sli4_cfg_post_extnts - Post an extents allocation request
5766 * @phba: Pointer to HBA context object.
5767 * @extnt_cnt: number of available extents.
5768 * @type: the extent type (rpi, xri, vfi, vpi).
5769 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5770 * @mbox: pointer to the caller's allocated mailbox structure.
5772 * This function executes the extents allocation request. It also
5773 * takes care of the amount of memory needed to allocate or get the
5774 * allocated extents. It is the caller's responsibility to evaluate
5775 * the response.
5778 * -Error: Error value describes the condition found.
5782 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5783 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5788 uint32_t alloc_len, mbox_tmo;
5790 /* Calculate the total requested length of the dma memory */
5791 req_len = extnt_cnt * sizeof(uint16_t);
5794 * Calculate the size of an embedded mailbox. The uint32_t
5795 * accounts for the extents-specific word.
5797 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5801 * Presume the allocation and response will fit into an embedded
5802 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5804 *emb = LPFC_SLI4_MBX_EMBED;
5805 if (req_len > emb_len) {
5806 req_len = extnt_cnt * sizeof(uint16_t) +
5807 sizeof(union lpfc_sli4_cfg_shdr) +
5809 *emb = LPFC_SLI4_MBX_NEMBED;
5812 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5813 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5815 if (alloc_len < req_len) {
5816 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5817 "2982 Allocated DMA memory size (x%x) is "
5818 "less than the requested DMA memory "
5819 "size (x%x)\n", alloc_len, req_len);
5822 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5826 if (!phba->sli4_hba.intr_enable)
5827 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5829 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5830 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
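/*
 * Illustrative sketch (not part of the driver): the embedded-vs-nonembedded
 * decision made above.  The response must carry extnt_cnt 16-bit ids; when
 * that exceeds the embedded mailbox payload, the request is rebuilt as a
 * non-embedded, SGE-based mailbox.  Hypothetical helper name.
 */
static bool example_needs_nonembedded(uint16_t extnt_cnt)
{
	size_t req_len = extnt_cnt * sizeof(uint16_t);
	size_t emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
			 sizeof(uint32_t);	/* extents-specific word */

	return req_len > emb_len;
}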
5839 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5840 * @phba: Pointer to HBA context object.
5841 * @type: The resource extent type to allocate.
5843 * This function allocates the number of elements for the specified
5844 * resource type.
5847 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5850 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5851 uint16_t rsrc_id, rsrc_start, j, k;
5854 unsigned long longs;
5855 unsigned long *bmask;
5856 struct lpfc_rsrc_blks *rsrc_blks;
5859 struct lpfc_id_range *id_array = NULL;
5860 void *virtaddr = NULL;
5861 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5862 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5863 struct list_head *ext_blk_list;
5865 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5871 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5872 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5873 "3009 No available Resource Extents "
5874 "for resource type 0x%x: Count: 0x%x, "
5875 "Size 0x%x\n", type, rsrc_cnt,
5880 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5881 "2903 Post resource extents type-0x%x: "
5882 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5884 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5888 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5895 * Figure out where the response is located. Then get local pointers
5896 * to the response data. The port does not guarantee to respond to
5897 * all extents counts request so update the local variable with the
5898 * allocated count from the port.
5900 if (emb == LPFC_SLI4_MBX_EMBED) {
5901 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5902 id_array = &rsrc_ext->u.rsp.id[0];
5903 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5905 virtaddr = mbox->sge_array->addr[0];
5906 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5907 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5908 id_array = &n_rsrc->id;
5911 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5912 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5915 * Based on the resource size and count, correct the base and max
5916 * resource values.
5918 length = sizeof(struct lpfc_rsrc_blks);
5920 case LPFC_RSC_TYPE_FCOE_RPI:
5921 phba->sli4_hba.rpi_bmask = kcalloc(longs,
5922 sizeof(unsigned long),
5924 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5928 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
5931 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5932 kfree(phba->sli4_hba.rpi_bmask);
5938 * The next_rpi was initialized with the maximum available
5939 * count but the port may allocate a smaller number. Catch
5940 * that case and update the next_rpi.
5942 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5944 /* Initialize local ptrs for common extent processing later. */
5945 bmask = phba->sli4_hba.rpi_bmask;
5946 ids = phba->sli4_hba.rpi_ids;
5947 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5949 case LPFC_RSC_TYPE_FCOE_VPI:
5950 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
5952 if (unlikely(!phba->vpi_bmask)) {
5956 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
5958 if (unlikely(!phba->vpi_ids)) {
5959 kfree(phba->vpi_bmask);
5964 /* Initialize local ptrs for common extent processing later. */
5965 bmask = phba->vpi_bmask;
5966 ids = phba->vpi_ids;
5967 ext_blk_list = &phba->lpfc_vpi_blk_list;
5969 case LPFC_RSC_TYPE_FCOE_XRI:
5970 phba->sli4_hba.xri_bmask = kcalloc(longs,
5971 sizeof(unsigned long),
5973 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5977 phba->sli4_hba.max_cfg_param.xri_used = 0;
5978 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
5981 if (unlikely(!phba->sli4_hba.xri_ids)) {
5982 kfree(phba->sli4_hba.xri_bmask);
5987 /* Initialize local ptrs for common extent processing later. */
5988 bmask = phba->sli4_hba.xri_bmask;
5989 ids = phba->sli4_hba.xri_ids;
5990 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5992 case LPFC_RSC_TYPE_FCOE_VFI:
5993 phba->sli4_hba.vfi_bmask = kcalloc(longs,
5994 sizeof(unsigned long),
5996 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6000 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6003 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6004 kfree(phba->sli4_hba.vfi_bmask);
6009 /* Initialize local ptrs for common extent processing later. */
6010 bmask = phba->sli4_hba.vfi_bmask;
6011 ids = phba->sli4_hba.vfi_ids;
6012 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6015 /* Unsupported Opcode. Fail call. */
6019 ext_blk_list = NULL;
6024 * Complete initializing the extent configuration with the
6025 * allocated ids assigned to this function. The bitmask serves
6026 * as an index into the array and manages the available ids. The
6027 * array just stores the ids communicated to the port via the wqes.
6029 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6031 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6034 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6037 rsrc_blks = kzalloc(length, GFP_KERNEL);
6038 if (unlikely(!rsrc_blks)) {
6044 rsrc_blks->rsrc_start = rsrc_id;
6045 rsrc_blks->rsrc_size = rsrc_size;
6046 list_add_tail(&rsrc_blks->list, ext_blk_list);
6047 rsrc_start = rsrc_id;
6048 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6049 phba->sli4_hba.io_xri_start = rsrc_start +
6050 lpfc_sli4_get_iocb_cnt(phba);
6053 while (rsrc_id < (rsrc_start + rsrc_size)) {
6058 /* Entire word processed. Get next word.*/
6063 lpfc_sli4_mbox_cmd_free(phba, mbox);
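/*
 * Illustrative sketch (not part of the driver): consuming one id from the
 * bitmask/array pair initialized above.  The bitmask tracks which slots
 * are in use; the array maps a slot index to the port-assigned id carried
 * in WQEs.  This mirrors the shape of the driver's per-type allocators;
 * the helper name is hypothetical.
 */
static int example_alloc_id(unsigned long *bmask, uint16_t *ids, int max_ids)
{
	int idx;

	idx = find_first_zero_bit(bmask, max_ids);
	if (idx >= max_ids)
		return -ENOSPC;		/* all ids in use */
	set_bit(idx, bmask);
	return ids[idx];		/* port-assigned id for this slot */
}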
6070 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6071 * @phba: Pointer to HBA context object.
6072 * @type: the extent's type.
6074 * This function deallocates all extents of a particular resource type.
6075 * SLI4 does not allow for deallocating a particular extent range. It
6076 * is the caller's responsibility to release all kernel memory resources.
6079 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6082 uint32_t length, mbox_tmo = 0;
6084 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6085 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6087 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6092 * This function sends an embedded mailbox because it only sends the
6093 * resource type. All extents of this type are released by the
6096 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6097 sizeof(struct lpfc_sli4_cfg_mhdr));
6098 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6099 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6100 length, LPFC_SLI4_MBX_EMBED);
6102 /* Send an extents count of 0 - the dealloc doesn't use it. */
6103 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6104 LPFC_SLI4_MBX_EMBED);
6109 if (!phba->sli4_hba.intr_enable)
6110 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6112 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6113 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6120 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6121 if (bf_get(lpfc_mbox_hdr_status,
6122 &dealloc_rsrc->header.cfg_shdr.response)) {
6123 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6124 "2919 Failed to release resource extents "
6125 "for type %d - Status 0x%x Add'l Status 0x%x. "
6126 "Resource memory not released.\n",
6128 bf_get(lpfc_mbox_hdr_status,
6129 &dealloc_rsrc->header.cfg_shdr.response),
6130 bf_get(lpfc_mbox_hdr_add_status,
6131 &dealloc_rsrc->header.cfg_shdr.response));
6136 /* Release kernel memory resources for the specific type. */
6138 case LPFC_RSC_TYPE_FCOE_VPI:
6139 kfree(phba->vpi_bmask);
6140 kfree(phba->vpi_ids);
6141 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6142 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6143 &phba->lpfc_vpi_blk_list, list) {
6144 list_del_init(&rsrc_blk->list);
6147 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6149 case LPFC_RSC_TYPE_FCOE_XRI:
6150 kfree(phba->sli4_hba.xri_bmask);
6151 kfree(phba->sli4_hba.xri_ids);
6152 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6153 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6154 list_del_init(&rsrc_blk->list);
6158 case LPFC_RSC_TYPE_FCOE_VFI:
6159 kfree(phba->sli4_hba.vfi_bmask);
6160 kfree(phba->sli4_hba.vfi_ids);
6161 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6162 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6163 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6164 list_del_init(&rsrc_blk->list);
6168 case LPFC_RSC_TYPE_FCOE_RPI:
6169 /* RPI bitmask and physical id array are cleaned up earlier. */
6170 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6171 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6172 list_del_init(&rsrc_blk->list);
6180 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6183 mempool_free(mbox, phba->mbox_mem_pool);
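/*
 * [Editor's illustration - not part of the driver] bf_set()/bf_get(),
 * used throughout this file, are shift-and-mask accessors driven by
 * per-field descriptor macros. A minimal model with hypothetical field
 * parameters:
 */
#define EXAMPLE_FIELD_SHIFT	4
#define EXAMPLE_FIELD_MASK	0x0000000f

static inline void example_field_set(uint32_t *word, uint32_t val)
{
	*word = (*word & ~(EXAMPLE_FIELD_MASK << EXAMPLE_FIELD_SHIFT)) |
		((val & EXAMPLE_FIELD_MASK) << EXAMPLE_FIELD_SHIFT);
}

static inline uint32_t example_field_get(uint32_t word)
{
	return (word >> EXAMPLE_FIELD_SHIFT) & EXAMPLE_FIELD_MASK;
}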
6188 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6193 len = sizeof(struct lpfc_mbx_set_feature) -
6194 sizeof(struct lpfc_sli4_cfg_mhdr);
6195 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6196 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6197 LPFC_SLI4_MBX_EMBED);
6200 case LPFC_SET_UE_RECOVERY:
6201 bf_set(lpfc_mbx_set_feature_UER,
6202 &mbox->u.mqe.un.set_feature, 1);
6203 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6204 mbox->u.mqe.un.set_feature.param_len = 8;
6206 case LPFC_SET_MDS_DIAGS:
6207 bf_set(lpfc_mbx_set_feature_mds,
6208 &mbox->u.mqe.un.set_feature, 1);
6209 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6210 &mbox->u.mqe.un.set_feature, 1);
6211 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6212 mbox->u.mqe.un.set_feature.param_len = 8;
6220 * lpfc_ras_stop_fwlog - Disable FW logging by the adapter
6221 * @phba: Pointer to HBA context object.
6223 * Disable FW logging into host memory on the adapter. This must
6224 * be done before reading the logs from host memory.
6227 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6229 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6231 ras_fwlog->ras_active = false;
6233 /* Disable FW logging to host memory */
6234 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6235 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
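/*
 * [Editor's illustration - not part of the driver] The writel() above
 * posts one 32-bit value to a memory-mapped control register. When only
 * a single bit must change, the same pattern appears as a read-modify-
 * write; the register offset and bit below are hypothetical.
 */
static void example_set_ctl_bit(void __iomem *regs)
{
	uint32_t ctl;

	ctl = readl(regs);	/* hypothetical control register */
	ctl |= 0x1;		/* hypothetical control bit */
	writel(ctl, regs);
}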
6239 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6240 * @phba: Pointer to HBA context object.
6242 * This function is called to free memory allocated for RAS FW logging
6243 * support in the driver.
6246 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6248 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6249 struct lpfc_dmabuf *dmabuf, *next;
6251 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6252 list_for_each_entry_safe(dmabuf, next,
6253 &ras_fwlog->fwlog_buff_list,
6255 list_del(&dmabuf->list);
6256 dma_free_coherent(&phba->pcidev->dev,
6257 LPFC_RAS_MAX_ENTRY_SIZE,
6258 dmabuf->virt, dmabuf->phys);
6263 if (ras_fwlog->lwpd.virt) {
6264 dma_free_coherent(&phba->pcidev->dev,
6265 sizeof(uint32_t) * 2,
6266 ras_fwlog->lwpd.virt,
6267 ras_fwlog->lwpd.phys);
6268 ras_fwlog->lwpd.virt = NULL;
6271 ras_fwlog->ras_active = false;
6275 * lpfc_sli4_ras_dma_alloc - Allocate DMA memory for FW logging support
6276 * @phba: Pointer to HBA context object.
6277 * @fwlog_buff_count: Count of buffers to be created.
6279 * This routine allocates DMA memory for the Log Write Position Data
6280 * (LWPD) and for the buffers posted to the adapter for FW log updates.
6281 * The buffer count is calculated from the module param ras_fwlog_buffsize.
6282 * Each buffer posted to the FW is 64K in size.
6286 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6287 uint32_t fwlog_buff_count)
6289 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6290 struct lpfc_dmabuf *dmabuf;
6293 /* Initialize List */
6294 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6296 /* Allocate memory for the LWPD */
6297 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6298 sizeof(uint32_t) * 2,
6299 &ras_fwlog->lwpd.phys,
6301 if (!ras_fwlog->lwpd.virt) {
6302 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6303 "6185 LWPD Memory Alloc Failed\n");
6308 ras_fwlog->fw_buffcount = fwlog_buff_count;
6309 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6310 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6314 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6315 "6186 Memory Alloc failed FW logging");
6319 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6320 LPFC_RAS_MAX_ENTRY_SIZE,
6321 &dmabuf->phys, GFP_KERNEL);
6322 if (!dmabuf->virt) {
6325 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6326 "6187 DMA Alloc Failed FW logging");
6329 dmabuf->buffer_tag = i;
6330 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6335 lpfc_sli4_ras_dma_free(phba);
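/*
 * [Editor's illustration - not part of the driver] The caller derives
 * fwlog_buff_count from the ras_fwlog_buffsize module parameter before
 * invoking this routine. A worked sketch; the constant values below are
 * assumptions for illustration, not the driver's actual definitions.
 */
static uint32_t example_fwlog_entry_count(uint32_t cfg_buffsize_units)
{
	const uint32_t min_post_size = 256 * 1024;	/* assumed */
	const uint32_t max_entry_size = 64 * 1024;	/* 64K per buffer */

	/* e.g. cfg_buffsize_units == 2: (256K * 2) / 64K = 8 buffers */
	return (min_post_size * cfg_buffsize_units) / max_entry_size;
}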
6341 * lpfc_sli4_ras_mbox_cmpl - Completion handler for RAS MBX command
6342 * @phba: pointer to lpfc hba data structure.
6343 * @pmb: pointer to the driver internal queue element for mailbox command.
6345 * Completion handler for driver's RAS MBX command to the device.
6348 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6351 union lpfc_sli4_cfg_shdr *shdr;
6352 uint32_t shdr_status, shdr_add_status;
6353 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6357 shdr = (union lpfc_sli4_cfg_shdr *)
6358 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6359 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6360 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6362 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6363 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
6364 "6188 FW LOG mailbox "
6365 "completed with status x%x add_status x%x,"
6366 " mbx status x%x\n",
6367 shdr_status, shdr_add_status, mb->mbxStatus);
6369 ras_fwlog->ras_hwsupport = false;
6373 ras_fwlog->ras_active = true;
6374 mempool_free(pmb, phba->mbox_mem_pool);
6379 /* Free RAS DMA memory */
6380 lpfc_sli4_ras_dma_free(phba);
6381 mempool_free(pmb, phba->mbox_mem_pool);
6385 * lpfc_sli4_ras_fwlog_init - Initialize memory and post RAS MBX command
6386 * @phba: pointer to lpfc hba data structure.
6387 * @fwlog_level: Logging verbosity level.
6388 * @fwlog_enable: Enable/Disable logging.
6390 * Initialize memory and post a mailbox command to enable FW logging in host
6394 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6395 uint32_t fwlog_level,
6396 uint32_t fwlog_enable)
6398 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6399 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6400 struct lpfc_dmabuf *dmabuf;
6402 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6405 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6406 phba->cfg_ras_fwlog_buffsize);
6407 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
6410 * If re-enabling FW logging support, reuse the previously allocated
6411 * DMA buffers when posting the MBX command.
6413 if (!ras_fwlog->lwpd.virt) {
6414 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6416 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6417 "6189 FW Log Memory Allocation Failed");
6422 /* Setup Mailbox command */
6423 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6425 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6426 "6190 RAS MBX Alloc Failed");
6431 ras_fwlog->fw_loglevel = fwlog_level;
6432 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6433 sizeof(struct lpfc_sli4_cfg_mhdr));
6435 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6436 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6437 len, LPFC_SLI4_MBX_EMBED);
6439 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6440 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6442 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6443 ras_fwlog->fw_loglevel);
6444 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6445 ras_fwlog->fw_buffcount);
6446 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6447 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6449 /* Update DMA buffer address */
6450 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6451 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6453 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6454 putPaddrLow(dmabuf->phys);
6456 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6457 putPaddrHigh(dmabuf->phys);
6460 /* Update LWPD address */
6461 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6462 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
6464 mbox->vport = phba->pport;
6465 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
6467 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6469 if (rc == MBX_NOT_FINISHED) {
6470 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6471 "6191 FW-Log Mailbox failed. "
6472 "status %d mbxStatus : x%x", rc,
6473 bf_get(lpfc_mqe_status, &mbox->u.mqe));
6474 mempool_free(mbox, phba->mbox_mem_pool);
6481 lpfc_sli4_ras_dma_free(phba);
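/*
 * [Editor's illustration - not part of the driver] putPaddrLow() and
 * putPaddrHigh() above split a 64-bit DMA address into the two 32-bit
 * words the mailbox fields expect. A minimal model:
 */
static inline uint32_t example_paddr_lo(uint64_t paddr)
{
	return (uint32_t)(paddr & 0xffffffffULL);
}

static inline uint32_t example_paddr_hi(uint64_t paddr)
{
	return (uint32_t)(paddr >> 32);
}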
6487 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
6488 * @phba: Pointer to HBA context object.
6490 * Check if RAS is supported on the adapter and initialize it.
6493 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6495 /* Check whether the RAS FW log needs to be enabled */
6496 if (lpfc_check_fwlog_support(phba))
6499 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6500 LPFC_RAS_ENABLE_LOGGING);
6504 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
6505 * @phba: Pointer to HBA context object.
6507 * This function allocates all SLI4 resource identifiers.
6510 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6512 int i, rc, error = 0;
6513 uint16_t count, base;
6514 unsigned long longs;
6516 if (!phba->sli4_hba.rpi_hdrs_in_use)
6517 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6518 if (phba->sli4_hba.extents_in_use) {
6520 * The port supports resource extents. The XRI, VPI, VFI, RPI
6521 * resource extent count must be read and allocated before
6522 * provisioning the resource id arrays.
6524 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6525 LPFC_IDX_RSRC_RDY) {
6527 * Extent-based resources are set - the driver could
6528 * be in a port reset. Figure out if any corrective
6529 * actions need to be taken.
6531 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6532 LPFC_RSC_TYPE_FCOE_VFI);
6535 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6536 LPFC_RSC_TYPE_FCOE_VPI);
6539 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6540 LPFC_RSC_TYPE_FCOE_XRI);
6543 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6544 LPFC_RSC_TYPE_FCOE_RPI);
6549 * It's possible that the number of resources
6550 * provided to this port instance changed between
6551 * resets. Detect this condition and reallocate
6552 * resources. Otherwise, there is no action.
6555 lpfc_printf_log(phba, KERN_INFO,
6556 LOG_MBOX | LOG_INIT,
6557 "2931 Detected extent resource "
6558 "change. Reallocating all "
6560 rc = lpfc_sli4_dealloc_extent(phba,
6561 LPFC_RSC_TYPE_FCOE_VFI);
6562 rc = lpfc_sli4_dealloc_extent(phba,
6563 LPFC_RSC_TYPE_FCOE_VPI);
6564 rc = lpfc_sli4_dealloc_extent(phba,
6565 LPFC_RSC_TYPE_FCOE_XRI);
6566 rc = lpfc_sli4_dealloc_extent(phba,
6567 LPFC_RSC_TYPE_FCOE_RPI);
6572 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6576 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6580 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6584 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6587 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6592 * The port does not support resource extents. The XRI, VPI,
6593 * VFI, RPI resource ids were determined from READ_CONFIG.
6594 * Just allocate the bitmasks and provision the resource id
6595 * arrays. If a port reset is active, the resources don't
6596 * need any action - just exit.
6598 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6599 LPFC_IDX_RSRC_RDY) {
6600 lpfc_sli4_dealloc_resource_identifiers(phba);
6601 lpfc_sli4_remove_rpis(phba);
6604 count = phba->sli4_hba.max_cfg_param.max_rpi;
6606 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6607 "3279 Invalid provisioning of "
6612 base = phba->sli4_hba.max_cfg_param.rpi_base;
6613 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6614 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6615 sizeof(unsigned long),
6617 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6621 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
6623 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6625 goto free_rpi_bmask;
6628 for (i = 0; i < count; i++)
6629 phba->sli4_hba.rpi_ids[i] = base + i;
6632 count = phba->sli4_hba.max_cfg_param.max_vpi;
6634 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6635 "3280 Invalid provisioning of "
6640 base = phba->sli4_hba.max_cfg_param.vpi_base;
6641 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6642 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6644 if (unlikely(!phba->vpi_bmask)) {
6648 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
6650 if (unlikely(!phba->vpi_ids)) {
6652 goto free_vpi_bmask;
6655 for (i = 0; i < count; i++)
6656 phba->vpi_ids[i] = base + i;
6659 count = phba->sli4_hba.max_cfg_param.max_xri;
6661 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6662 "3281 Invalid provisioning of "
6667 base = phba->sli4_hba.max_cfg_param.xri_base;
6668 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6669 phba->sli4_hba.xri_bmask = kcalloc(longs,
6670 sizeof(unsigned long),
6672 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6676 phba->sli4_hba.max_cfg_param.xri_used = 0;
6677 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
6679 if (unlikely(!phba->sli4_hba.xri_ids)) {
6681 goto free_xri_bmask;
6684 for (i = 0; i < count; i++)
6685 phba->sli4_hba.xri_ids[i] = base + i;
6688 count = phba->sli4_hba.max_cfg_param.max_vfi;
6690 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6691 "3282 Invalid provisioning of "
6696 base = phba->sli4_hba.max_cfg_param.vfi_base;
6697 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6698 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6699 sizeof(unsigned long),
6701 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6705 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
6707 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6709 goto free_vfi_bmask;
6712 for (i = 0; i < count; i++)
6713 phba->sli4_hba.vfi_ids[i] = base + i;
6716 * Mark all resources ready. An HBA reset doesn't need
6717 * to redo this initialization.
6719 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6725 kfree(phba->sli4_hba.vfi_bmask);
6726 phba->sli4_hba.vfi_bmask = NULL;
6728 kfree(phba->sli4_hba.xri_ids);
6729 phba->sli4_hba.xri_ids = NULL;
6731 kfree(phba->sli4_hba.xri_bmask);
6732 phba->sli4_hba.xri_bmask = NULL;
6734 kfree(phba->vpi_ids);
6735 phba->vpi_ids = NULL;
6737 kfree(phba->vpi_bmask);
6738 phba->vpi_bmask = NULL;
6740 kfree(phba->sli4_hba.rpi_ids);
6741 phba->sli4_hba.rpi_ids = NULL;
6743 kfree(phba->sli4_hba.rpi_bmask);
6744 phba->sli4_hba.rpi_bmask = NULL;
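/*
 * [Editor's illustration - not part of the driver] Every pool above is
 * provisioned the same way: a bitmap sized by rounding the id count up
 * to whole longs, and an id array seeded with base..base+count-1. A
 * compact sketch of one pool, with hypothetical output parameters:
 */
static int example_provision_ids(uint16_t base, uint16_t count,
				 unsigned long **bmask, uint16_t **ids)
{
	unsigned long longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
	uint16_t i;

	*bmask = kcalloc(longs, sizeof(unsigned long), GFP_KERNEL);
	if (!*bmask)
		return -ENOMEM;
	*ids = kcalloc(count, sizeof(uint16_t), GFP_KERNEL);
	if (!*ids) {
		kfree(*bmask);
		*bmask = NULL;
		return -ENOMEM;
	}
	for (i = 0; i < count; i++)
		(*ids)[i] = base + i;
	return 0;
}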
6750 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
6751 * @phba: Pointer to HBA context object.
6753 * This function deallocates the resource identifiers of the specified
6757 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6759 if (phba->sli4_hba.extents_in_use) {
6760 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6761 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6762 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6763 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6765 kfree(phba->vpi_bmask);
6766 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6767 kfree(phba->vpi_ids);
6768 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6769 kfree(phba->sli4_hba.xri_bmask);
6770 kfree(phba->sli4_hba.xri_ids);
6771 kfree(phba->sli4_hba.vfi_bmask);
6772 kfree(phba->sli4_hba.vfi_ids);
6773 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6774 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6781 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6782 * @phba: Pointer to HBA context object.
6783 * @type: The resource extent type.
6784 * @extnt_cnt: buffer to hold the port extent count response.
6785 * @extnt_size: buffer to hold port extent size response.
6787 * This function calls the port to read the host allocated extents
6788 * for a particular type.
6791 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6792 uint16_t *extnt_cnt, uint16_t *extnt_size)
6796 uint16_t curr_blks = 0;
6797 uint32_t req_len, emb_len;
6798 uint32_t alloc_len, mbox_tmo;
6799 struct list_head *blk_list_head;
6800 struct lpfc_rsrc_blks *rsrc_blk;
6802 void *virtaddr = NULL;
6803 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6804 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6805 union lpfc_sli4_cfg_shdr *shdr;
6808 case LPFC_RSC_TYPE_FCOE_VPI:
6809 blk_list_head = &phba->lpfc_vpi_blk_list;
6811 case LPFC_RSC_TYPE_FCOE_XRI:
6812 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6814 case LPFC_RSC_TYPE_FCOE_VFI:
6815 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6817 case LPFC_RSC_TYPE_FCOE_RPI:
6818 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6824 /* Count the number of extents currently allocated for this type. */
6825 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6826 if (curr_blks == 0) {
6828 * The GET_ALLOCATED mailbox does not return the size,
6829 * just the count. All extents of a given type have the
6830 * same size, so take the size stored in the first
6831 * allocated block and set the return
6834 *extnt_size = rsrc_blk->rsrc_size;
6840 * Calculate the size of an embedded mailbox. The uint32_t
6841 * accounts for the extents-specific word.
6843 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6847 * Presume the allocation and response will fit into an embedded
6848 * mailbox. If not, reconfigure to a non-embedded mailbox.
6850 emb = LPFC_SLI4_MBX_EMBED;
6852 if (req_len > emb_len) {
6853 req_len = curr_blks * sizeof(uint16_t) +
6854 sizeof(union lpfc_sli4_cfg_shdr) +
6856 emb = LPFC_SLI4_MBX_NEMBED;
6859 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6862 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6864 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6865 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6867 if (alloc_len < req_len) {
6868 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6869 "2983 Allocated DMA memory size (x%x) is "
6870 "less than the requested DMA memory "
6871 "size (x%x)\n", alloc_len, req_len);
6875 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6881 if (!phba->sli4_hba.intr_enable)
6882 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6884 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6885 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6894 * Figure out where the response is located. Then get local pointers
6895 * to the response data. The port does not guarantee a response for
6896 * every requested extent count, so update the local variable with
6897 * the allocated count from the port.
6899 if (emb == LPFC_SLI4_MBX_EMBED) {
6900 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6901 shdr = &rsrc_ext->header.cfg_shdr;
6902 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6904 virtaddr = mbox->sge_array->addr[0];
6905 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6906 shdr = &n_rsrc->cfg_shdr;
6907 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6910 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6911 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6912 "2984 Failed to read allocated resources "
6913 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6915 bf_get(lpfc_mbox_hdr_status, &shdr->response),
6916 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6921 lpfc_sli4_mbox_cmd_free(phba, mbox);
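/*
 * [Editor's illustration - not part of the driver] The sizing logic
 * above decides between an embedded mailbox (payload carried inside the
 * mailbox itself) and a non-embedded one (payload in externally mapped
 * pages). The rule, in schematic form with hypothetical names:
 */
static bool example_use_embedded(uint32_t req_len, uint32_t emb_len)
{
	/* emb_len: payload bytes available inside the mailbox */
	return req_len <= emb_len;
}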
6926 * lpfc_sli4_repost_sgl_list - Repost the buffer sgl pages as a block
6927 * @phba: pointer to lpfc hba data structure.
6929 * @sgl_list: linked list of sgl buffers to post
6930 * @cnt: number of linked list buffers
6932 * This routine walks the list of buffers that have been allocated and
6933 * reposts them to the port by using SGL block post. This is needed after a
6934 * pci_function_reset/warm_start or start. It attempts to construct blocks
6935 * of buffer sgls which contain contiguous xris and uses the non-embedded
6936 * SGL block post mailbox commands to post them to the port. Any single
6937 * buffer sgl with a non-contiguous xri is posted with the embedded SGL post
6938 * mailbox command.
6940 * Returns: 0 = success, non-zero failure.
6943 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
6944 struct list_head *sgl_list, int cnt)
6946 struct lpfc_sglq *sglq_entry = NULL;
6947 struct lpfc_sglq *sglq_entry_next = NULL;
6948 struct lpfc_sglq *sglq_entry_first = NULL;
6949 int status, total_cnt;
6950 int post_cnt = 0, num_posted = 0, block_cnt = 0;
6951 int last_xritag = NO_XRI;
6952 LIST_HEAD(prep_sgl_list);
6953 LIST_HEAD(blck_sgl_list);
6954 LIST_HEAD(allc_sgl_list);
6955 LIST_HEAD(post_sgl_list);
6956 LIST_HEAD(free_sgl_list);
6958 spin_lock_irq(&phba->hbalock);
6959 spin_lock(&phba->sli4_hba.sgl_list_lock);
6960 list_splice_init(sgl_list, &allc_sgl_list);
6961 spin_unlock(&phba->sli4_hba.sgl_list_lock);
6962 spin_unlock_irq(&phba->hbalock);
6965 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6966 &allc_sgl_list, list) {
6967 list_del_init(&sglq_entry->list);
6969 if ((last_xritag != NO_XRI) &&
6970 (sglq_entry->sli4_xritag != last_xritag + 1)) {
6971 /* a hole in xri block, form a sgl posting block */
6972 list_splice_init(&prep_sgl_list, &blck_sgl_list);
6973 post_cnt = block_cnt - 1;
6974 /* prepare list for next posting block */
6975 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6978 /* prepare list for next posting block */
6979 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6980 /* enough sgls for non-embed sgl mbox command */
6981 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6982 list_splice_init(&prep_sgl_list,
6984 post_cnt = block_cnt;
6990 /* keep track of last sgl's xritag */
6991 last_xritag = sglq_entry->sli4_xritag;
6993 /* end of repost sgl list condition for buffers */
6994 if (num_posted == total_cnt) {
6995 if (post_cnt == 0) {
6996 list_splice_init(&prep_sgl_list,
6998 post_cnt = block_cnt;
6999 } else if (block_cnt == 1) {
7000 status = lpfc_sli4_post_sgl(phba,
7001 sglq_entry->phys, 0,
7002 sglq_entry->sli4_xritag);
7004 /* successful, put sgl to posted list */
7005 list_add_tail(&sglq_entry->list,
7008 /* Failure, put sgl to free list */
7009 lpfc_printf_log(phba, KERN_WARNING,
7011 "3159 Failed to post "
7012 "sgl, xritag:x%x\n",
7013 sglq_entry->sli4_xritag);
7014 list_add_tail(&sglq_entry->list,
7021 /* continue until a nembed page worth of sgls */
7025 /* post the buffer list sgls as a block */
7026 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7030 /* success, put sgl list to posted sgl list */
7031 list_splice_init(&blck_sgl_list, &post_sgl_list);
7033 /* Failure, put sgl list to free sgl list */
7034 sglq_entry_first = list_first_entry(&blck_sgl_list,
7037 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7038 "3160 Failed to post sgl-list, "
7040 sglq_entry_first->sli4_xritag,
7041 (sglq_entry_first->sli4_xritag +
7043 list_splice_init(&blck_sgl_list, &free_sgl_list);
7044 total_cnt -= post_cnt;
7047 /* don't reset xritag due to hole in xri block */
7049 last_xritag = NO_XRI;
7051 /* reset sgl post count for next round of posting */
7055 /* free the sgls that failed to post */
7056 lpfc_free_sgl_list(phba, &free_sgl_list);
7058 /* push sgls posted to the available list */
7059 if (!list_empty(&post_sgl_list)) {
7060 spin_lock_irq(&phba->hbalock);
7061 spin_lock(&phba->sli4_hba.sgl_list_lock);
7062 list_splice_init(&post_sgl_list, sgl_list);
7063 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7064 spin_unlock_irq(&phba->hbalock);
7066 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7067 "3161 Failure to post sgl to port.\n");
7071 /* return the number of XRIs actually posted */
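/*
 * [Editor's illustration - not part of the driver] The loop above
 * batches sgls whose xri tags are contiguous so one block post covers
 * the whole run; a hole, or hitting the non-embedded mailbox limit,
 * starts a new posting block. Modeled over a sorted tag array:
 */
static int example_count_posting_blocks(const uint16_t *xri, int cnt,
					int max_per_post)
{
	int i, blocks = 0, run_len = 0;

	for (i = 0; i < cnt; i++) {
		if (run_len &&
		    (xri[i] != xri[i - 1] + 1 || run_len == max_per_post)) {
			blocks++;	/* close the current posting block */
			run_len = 0;
		}
		run_len++;
	}
	return run_len ? blocks + 1 : blocks;
}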
7076 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7077 * @phba: pointer to lpfc hba data structure.
7079 * This routine walks the list of nvme buffers that have been allocated and
7080 * reposts them to the port by using SGL block post. This is needed after a
7081 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7082 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7083 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7085 * Returns: 0 = success, non-zero failure.
7088 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7090 LIST_HEAD(post_nblist);
7091 int num_posted, rc = 0;
7093 /* move all NVME buffers that need reposting onto a local list */
7094 lpfc_io_buf_flush(phba, &post_nblist);
7096 /* post the list of nvme buffer sgls to port if available */
7097 if (!list_empty(&post_nblist)) {
7098 num_posted = lpfc_sli4_post_io_sgl_list(
7099 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7100 /* failed to post any nvme buffer, return error */
7101 if (num_posted == 0)
7108 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7112 len = sizeof(struct lpfc_mbx_set_host_data) -
7113 sizeof(struct lpfc_sli4_cfg_mhdr);
7114 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7115 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7116 LPFC_SLI4_MBX_EMBED);
7118 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7119 mbox->u.mqe.un.set_host_data.param_len =
7120 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7121 snprintf(mbox->u.mqe.un.set_host_data.data,
7122 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7123 "Linux %s v"LPFC_DRIVER_VERSION,
7124 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
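/*
 * [Editor's illustration - not part of the driver] The host-data string
 * above is built with snprintf(), which never writes past the given
 * size and always NUL-terminates, so an over-long version string is
 * truncated rather than overflowing the fixed mailbox field. The buffer
 * length and version below are hypothetical.
 */
static void example_format_host_data(char *buf, size_t len, bool fcoe)
{
	snprintf(buf, len, "Linux %s v%s",
		 fcoe ? "FCoE" : "FC", "0.0.example");
}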
7128 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7129 struct lpfc_queue *drq, int count, int idx)
7132 struct lpfc_rqe hrqe;
7133 struct lpfc_rqe drqe;
7134 struct lpfc_rqb *rqbp;
7135 unsigned long flags;
7136 struct rqb_dmabuf *rqb_buffer;
7137 LIST_HEAD(rqb_buf_list);
7139 spin_lock_irqsave(&phba->hbalock, flags);
7141 for (i = 0; i < count; i++) {
7142 /* If the RQ is already full, don't bother */
7143 if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
7145 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7148 rqb_buffer->hrq = hrq;
7149 rqb_buffer->drq = drq;
7150 rqb_buffer->idx = idx;
7151 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7153 while (!list_empty(&rqb_buf_list)) {
7154 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7157 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7158 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7159 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7160 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7161 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7163 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7164 "6421 Cannot post to HRQ %d: %x %x %x "
7172 rqbp->rqb_free_buffer(phba, rqb_buffer);
7174 list_add_tail(&rqb_buffer->hbuf.list,
7175 &rqbp->rqb_buffer_list);
7176 rqbp->buffer_count++;
7179 spin_unlock_irqrestore(&phba->hbalock, flags);
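/*
 * [Editor's illustration - not part of the driver] The posting loop
 * above stops one entry short of the ring size. A common reason for
 * such a guard is to keep a full ring distinguishable from an empty one
 * when producer and consumer indexes would otherwise coincide; the
 * check itself, in isolation:
 */
static bool example_ring_can_post(int buffer_count, int pending,
				  int entry_count)
{
	return buffer_count + pending < entry_count - 1;
}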
7184 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
7185 * @phba: Pointer to HBA context object.
7187 * This function is the main SLI4 device initialization PCI function. It
7188 * is called by the HBA initialization code, HBA reset code and
7189 * HBA error attention handler code. Caller is not required to hold any
7193 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7195 int rc, i, cnt, len;
7196 LPFC_MBOXQ_t *mboxq;
7197 struct lpfc_mqe *mqe;
7200 uint32_t ftr_rsp = 0;
7201 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7202 struct lpfc_vport *vport = phba->pport;
7203 struct lpfc_dmabuf *mp;
7204 struct lpfc_rqb *rqbp;
7206 /* Perform a PCI function reset to start from clean */
7207 rc = lpfc_pci_function_reset(phba);
7211 /* Check the HBA Host Status Register for readiness */
7212 rc = lpfc_sli4_post_status_check(phba);
7216 spin_lock_irq(&phba->hbalock);
7217 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
7218 spin_unlock_irq(&phba->hbalock);
7222 * Allocate a single mailbox container for initializing the
7225 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7229 /* Issue READ_REV to collect vpd and FW information. */
7230 vpd_size = SLI4_PAGE_SIZE;
7231 vpd = kzalloc(vpd_size, GFP_KERNEL);
7237 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
7243 mqe = &mboxq->u.mqe;
7244 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
7245 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
7246 phba->hba_flag |= HBA_FCOE_MODE;
7247 phba->fcp_embed_io = 0; /* SLI4 FC support only */
7249 phba->hba_flag &= ~HBA_FCOE_MODE;
7252 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
7254 phba->hba_flag |= HBA_FIP_SUPPORT;
7256 phba->hba_flag &= ~HBA_FIP_SUPPORT;
7258 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
7260 if (phba->sli_rev != LPFC_SLI_REV4) {
7261 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7262 "0376 READ_REV Error. SLI Level %d "
7263 "FCoE enabled %d\n",
7264 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
7271 * Continue initialization with default values even if driver failed
7272 * to read FCoE param config regions, only read parameters if the
7275 if (phba->hba_flag & HBA_FCOE_MODE &&
7276 lpfc_sli4_read_fcoe_params(phba))
7277 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
7278 "2570 Failed to read FCoE parameters\n");
7281 * Retrieve sli4 device physical port name, failure of doing it
7282 * is considered as non-fatal.
7284 rc = lpfc_sli4_retrieve_pport_name(phba);
7286 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7287 "3080 Successful retrieving SLI4 device "
7288 "physical port name: %s.\n", phba->Port);
7290 rc = lpfc_sli4_get_ctl_attr(phba);
7292 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7293 "8351 Successful retrieving SLI4 device "
7297 * Evaluate the read rev and vpd data. Populate the driver
7298 * state with the results. If this routine fails, the failure
7299 * is not fatal as the driver will use generic values.
7301 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
7302 if (unlikely(!rc)) {
7303 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7304 "0377 Error %d parsing vpd. "
7305 "Using defaults.\n", rc);
7310 /* Save information as VPD data */
7311 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
7312 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
7315 * This is because the first G7 ASIC doesn't support the standard
7316 * 0x5a NVME cmd descriptor type/subtype
7318 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7319 LPFC_SLI_INTF_IF_TYPE_6) &&
7320 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
7321 (phba->vpd.rev.smRev == 0) &&
7322 (phba->cfg_nvme_embed_cmd == 1))
7323 phba->cfg_nvme_embed_cmd = 0;
7325 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
7326 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
7328 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
7330 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
7332 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
7334 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
7335 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
7336 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
7337 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
7338 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
7339 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
7340 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7341 "(%d):0380 READ_REV Status x%x "
7342 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
7343 mboxq->vport ? mboxq->vport->vpi : 0,
7344 bf_get(lpfc_mqe_status, mqe),
7345 phba->vpd.rev.opFwName,
7346 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
7347 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
7349 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
7350 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
7351 if (phba->pport->cfg_lun_queue_depth > rc) {
7352 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7353 "3362 LUN queue depth changed from %d to %d\n",
7354 phba->pport->cfg_lun_queue_depth, rc);
7355 phba->pport->cfg_lun_queue_depth = rc;
7358 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7359 LPFC_SLI_INTF_IF_TYPE_0) {
7360 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
7361 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7362 if (rc == MBX_SUCCESS) {
7363 phba->hba_flag |= HBA_RECOVERABLE_UE;
7364 /* Set 1Sec interval to detect UE */
7365 phba->eratt_poll_interval = 1;
7366 phba->sli4_hba.ue_to_sr = bf_get(
7367 lpfc_mbx_set_feature_UESR,
7368 &mboxq->u.mqe.un.set_feature);
7369 phba->sli4_hba.ue_to_rp = bf_get(
7370 lpfc_mbx_set_feature_UERP,
7371 &mboxq->u.mqe.un.set_feature);
7375 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
7376 /* Enable MDS Diagnostics only if the SLI Port supports it */
7377 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
7378 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7379 if (rc != MBX_SUCCESS)
7380 phba->mds_diags_support = 0;
7384 * Discover the port's supported feature set and match it against the
7387 lpfc_request_features(phba, mboxq);
7388 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7395 * The port must support FCP initiator mode as this is the
7396 * only mode running in the host.
7398 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7399 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7400 "0378 No support for fcpi mode.\n");
7404 /* Performance Hints are ONLY for FCoE */
7405 if (phba->hba_flag & HBA_FCOE_MODE) {
7406 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7407 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7409 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
7413 * If the port cannot support the host's requested features
7414 * then turn off the global config parameters to disable the
7415 * feature in the driver. This is not a fatal error.
7417 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7418 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7419 phba->cfg_enable_bg = 0;
7420 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
7425 if (phba->max_vpi && phba->cfg_enable_npiv &&
7426 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7430 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7431 "0379 Feature Mismatch Data: x%08x %08x "
7432 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7433 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7434 phba->cfg_enable_npiv, phba->max_vpi);
7435 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7436 phba->cfg_enable_bg = 0;
7437 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7438 phba->cfg_enable_npiv = 0;
7441 /* These SLI3 features are assumed in SLI4 */
7442 spin_lock_irq(&phba->hbalock);
7443 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7444 spin_unlock_irq(&phba->hbalock);
7447 * Allocate all resources (xri, rpi, vpi, vfi) now. Subsequent
7448 * calls depend on these resources to complete port setup.
7450 rc = lpfc_sli4_alloc_resource_identifiers(phba);
7452 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7453 "2920 Failed to alloc Resource IDs "
7458 lpfc_set_host_data(phba, mboxq);
7460 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7462 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7463 "2134 Failed to set host os driver version %x",
7467 /* Read the port's service parameters. */
7468 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7470 phba->link_state = LPFC_HBA_ERROR;
7475 mboxq->vport = vport;
7476 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7477 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
7478 if (rc == MBX_SUCCESS) {
7479 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7484 * This memory was allocated by the lpfc_read_sparam routine. Release
7485 * it to the mbuf pool.
7487 lpfc_mbuf_free(phba, mp->virt, mp->phys);
7489 mboxq->ctx_buf = NULL;
7491 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7492 "0382 READ_SPARAM command failed "
7493 "status %d, mbxStatus x%x\n",
7494 rc, bf_get(lpfc_mqe_status, mqe));
7495 phba->link_state = LPFC_HBA_ERROR;
7500 lpfc_update_vport_wwn(vport);
7502 /* Update the fc_host data structures with new wwn. */
7503 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7504 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7506 /* Create all the SLI4 queues */
7507 rc = lpfc_sli4_queue_create(phba);
7509 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7510 "3089 Failed to allocate queues\n");
7514 /* Set up all the queues to the device */
7515 rc = lpfc_sli4_queue_setup(phba);
7517 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7518 "0381 Error %d during queue setup.\n ", rc);
7519 goto out_stop_timers;
7521 /* Initialize the driver internal SLI layer lists. */
7522 lpfc_sli4_setup(phba);
7523 lpfc_sli4_queue_init(phba);
7525 /* update host els xri-sgl sizes and mappings */
7526 rc = lpfc_sli4_els_sgl_update(phba);
7528 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7529 "1400 Failed to update xri-sgl size and "
7530 "mapping: %d\n", rc);
7531 goto out_destroy_queue;
7534 /* register the els sgl pool to the port */
7535 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7536 phba->sli4_hba.els_xri_cnt);
7537 if (unlikely(rc < 0)) {
7538 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7539 "0582 Error %d during els sgl post "
7542 goto out_destroy_queue;
7544 phba->sli4_hba.els_xri_cnt = rc;
7546 if (phba->nvmet_support) {
7547 /* update host nvmet xri-sgl sizes and mappings */
7548 rc = lpfc_sli4_nvmet_sgl_update(phba);
7550 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7551 "6308 Failed to update nvmet-sgl size "
7552 "and mapping: %d\n", rc);
7553 goto out_destroy_queue;
7556 /* register the nvmet sgl pool to the port */
7557 rc = lpfc_sli4_repost_sgl_list(
7559 &phba->sli4_hba.lpfc_nvmet_sgl_list,
7560 phba->sli4_hba.nvmet_xri_cnt);
7561 if (unlikely(rc < 0)) {
7562 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7563 "3117 Error %d during nvmet "
7566 goto out_destroy_queue;
7568 phba->sli4_hba.nvmet_xri_cnt = rc;
7570 cnt = phba->cfg_iocb_cnt * 1024;
7571 /* We need 1 iocbq for every SGL, for IO processing */
7572 cnt += phba->sli4_hba.nvmet_xri_cnt;
7574 /* update host common xri-sgl sizes and mappings */
7575 rc = lpfc_sli4_io_sgl_update(phba);
7577 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7578 "6082 Failed to update nvme-sgl size "
7579 "and mapping: %d\n", rc);
7580 goto out_destroy_queue;
7583 /* register the allocated common sgl pool to the port */
7584 rc = lpfc_sli4_repost_io_sgl_list(phba);
7586 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7587 "6116 Error %d during nvme sgl post "
7589 /* Some NVME buffers were moved to abort nvme list */
7590 /* A pci function reset will repost them */
7592 goto out_destroy_queue;
7594 cnt = phba->cfg_iocb_cnt * 1024;
7597 if (!phba->sli.iocbq_lookup) {
7598 /* Initialize and populate the iocb list per host */
7599 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7600 "2821 initialize iocb list %d total %d\n",
7601 phba->cfg_iocb_cnt, cnt);
7602 rc = lpfc_init_iocb_list(phba, cnt);
7604 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7605 "1413 Failed to init iocb list.\n");
7606 goto out_destroy_queue;
7610 if (phba->nvmet_support)
7611 lpfc_nvmet_create_targetport(phba);
7613 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7614 /* Post initial buffers to all RQs created */
7615 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7616 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7617 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7618 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7619 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
7620 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7621 rqbp->buffer_count = 0;
7623 lpfc_post_rq_buffer(
7624 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7625 phba->sli4_hba.nvmet_mrq_data[i],
7626 phba->cfg_nvmet_mrq_post, i);
7630 /* Post the rpi header region to the device. */
7631 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7633 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7634 "0393 Error %d during rpi post operation\n",
7637 goto out_destroy_queue;
7639 lpfc_sli4_node_prep(phba);
7641 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7642 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7644 * The FC Port needs to register FCFI (index 0)
7646 lpfc_reg_fcfi(phba, mboxq);
7647 mboxq->vport = phba->pport;
7648 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7649 if (rc != MBX_SUCCESS)
7650 goto out_unset_queue;
7652 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7653 &mboxq->u.mqe.un.reg_fcfi);
7655 /* We are a NVME Target mode with MRQ > 1 */
7657 /* First register the FCFI */
7658 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7659 mboxq->vport = phba->pport;
7660 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7661 if (rc != MBX_SUCCESS)
7662 goto out_unset_queue;
7664 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7665 &mboxq->u.mqe.un.reg_fcfi_mrq);
7667 /* Next register the MRQs */
7668 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
7669 mboxq->vport = phba->pport;
7670 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7671 if (rc != MBX_SUCCESS)
7672 goto out_unset_queue;
7675 /* Check if the port is configured to be disabled */
7676 lpfc_sli_read_link_ste(phba);
7679 /* Don't post more new bufs if repost already recovered
7682 if (phba->nvmet_support == 0) {
7683 if (phba->sli4_hba.io_xri_cnt == 0) {
7684 len = lpfc_new_io_buf(
7685 phba, phba->sli4_hba.io_xri_max);
7688 goto out_unset_queue;
7691 if (phba->cfg_xri_rebalancing)
7692 lpfc_create_multixri_pools(phba);
7695 phba->cfg_xri_rebalancing = 0;
7698 /* Allow asynchronous mailbox command to go through */
7699 spin_lock_irq(&phba->hbalock);
7700 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7701 spin_unlock_irq(&phba->hbalock);
7703 /* Post receive buffers to the device */
7704 lpfc_sli4_rb_setup(phba);
7706 /* Reset HBA FCF states after HBA reset */
7707 phba->fcf.fcf_flag = 0;
7708 phba->fcf.current_rec.flag = 0;
7710 /* Start the ELS watchdog timer */
7711 mod_timer(&vport->els_tmofunc,
7712 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
7714 /* Start heart beat timer */
7715 mod_timer(&phba->hb_tmofunc,
7716 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
7717 phba->hb_outstanding = 0;
7718 phba->last_completion_time = jiffies;
7720 /* start eq_delay heartbeat */
7721 if (phba->cfg_auto_imax)
7722 queue_delayed_work(phba->wq, &phba->eq_delay_work,
7723 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
7725 /* Start error attention (ERATT) polling timer */
7726 mod_timer(&phba->eratt_poll,
7727 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
7729 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
7730 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7731 rc = pci_enable_pcie_error_reporting(phba->pcidev);
7733 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7734 "2829 This device supports "
7735 "Advanced Error Reporting (AER)\n");
7736 spin_lock_irq(&phba->hbalock);
7737 phba->hba_flag |= HBA_AER_ENABLED;
7738 spin_unlock_irq(&phba->hbalock);
7740 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7741 "2830 This device does not support "
7742 "Advanced Error Reporting (AER)\n");
7743 phba->cfg_aer_support = 0;
7749 * The port is ready, set the host's link state to LINK_DOWN
7750 * in preparation for link interrupts.
7752 spin_lock_irq(&phba->hbalock);
7753 phba->link_state = LPFC_LINK_DOWN;
7755 /* Check if physical ports are trunked */
7756 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
7757 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
7758 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
7759 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
7760 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
7761 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
7762 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
7763 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
7764 spin_unlock_irq(&phba->hbalock);
7766 /* Arm the CQs and then EQs on device */
7767 lpfc_sli4_arm_cqeq_intr(phba);
7769 /* Indicate device interrupt mode */
7770 phba->sli4_hba.intr_enable = 1;
7772 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7773 (phba->hba_flag & LINK_DISABLED)) {
7774 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7775 "3103 Adapter Link is disabled.\n");
7776 lpfc_down_link(phba, mboxq);
7777 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7778 if (rc != MBX_SUCCESS) {
7779 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7780 "3104 Adapter failed to issue "
7781 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
7782 goto out_io_buff_free;
7784 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7785 /* don't perform init_link on SLI4 FC port loopback test */
7786 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7787 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7789 goto out_io_buff_free;
7792 mempool_free(mboxq, phba->mbox_mem_pool);
7795 /* Free allocated IO Buffers */
7798 /* Unset all the queues set up in this routine when error out */
7799 lpfc_sli4_queue_unset(phba);
7801 lpfc_free_iocb_list(phba);
7802 lpfc_sli4_queue_destroy(phba);
7804 lpfc_stop_hba_timers(phba);
7806 mempool_free(mboxq, phba->mbox_mem_pool);
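/*
 * [Editor's illustration - not part of the driver] The error paths
 * above follow the kernel's goto-unwind idiom: each label releases
 * exactly what was set up before the failure, in reverse order. A
 * minimal self-contained model with hypothetical steps:
 */
static int example_step_a(void) { return 0; }
static int example_step_b(void) { return 0; }
static void example_undo_a(void) { }

static int example_setup(void)
{
	int rc;

	rc = example_step_a();
	if (rc)
		return rc;
	rc = example_step_b();
	if (rc)
		goto out_undo_a;
	return 0;

out_undo_a:
	example_undo_a();
	return rc;
}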
7811 * lpfc_mbox_timeout - Timeout call back function for mbox timer
7812 * @t: pointer to the timer_list embedded in the hba structure.
7814 * This is the callback function for the mailbox timer. The mailbox
7815 * timer is armed when a new mailbox command is issued and the timer
7816 * is deleted when the mailbox completes. The function is called by
7817 * the kernel timer code when a mailbox does not complete within the
7818 * expected time. This function wakes up the worker thread to
7819 * process the mailbox timeout and returns. All the processing is
7820 * done by the worker thread function lpfc_mbox_timeout_handler.
7823 lpfc_mbox_timeout(struct timer_list *t)
7825 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
7826 unsigned long iflag;
7827 uint32_t tmo_posted;
7829 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
7830 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
7832 phba->pport->work_port_events |= WORKER_MBOX_TMO;
7833 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7836 lpfc_worker_wake_up(phba);
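/*
 * [Editor's illustration - not part of the driver] from_timer() above
 * recovers the enclosing structure from a pointer to its embedded
 * timer, the same arithmetic container_of() performs. A minimal model
 * with a hypothetical structure:
 */
struct example_outer {
	int data;
	struct timer_list tmo;		/* embedded member */
};

static struct example_outer *example_from_timer(struct timer_list *t)
{
	return (struct example_outer *)((char *)t -
					offsetof(struct example_outer, tmo));
}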
7841 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
7843 * @phba: Pointer to HBA context object.
7845 * This function checks if any mailbox completions are present on the mailbox
7849 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
7853 struct lpfc_queue *mcq;
7854 struct lpfc_mcqe *mcqe;
7855 bool pending_completions = false;
7858 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7861 /* Check for completions on mailbox completion queue */
7863 mcq = phba->sli4_hba.mbx_cq;
7864 idx = mcq->hba_index;
7865 qe_valid = mcq->qe_valid;
7866 while (bf_get_le32(lpfc_cqe_valid,
7867 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
7868 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
7869 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
7870 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
7871 pending_completions = true;
7874 idx = (idx + 1) % mcq->entry_count;
7875 if (mcq->hba_index == idx)
7878 /* if the index wrapped around, toggle the valid bit */
7879 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
7880 qe_valid = (qe_valid) ? 0 : 1;
7882 return pending_completions;
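/*
 * [Editor's illustration - not part of the driver] The scan above walks
 * the completion queue using a valid (phase) bit: hardware writes each
 * entry with the current phase, and the expected phase flips each time
 * the scan index wraps, so entries left over from the previous lap no
 * longer match. A self-contained model with hypothetical types:
 */
struct example_cqe {
	uint32_t valid;			/* phase bit written by hardware */
	uint32_t payload;
};

static int example_scan_cq(const struct example_cqe *ring,
			   uint32_t entries, uint32_t start, uint32_t phase)
{
	uint32_t idx = start;
	int found = 0;

	while (ring[idx].valid == phase) {
		found++;
		idx = (idx + 1) % entries;
		if (idx == start)
			break;
		if (!idx)		/* wrapped: flip expected phase */
			phase ^= 1;
	}
	return found;
}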
7887 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
7889 * @phba: Pointer to HBA context object.
7891 * For sli4, it is possible to miss an interrupt. As such, mbox completions
7892 * may be missed, causing erroneous mailbox timeouts to occur. This function
7893 * checks to see if mbox completions are on the mailbox completion queue
7894 * and will process all the completions associated with the eq for the
7895 * mailbox completion queue.
7898 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
7900 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
7902 struct lpfc_queue *fpeq = NULL;
7903 struct lpfc_queue *eq;
7906 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7909 /* Find the EQ associated with the mbox CQ */
7910 if (sli4_hba->hdwq) {
7911 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
7912 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
7913 if (eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
7922 /* Turn off interrupts from this EQ */
7924 sli4_hba->sli4_eq_clr_intr(fpeq);
7926 /* Check to see if a mbox completion is pending */
7928 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
7931 * If a mbox completion is pending, process all the events on EQ
7932 * associated with the mbox completion queue (this could include
7933 * mailbox commands, async events, els commands, receive queue data
7938 /* process and rearm the EQ */
7939 lpfc_sli4_process_eq(phba, fpeq);
7941 /* Always clear and re-arm the EQ */
7942 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
7944 return mbox_pending;
7949 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
7950 * @phba: Pointer to HBA context object.
7952 * This function is called from worker thread when a mailbox command times out.
7953 * The caller is not required to hold any locks. This function will reset the
7954 * HBA and recover all the pending commands.
7957 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
7959 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
7960 MAILBOX_t *mb = NULL;
7962 struct lpfc_sli *psli = &phba->sli;
7964 /* If the mailbox completed, process the completion and return */
7965 if (lpfc_sli4_process_missed_mbox_completions(phba))
7970 /* Check the pmbox pointer first. There is a race condition
7971 * between the mbox timeout handler getting executed in the
7972 * worklist and the mailbox actually completing. When this
7973 * race condition occurs, the mbox_active will be NULL.
7975 spin_lock_irq(&phba->hbalock);
7976 if (pmbox == NULL) {
7977 lpfc_printf_log(phba, KERN_WARNING,
7979 "0353 Active Mailbox cleared - mailbox timeout "
7981 spin_unlock_irq(&phba->hbalock);
7985 /* Mbox cmd <mbxCommand> timeout */
7986 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7987 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
7989 phba->pport->port_state,
7991 phba->sli.mbox_active);
7992 spin_unlock_irq(&phba->hbalock);
7994 /* Setting state unknown so lpfc_sli_abort_iocb_ring
7995 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
7996 * it to fail all outstanding SCSI IO.
7998 spin_lock_irq(&phba->pport->work_port_lock);
7999 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8000 spin_unlock_irq(&phba->pport->work_port_lock);
8001 spin_lock_irq(&phba->hbalock);
8002 phba->link_state = LPFC_LINK_UNKNOWN;
8003 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8004 spin_unlock_irq(&phba->hbalock);
8006 lpfc_sli_abort_fcp_rings(phba);
8008 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8009 "0345 Resetting board due to mailbox timeout\n");
8011 /* Reset the HBA device */
8012 lpfc_reset_hba(phba);
8016 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
8017 * @phba: Pointer to HBA context object.
8018 * @pmbox: Pointer to mailbox object.
8019 * @flag: Flag indicating how the mailbox need to be processed.
8021 * This function is called by discovery code and HBA management code
8022 * to submit a mailbox command to firmware with SLI-3 interface spec. This
8023 * function gets the hbalock to protect the data structures.
8024 * The mailbox command can be submitted in polling mode, in which case
8025 * this function will wait in a polling loop for the completion of the
8027 * If the mailbox is submitted in no_wait mode (not polling) the
8028 * function will submit the command and return immediately without waiting
8029 * for the mailbox completion. The no_wait mode is supported only when the
8030 * HBA is in SLI2/SLI3 mode - interrupts are enabled.
8031 * The SLI interface allows only one mailbox pending at a time. If the
8032 * mailbox is issued in polling mode and there is already a mailbox
8033 * pending, then the function will return an error. If the mailbox is issued
8034 * in NO_WAIT mode and there is a mailbox pending already, the function
8035 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
8036 * The sli layer owns the mailbox object until the completion of the mailbox
8037 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
8038 * return codes the caller owns the mailbox command after the return of
8042 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
8046 struct lpfc_sli *psli = &phba->sli;
8047 uint32_t status, evtctr;
8048 uint32_t ha_copy, hc_copy;
8050 unsigned long timeout;
8051 unsigned long drvr_flag = 0;
8052 uint32_t word0, ldata;
8053 void __iomem *to_slim;
8054 int processing_queue = 0;
8056 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8058 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8059 /* processing mbox queue from intr_handler */
8060 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8061 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8064 processing_queue = 1;
8065 pmbox = lpfc_mbox_get(phba);
8067 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8072 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
8073 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
8075 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8076 lpfc_printf_log(phba, KERN_ERR,
8077 LOG_MBOX | LOG_VPORT,
8078 "1806 Mbox x%x failed. No vport\n",
8079 pmbox->u.mb.mbxCommand);
8081 goto out_not_finished;
8085 /* If the PCI channel is in offline state, do not post mbox. */
8086 if (unlikely(pci_channel_offline(phba->pcidev))) {
8087 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8088 goto out_not_finished;
8091 /* If HBA has a deferred error attention, fail the iocb. */
8092 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8093 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8094 goto out_not_finished;
8100 status = MBX_SUCCESS;
8102 if (phba->link_state == LPFC_HBA_ERROR) {
8103 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8105 /* Mbox command <mbxCommand> cannot issue */
8106 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8107 "(%d):0311 Mailbox command x%x cannot "
8108 "issue Data: x%x x%x\n",
8109 pmbox->vport ? pmbox->vport->vpi : 0,
8110 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8111 goto out_not_finished;
8114 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
8115 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
8116 !(hc_copy & HC_MBINT_ENA)) {
8117 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8118 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8119 "(%d):2528 Mailbox command x%x cannot "
8120 "issue Data: x%x x%x\n",
8121 pmbox->vport ? pmbox->vport->vpi : 0,
8122 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8123 goto out_not_finished;
8127 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8128 /* Polling for a mbox command when another one is already active
8129 * is not allowed in SLI. Also, the driver must have established
8130 * SLI2 mode to queue and process multiple mbox commands.
8133 if (flag & MBX_POLL) {
8134 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8136 /* Mbox command <mbxCommand> cannot issue */
8137 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8138 "(%d):2529 Mailbox command x%x "
8139 "cannot issue Data: x%x x%x\n",
8140 pmbox->vport ? pmbox->vport->vpi : 0,
8141 pmbox->u.mb.mbxCommand,
8142 psli->sli_flag, flag);
8143 goto out_not_finished;
8146 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
8147 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8148 /* Mbox command <mbxCommand> cannot issue */
8149 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8150 "(%d):2530 Mailbox command x%x "
8151 "cannot issue Data: x%x x%x\n",
8152 pmbox->vport ? pmbox->vport->vpi : 0,
8153 pmbox->u.mb.mbxCommand,
8154 psli->sli_flag, flag);
8155 goto out_not_finished;
8158 /* Another mailbox command is still being processed, queue this
8159 * command to be processed later.
8161 lpfc_mbox_put(phba, pmbox);
8163 /* Mbox cmd issue - BUSY */
8164 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8165 "(%d):0308 Mbox cmd issue - BUSY Data: "
8166 "x%x x%x x%x x%x\n",
8167 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
8169 phba->pport ? phba->pport->port_state : 0xff,
8170 psli->sli_flag, flag);
8172 psli->slistat.mbox_busy++;
8173 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8176 lpfc_debugfs_disc_trc(pmbox->vport,
8177 LPFC_DISC_TRC_MBOX_VPORT,
8178 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
8179 (uint32_t)mbx->mbxCommand,
8180 mbx->un.varWords[0], mbx->un.varWords[1]);
8183 lpfc_debugfs_disc_trc(phba->pport,
8185 "MBOX Bsy: cmd:x%x mb:x%x x%x",
8186 (uint32_t)mbx->mbxCommand,
8187 mbx->un.varWords[0], mbx->un.varWords[1]);
8193 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8195 /* If we are not polling, we MUST be in SLI2 mode */
8196 if (flag != MBX_POLL) {
8197 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
8198 (mbx->mbxCommand != MBX_KILL_BOARD)) {
8199 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8200 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8201 /* Mbox command <mbxCommand> cannot issue */
8202 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8203 "(%d):2531 Mailbox command x%x "
8204 "cannot issue Data: x%x x%x\n",
8205 pmbox->vport ? pmbox->vport->vpi : 0,
8206 pmbox->u.mb.mbxCommand,
8207 psli->sli_flag, flag);
8208 goto out_not_finished;
8210 /* timeout active mbox command */
8211 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8213 mod_timer(&psli->mbox_tmo, jiffies + timeout);
8216 /* Mailbox cmd <cmd> issue */
8217 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8218 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
8220 pmbox->vport ? pmbox->vport->vpi : 0,
8222 phba->pport ? phba->pport->port_state : 0xff,
8223 psli->sli_flag, flag);
8225 if (mbx->mbxCommand != MBX_HEARTBEAT) {
8227 lpfc_debugfs_disc_trc(pmbox->vport,
8228 LPFC_DISC_TRC_MBOX_VPORT,
8229 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8230 (uint32_t)mbx->mbxCommand,
8231 mbx->un.varWords[0], mbx->un.varWords[1]);
8234 lpfc_debugfs_disc_trc(phba->pport,
8236 "MBOX Send: cmd:x%x mb:x%x x%x",
8237 (uint32_t)mbx->mbxCommand,
8238 mbx->un.varWords[0], mbx->un.varWords[1]);
8242 psli->slistat.mbox_cmd++;
8243 evtctr = psli->slistat.mbox_event;
8245 /* next set own bit for the adapter and copy over command word */
8246 mbx->mbxOwner = OWN_CHIP;
8248 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8249 /* Populate mbox extension offset word. */
8250 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
8251 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8252 = (uint8_t *)phba->mbox_ext
8253 - (uint8_t *)phba->mbox;
8256 /* Copy the mailbox extension data */
8257 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
8258 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
8259 (uint8_t *)phba->mbox_ext,
8260 pmbox->in_ext_byte_len);
8262 /* Copy command data to host SLIM area */
8263 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
8265 /* Populate mbox extension offset word. */
8266 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
8267 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8268 = MAILBOX_HBA_EXT_OFFSET;
8270 /* Copy the mailbox extension data */
8271 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
8272 lpfc_memcpy_to_slim(phba->MBslimaddr +
8273 MAILBOX_HBA_EXT_OFFSET,
8274 pmbox->ctx_buf, pmbox->in_ext_byte_len);
8276 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8277 /* copy command data into host mbox for cmpl */
8278 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
8281 /* First copy mbox command data to HBA SLIM, skip past first word */
8283 to_slim = phba->MBslimaddr + sizeof (uint32_t);
8284 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
8285 MAILBOX_CMD_SIZE - sizeof (uint32_t));
8287 /* Next copy over first word, with mbxOwner set */
8288 ldata = *((uint32_t *)mbx);
8289 to_slim = phba->MBslimaddr;
8290 writel(ldata, to_slim);
8291 readl(to_slim); /* flush */
8293 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8294 /* switch over to host mailbox */
8295 psli->sli_flag |= LPFC_SLI_ACTIVE;
8302 /* Set up reference to mailbox command */
8303 psli->mbox_active = pmbox;
8304 /* Interrupt board to do it */
8305 writel(CA_MBATT, phba->CAregaddr);
8306 readl(phba->CAregaddr); /* flush */
8307 /* Don't wait for it to finish, just return */
8311 /* Set up null reference to mailbox command */
8312 psli->mbox_active = NULL;
8313 /* Interrupt board to do it */
8314 writel(CA_MBATT, phba->CAregaddr);
8315 readl(phba->CAregaddr); /* flush */
8317 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8318 /* First read mbox status word */
8319 word0 = *((uint32_t *)phba->mbox);
8320 word0 = le32_to_cpu(word0);
8322 /* First read mbox status word */
8323 if (lpfc_readl(phba->MBslimaddr, &word0)) {
8324 spin_unlock_irqrestore(&phba->hbalock,
8326 goto out_not_finished;
8330 /* Read the HBA Host Attention Register */
8331 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8332 spin_unlock_irqrestore(&phba->hbalock,
8334 goto out_not_finished;
8336 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8339 /* Wait for command to complete */
8340 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
8341 (!(ha_copy & HA_MBATT) &&
8342 (phba->link_state > LPFC_WARM_START))) {
8343 if (time_after(jiffies, timeout)) {
8344 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8345 spin_unlock_irqrestore(&phba->hbalock,
8347 goto out_not_finished;
8350 /* Check if we took a mbox interrupt while we were polling */
8352 if (((word0 & OWN_CHIP) != OWN_CHIP)
8353 && (evtctr != psli->slistat.mbox_event))
8357 spin_unlock_irqrestore(&phba->hbalock,
8360 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8363 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8364 /* First copy command data */
8365 word0 = *((uint32_t *)phba->mbox);
8366 word0 = le32_to_cpu(word0);
8367 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
8370 /* Check real SLIM for any errors */
8371 slimword0 = readl(phba->MBslimaddr);
8372 slimmb = (MAILBOX_t *) & slimword0;
8373 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
8374 && slimmb->mbxStatus) {
8381 /* First copy command data */
8382 word0 = readl(phba->MBslimaddr);
8384 /* Read the HBA Host Attention Register */
8385 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8386 spin_unlock_irqrestore(&phba->hbalock,
8388 goto out_not_finished;
8392 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8393 /* copy results back to user */
8394 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
8396 /* Copy the mailbox extension data */
8397 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8398 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
8400 pmbox->out_ext_byte_len);
8403 /* First copy command data */
8404 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
8406 /* Copy the mailbox extension data */
8407 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8408 lpfc_memcpy_from_slim(
8411 MAILBOX_HBA_EXT_OFFSET,
8412 pmbox->out_ext_byte_len);
8416 writel(HA_MBATT, phba->HAregaddr);
8417 readl(phba->HAregaddr); /* flush */
8419 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8420 status = mbx->mbxStatus;
8423 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8427 if (processing_queue) {
8428 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
8429 lpfc_mbox_cmpl_put(phba, pmbox);
8431 return MBX_NOT_FINISHED;
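/*
 * Usage sketch (illustrative only, not driver code): a polled SLI-3
 * mailbox command is typically allocated from the mailbox mempool,
 * built by one of the lpfc_mbox.c helpers, issued with MBX_POLL, and
 * freed by the caller unless the command timed out (on MBX_TIMEOUT the
 * SLI layer still references the mailbox). The error mapping below is
 * an assumption for the example:
 *
 *	LPFC_MBOXQ_t *pmb;
 *	int rc;
 *
 *	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!pmb)
 *		return -ENOMEM;
 *	lpfc_read_rev(phba, pmb);
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 *	return (rc == MBX_SUCCESS) ? 0 : -EIO;
 */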
8435 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
8436 * @phba: Pointer to HBA context object.
8438 * The function blocks the posting of SLI4 asynchronous mailbox commands from
8439 * the driver internal pending mailbox queue. It will then try to wait out the
8440 * possible outstanding mailbox command before returning.
8443 * 0 - the outstanding mailbox command completed; otherwise, the wait for
8444 * the outstanding mailbox command timed out.
8447 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
8449 struct lpfc_sli *psli = &phba->sli;
8451 unsigned long timeout = 0;
8453 /* Mark the asynchronous mailbox command posting as blocked */
8454 spin_lock_irq(&phba->hbalock);
8455 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8456 /* Determine how long we might wait for the active mailbox
8457 * command to be gracefully completed by firmware.
8459 if (phba->sli.mbox_active)
8460 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8461 phba->sli.mbox_active) *
8463 spin_unlock_irq(&phba->hbalock);
8465 /* Make sure the mailbox is really active */
8467 lpfc_sli4_process_missed_mbox_completions(phba);
8469 /* Wait for the outstanding mailbox command to complete */
8470 while (phba->sli.mbox_active) {
8471 /* Check active mailbox complete status every 2ms */
8473 if (time_after(jiffies, timeout)) {
8474 /* Timeout: mark the outstanding cmd as not complete */
8480 /* Cannot cleanly block the async mailbox command, fail it */
8482 spin_lock_irq(&phba->hbalock);
8483 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8484 spin_unlock_irq(&phba->hbalock);
8490 * lpfc_sli4_async_mbox_unblock - Unblock posting of SLI4 async mailbox commands
8491 * @phba: Pointer to HBA context object.
8493 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
8494 * commands from the driver internal pending mailbox queue. It makes sure
8495 * that there is no outstanding mailbox command before resuming posting
8496 * asynchronous mailbox commands. If, for any reason, there is an outstanding
8497 * mailbox command, it will try to wait it out before resuming asynchronous
8498 * mailbox command posting.
8501 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8503 struct lpfc_sli *psli = &phba->sli;
8505 spin_lock_irq(&phba->hbalock);
8506 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8507 /* Asynchronous mailbox posting is not blocked, do nothing */
8508 spin_unlock_irq(&phba->hbalock);
8512 /* An outstanding synchronous mailbox command is guaranteed to complete,
8513 * either successfully or by timeout; on timeout the outstanding command
8514 * is always removed. So just unblock posting of async mailbox commands
8515 * and resume.
8517 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8518 spin_unlock_irq(&phba->hbalock);
8520 /* wake up worker thread to post asynchronous mailbox command */
8521 lpfc_worker_wake_up(phba);
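/*
 * Pairing sketch (mirrors the pattern used by lpfc_sli_issue_mbox_s4
 * below; shown here for illustration): block async posting, issue the
 * synchronous bootstrap mailbox, then unblock:
 *
 *	rc = lpfc_sli4_async_mbox_block(phba);
 *	if (!rc) {
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *		lpfc_sli4_async_mbox_unblock(phba);
 *	}
 */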
8525 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8526 * @phba: Pointer to HBA context object.
8527 * @mboxq: Pointer to mailbox object.
8529 * The function waits for the bootstrap mailbox register ready bit from
8530 * the port for twice the regular mailbox command timeout value.
8532 * 0 - no timeout on waiting for bootstrap mailbox register ready.
8533 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
8536 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8539 unsigned long timeout;
8540 struct lpfc_register bmbx_reg;
8542 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8546 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8547 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8551 if (time_after(jiffies, timeout))
8552 return MBXERR_ERROR;
8553 } while (!db_ready);
8559 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
8560 * @phba: Pointer to HBA context object.
8561 * @mboxq: Pointer to mailbox object.
8563 * The function posts a mailbox to the port. The mailbox is expected
8564 * to be completely filled in and ready for the port to operate on it.
8565 * This routine executes a synchronous completion operation on the
8566 * mailbox by polling for its completion.
8568 * The caller must not be holding any locks when calling this routine.
8571 * MBX_SUCCESS - mailbox posted successfully
8572 * Any of the MBX error values.
8575 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8577 int rc = MBX_SUCCESS;
8578 unsigned long iflag;
8579 uint32_t mcqe_status;
8581 struct lpfc_sli *psli = &phba->sli;
8582 struct lpfc_mqe *mb = &mboxq->u.mqe;
8583 struct lpfc_bmbx_create *mbox_rgn;
8584 struct dma_address *dma_address;
8587 * Only one mailbox can be active to the bootstrap mailbox region
8588 * at a time and there is no queueing provided.
8590 spin_lock_irqsave(&phba->hbalock, iflag);
8591 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8592 spin_unlock_irqrestore(&phba->hbalock, iflag);
8593 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8594 "(%d):2532 Mailbox command x%x (x%x/x%x) "
8595 "cannot issue Data: x%x x%x\n",
8596 mboxq->vport ? mboxq->vport->vpi : 0,
8597 mboxq->u.mb.mbxCommand,
8598 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8599 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8600 psli->sli_flag, MBX_POLL);
8601 return MBXERR_ERROR;
8603 /* The driver grabs the token and owns it until release */
8604 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8605 phba->sli.mbox_active = mboxq;
8606 spin_unlock_irqrestore(&phba->hbalock, iflag);
8608 /* wait for the bootstrap mbox register to become ready */
8609 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8613 * Initialize the bootstrap memory region to avoid stale data areas
8614 * in the mailbox post. Then copy the caller's mailbox contents to
8615 * the bmbx mailbox region.
8617 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8618 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
8619 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8620 sizeof(struct lpfc_mqe));
8622 /* Post the high mailbox dma address to the port and wait for ready. */
8623 dma_address = &phba->sli4_hba.bmbx.dma_address;
8624 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8626 /* wait for the bootstrap mbox register hi-address write to complete */
8627 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8631 /* Post the low mailbox dma address to the port. */
8632 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
8634 /* wait for the bootstrap mbox register low-address write to complete */
8635 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8640 * Read the CQ to ensure the mailbox has completed.
8641 * If so, update the mailbox status so that the upper layers
8642 * can complete the request normally.
8644 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8645 sizeof(struct lpfc_mqe));
8646 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8647 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8648 sizeof(struct lpfc_mcqe));
8649 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8651 * When the CQE status indicates a failure and the mailbox status
8652 * indicates success then copy the CQE status into the mailbox status
8653 * (and prefix it with x4000).
8655 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
8656 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
8657 bf_set(lpfc_mqe_status, mb,
8658 (LPFC_MBX_ERROR_RANGE | mcqe_status));
8661 lpfc_sli4_swap_str(phba, mboxq);
8663 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8664 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
8665 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
8666 " x%x x%x CQ: x%x x%x x%x x%x\n",
8667 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8668 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8669 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8670 bf_get(lpfc_mqe_status, mb),
8671 mb->un.mb_words[0], mb->un.mb_words[1],
8672 mb->un.mb_words[2], mb->un.mb_words[3],
8673 mb->un.mb_words[4], mb->un.mb_words[5],
8674 mb->un.mb_words[6], mb->un.mb_words[7],
8675 mb->un.mb_words[8], mb->un.mb_words[9],
8676 mb->un.mb_words[10], mb->un.mb_words[11],
8677 mb->un.mb_words[12], mboxq->mcqe.word0,
8678 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
8679 mboxq->mcqe.trailer);
8681 /* We are holding the token; no lock is needed to release it */
8682 spin_lock_irqsave(&phba->hbalock, iflag);
8683 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8684 phba->sli.mbox_active = NULL;
8685 spin_unlock_irqrestore(&phba->hbalock, iflag);
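/*
 * Usage sketch (illustrative only): early in initialization, before
 * interrupts are enabled, a polled SLI-4 mailbox reaches this bootstrap
 * path through the jump table; the hi/lo DMA address writes and the
 * ready-bit waits all happen inside this routine. The builder shown is
 * just one example:
 *
 *	lpfc_read_config(phba, mboxq);
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 */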
8690 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
8691 * @phba: Pointer to HBA context object.
8692 * @pmbox: Pointer to mailbox object.
8693 * @flag: Flag indicating how the mailbox needs to be processed.
8695 * This function is called by discovery code and HBA management code to submit
8696 * a mailbox command to firmware with SLI-4 interface spec.
8698 * Return codes: the caller owns the mailbox command after the function returns.
8702 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
8705 struct lpfc_sli *psli = &phba->sli;
8706 unsigned long iflags;
8709 /* dump from issue mailbox command if setup */
8710 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
8712 rc = lpfc_mbox_dev_check(phba);
8714 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8715 "(%d):2544 Mailbox command x%x (x%x/x%x) "
8716 "cannot issue Data: x%x x%x\n",
8717 mboxq->vport ? mboxq->vport->vpi : 0,
8718 mboxq->u.mb.mbxCommand,
8719 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8720 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8721 psli->sli_flag, flag);
8722 goto out_not_finished;
8725 /* Detect polling mode and jump to a handler */
8726 if (!phba->sli4_hba.intr_enable) {
8727 if (flag == MBX_POLL)
8728 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8731 if (rc != MBX_SUCCESS)
8732 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8733 "(%d):2541 Mailbox command x%x "
8734 "(x%x/x%x) failure: "
8735 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8737 mboxq->vport ? mboxq->vport->vpi : 0,
8738 mboxq->u.mb.mbxCommand,
8739 lpfc_sli_config_mbox_subsys_get(phba,
8741 lpfc_sli_config_mbox_opcode_get(phba,
8743 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8744 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8745 bf_get(lpfc_mcqe_ext_status,
8747 psli->sli_flag, flag);
8749 } else if (flag == MBX_POLL) {
8750 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8751 "(%d):2542 Try to issue mailbox command "
8752 "x%x (x%x/x%x) synchronously ahead of async "
8753 "mailbox command queue: x%x x%x\n",
8754 mboxq->vport ? mboxq->vport->vpi : 0,
8755 mboxq->u.mb.mbxCommand,
8756 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8757 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8758 psli->sli_flag, flag);
8759 /* Try to block the asynchronous mailbox posting */
8760 rc = lpfc_sli4_async_mbox_block(phba);
8762 /* Successfully blocked, now issue sync mbox cmd */
8763 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8764 if (rc != MBX_SUCCESS)
8765 lpfc_printf_log(phba, KERN_WARNING,
8767 "(%d):2597 Sync Mailbox command "
8768 "x%x (x%x/x%x) failure: "
8769 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8771 mboxq->vport ? mboxq->vport->vpi : 0,
8772 mboxq->u.mb.mbxCommand,
8773 lpfc_sli_config_mbox_subsys_get(phba,
8775 lpfc_sli_config_mbox_opcode_get(phba,
8777 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8778 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8779 bf_get(lpfc_mcqe_ext_status,
8781 psli->sli_flag, flag);
8782 /* Unblock the async mailbox posting afterward */
8783 lpfc_sli4_async_mbox_unblock(phba);
8788 /* Now, interrupt mode asynchronous mailbox command */
8789 rc = lpfc_mbox_cmd_check(phba, mboxq);
8791 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8792 "(%d):2543 Mailbox command x%x (x%x/x%x) "
8793 "cannot issue Data: x%x x%x\n",
8794 mboxq->vport ? mboxq->vport->vpi : 0,
8795 mboxq->u.mb.mbxCommand,
8796 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8797 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8798 psli->sli_flag, flag);
8799 goto out_not_finished;
8802 /* Put the mailbox command into the driver internal FIFO */
8803 psli->slistat.mbox_busy++;
8804 spin_lock_irqsave(&phba->hbalock, iflags);
8805 lpfc_mbox_put(phba, mboxq);
8806 spin_unlock_irqrestore(&phba->hbalock, iflags);
8807 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8808 "(%d):0354 Mbox cmd issue - Enqueue Data: "
8809 "x%x (x%x/x%x) x%x x%x x%x\n",
8810 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
8811 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8812 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8813 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8814 phba->pport->port_state,
8815 psli->sli_flag, MBX_NOWAIT);
8816 /* Wake up worker thread to transport mailbox command from head */
8817 lpfc_worker_wake_up(phba);
8822 return MBX_NOT_FINISHED;
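/*
 * Usage sketch (illustrative only): an asynchronous SLI-4 mailbox is
 * queued with MBX_NOWAIT and completed later through its mbox_cmpl
 * handler; "my_init_link_cmpl" is a hypothetical handler name:
 *
 *	lpfc_init_link(phba, mboxq, phba->cfg_topology,
 *		       phba->cfg_link_speed);
 *	mboxq->mbox_cmpl = my_init_link_cmpl;
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 *	if (rc == MBX_NOT_FINISHED)
 *		mempool_free(mboxq, phba->mbox_mem_pool);
 */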
8826 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
8827 * @phba: Pointer to HBA context object.
8829 * This function is called by the worker thread to send a mailbox command to
8830 * SLI4 HBA firmware.
8834 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
8836 struct lpfc_sli *psli = &phba->sli;
8837 LPFC_MBOXQ_t *mboxq;
8838 int rc = MBX_SUCCESS;
8839 unsigned long iflags;
8840 struct lpfc_mqe *mqe;
8843 /* Check interrupt mode before posting the async mailbox command */
8844 if (unlikely(!phba->sli4_hba.intr_enable))
8845 return MBX_NOT_FINISHED;
8847 /* Check for mailbox command service token */
8848 spin_lock_irqsave(&phba->hbalock, iflags);
8849 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8850 spin_unlock_irqrestore(&phba->hbalock, iflags);
8851 return MBX_NOT_FINISHED;
8853 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8854 spin_unlock_irqrestore(&phba->hbalock, iflags);
8855 return MBX_NOT_FINISHED;
8857 if (unlikely(phba->sli.mbox_active)) {
8858 spin_unlock_irqrestore(&phba->hbalock, iflags);
8859 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8860 "0384 There is pending active mailbox cmd\n");
8861 return MBX_NOT_FINISHED;
8863 /* Take the mailbox command service token */
8864 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8866 /* Get the next mailbox command from head of queue */
8867 mboxq = lpfc_mbox_get(phba);
8869 /* If no more mailbox commands are waiting to be posted, we're done */
8871 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8872 spin_unlock_irqrestore(&phba->hbalock, iflags);
8875 phba->sli.mbox_active = mboxq;
8876 spin_unlock_irqrestore(&phba->hbalock, iflags);
8878 /* Check device readiness for posting mailbox command */
8879 rc = lpfc_mbox_dev_check(phba);
8881 /* Driver clean routine will clean up pending mailbox */
8882 goto out_not_finished;
8884 /* Prepare the mbox command to be posted */
8885 mqe = &mboxq->u.mqe;
8886 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
8888 /* Start timer for the mbox_tmo and log some mailbox post messages */
8889 mod_timer(&psli->mbox_tmo, (jiffies +
8890 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
8892 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8893 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
8895 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8896 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8897 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8898 phba->pport->port_state, psli->sli_flag);
8900 if (mbx_cmnd != MBX_HEARTBEAT) {
8902 lpfc_debugfs_disc_trc(mboxq->vport,
8903 LPFC_DISC_TRC_MBOX_VPORT,
8904 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8905 mbx_cmnd, mqe->un.mb_words[0],
8906 mqe->un.mb_words[1]);
8908 lpfc_debugfs_disc_trc(phba->pport,
8910 "MBOX Send: cmd:x%x mb:x%x x%x",
8911 mbx_cmnd, mqe->un.mb_words[0],
8912 mqe->un.mb_words[1]);
8915 psli->slistat.mbox_cmd++;
8917 /* Post the mailbox command to the port */
8918 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
8919 if (rc != MBX_SUCCESS) {
8920 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8921 "(%d):2533 Mailbox command x%x (x%x/x%x) "
8922 "cannot issue Data: x%x x%x\n",
8923 mboxq->vport ? mboxq->vport->vpi : 0,
8924 mboxq->u.mb.mbxCommand,
8925 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8926 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8927 psli->sli_flag, MBX_NOWAIT);
8928 goto out_not_finished;
8934 spin_lock_irqsave(&phba->hbalock, iflags);
8935 if (phba->sli.mbox_active) {
8936 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8937 __lpfc_mbox_cmpl_put(phba, mboxq);
8938 /* Release the token */
8939 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8940 phba->sli.mbox_active = NULL;
8942 spin_unlock_irqrestore(&phba->hbalock, iflags);
8944 return MBX_NOT_FINISHED;
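/*
 * Call-site sketch (an assumption about the worker flow, for
 * illustration): once lpfc_worker_wake_up() runs, the worker thread
 * posts the command at the head of the pending queue from process
 * context, roughly:
 *
 *	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
 *		lpfc_sli4_post_async_mbox(phba);
 */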
8948 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
8949 * @phba: Pointer to HBA context object.
8950 * @pmbox: Pointer to mailbox object.
8951 * @flag: Flag indicating how the mailbox needs to be processed.
8953 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine via
8954 * the API jump table function pointer in the lpfc_hba struct.
8956 * Return codes: the caller owns the mailbox command after the function returns.
8960 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
8962 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
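/*
 * Dispatch sketch (illustrative): after lpfc_mbox_api_table_setup()
 * below has run, this single call resolves by PCI device group:
 *
 *	rc = lpfc_sli_issue_mbox(phba, pmbox, MBX_NOWAIT);
 *
 * invokes lpfc_sli_issue_mbox_s3() on LPFC_PCI_DEV_LP (SLI-3) HBAs and
 * lpfc_sli_issue_mbox_s4() on LPFC_PCI_DEV_OC (SLI-4) HBAs.
 */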
8966 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
8967 * @phba: The hba struct for which this call is being executed.
8968 * @dev_grp: The HBA PCI-Device group number.
8970 * This routine sets up the mbox interface API function jump table in @phba
8972 * Returns: 0 - success, -ENODEV - failure.
8975 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8979 case LPFC_PCI_DEV_LP:
8980 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8981 phba->lpfc_sli_handle_slow_ring_event =
8982 lpfc_sli_handle_slow_ring_event_s3;
8983 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
8984 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
8985 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
8987 case LPFC_PCI_DEV_OC:
8988 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
8989 phba->lpfc_sli_handle_slow_ring_event =
8990 lpfc_sli_handle_slow_ring_event_s4;
8991 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
8992 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
8993 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
8996 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8997 "1420 Invalid HBA PCI-device group: 0x%x\n",
9006 * __lpfc_sli_ringtx_put - Add an iocb to the txq
9007 * @phba: Pointer to HBA context object.
9008 * @pring: Pointer to driver SLI ring object.
9009 * @piocb: Pointer to address of newly added command iocb.
9011 * This function is called with hbalock held to add a command
9012 * iocb to the txq when the SLI layer cannot submit it to the ring.
9016 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9017 struct lpfc_iocbq *piocb)
9019 lockdep_assert_held(&phba->hbalock);
9020 /* Insert the caller's iocb in the txq tail for later processing. */
9021 list_add_tail(&piocb->list, &pring->txq);
9025 * lpfc_sli_next_iocb - Get the next iocb in the txq
9026 * @phba: Pointer to HBA context object.
9027 * @pring: Pointer to driver SLI ring object.
9028 * @piocb: Pointer to address of newly added command iocb.
9030 * This function is called with hbalock held before a new
9031 * iocb is submitted to the firmware. It checks the txq so that
9032 * any iocbs already queued there are flushed to the firmware
9033 * before new iocbs are submitted.
9034 * If there are iocbs in the txq which need to be submitted
9035 * to the firmware, lpfc_sli_next_iocb returns the first element
9036 * of the txq after dequeuing it from the txq.
9037 * If the txq is empty, the function returns *piocb and sets
9038 * *piocb to NULL. The caller needs to check *piocb to find out
9039 * whether there are more commands in the txq.
9041 static struct lpfc_iocbq *
9042 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9043 struct lpfc_iocbq **piocb)
9045 struct lpfc_iocbq * nextiocb;
9047 lockdep_assert_held(&phba->hbalock);
9049 nextiocb = lpfc_sli_ringtx_get(phba, pring);
9059 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
9060 * @phba: Pointer to HBA context object.
9061 * @ring_number: SLI ring number to issue iocb on.
9062 * @piocb: Pointer to command iocb.
9063 * @flag: Flag indicating if this command can be put into txq.
9065 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
9066 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
9067 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
9068 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
9069 * this function allows only iocbs for posting buffers. This function finds
9070 * next available slot in the command ring and posts the command to the
9071 * available slot and writes the port attention register to request HBA start
9072 * processing new iocb. If there is no slot available in the ring and
9073 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
9074 * the function returns IOCB_BUSY.
9076 * This function is called with hbalock held. The function will return success
9077 * after it successfully submits the iocb to the firmware or after adding it to the txq.
9081 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
9082 struct lpfc_iocbq *piocb, uint32_t flag)
9084 struct lpfc_iocbq *nextiocb;
9086 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
9088 lockdep_assert_held(&phba->hbalock);
9090 if (piocb->iocb_cmpl && (!piocb->vport) &&
9091 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
9092 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
9093 lpfc_printf_log(phba, KERN_ERR,
9094 LOG_SLI | LOG_VPORT,
9095 "1807 IOCB x%x failed. No vport\n",
9096 piocb->iocb.ulpCommand);
9102 /* If the PCI channel is in offline state, do not post iocbs. */
9103 if (unlikely(pci_channel_offline(phba->pcidev)))
9106 /* If HBA has a deferred error attention, fail the iocb. */
9107 if (unlikely(phba->hba_flag & DEFER_ERATT))
9111 * We should never get an IOCB if we are in a < LINK_DOWN state
9113 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
9117 * Check to see if we are blocking IOCB processing because of an
9118 * outstanding event.
9120 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
9123 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
9125 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
9126 * can be issued if the link is not up.
9128 switch (piocb->iocb.ulpCommand) {
9129 case CMD_GEN_REQUEST64_CR:
9130 case CMD_GEN_REQUEST64_CX:
9131 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9132 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
9133 FC_RCTL_DD_UNSOL_CMD) ||
9134 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9135 MENLO_TRANSPORT_TYPE))
9139 case CMD_QUE_RING_BUF_CN:
9140 case CMD_QUE_RING_BUF64_CN:
9142 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
9143 * completion, iocb_cmpl MUST be 0.
9145 if (piocb->iocb_cmpl)
9146 piocb->iocb_cmpl = NULL;
9148 case CMD_CREATE_XRI_CR:
9149 case CMD_CLOSE_XRI_CN:
9150 case CMD_CLOSE_XRI_CX:
9157 * For FCP commands, we must be in a state where we can process link attention events.
9160 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
9161 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
9165 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9166 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9167 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9170 lpfc_sli_update_ring(phba, pring);
9172 lpfc_sli_update_full_ring(phba, pring);
9175 return IOCB_SUCCESS;
9180 pring->stats.iocb_cmd_delay++;
9184 if (!(flag & SLI_IOCB_RET_IOCB)) {
9185 __lpfc_sli_ringtx_put(phba, pring, piocb);
9186 return IOCB_SUCCESS;
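/*
 * Flag sketch (illustrative): SLI_IOCB_RET_IOCB controls what happens
 * when the ring is full. Without it the iocb is parked on the txq and
 * IOCB_SUCCESS is returned; with it the iocb is handed back so the
 * caller can retry:
 *
 *	rc = __lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb, 0);
 *		(txq fallback: rc is IOCB_SUCCESS even on a full ring)
 *
 *	rc = __lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb,
 *				   SLI_IOCB_RET_IOCB);
 *		(rc is IOCB_BUSY on a full ring; piocb stays with caller)
 */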
9193 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
9194 * @phba: Pointer to HBA context object.
9195 * @piocb: Pointer to command iocb.
9196 * @sglq: Pointer to the scatter gather queue object.
9198 * This routine converts the bpl or bde that is in the IOCB
9199 * to a sgl list for the sli4 hardware. The physical address
9200 * of the bpl/bde is converted back to a virtual address.
9201 * If the IOCB contains a BPL then the list of BDE's is
9202 * converted to sli4_sge's. If the IOCB contains a single
9203 * BDE then it is converted to a single sli4_sge.
9204 * The IOCB is still in cpu endianness so the contents of
9205 * the bpl can be used without byte swapping.
9207 * Returns valid XRI = Success, NO_XRI = Failure.
9210 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
9211 struct lpfc_sglq *sglq)
9213 uint16_t xritag = NO_XRI;
9214 struct ulp_bde64 *bpl = NULL;
9215 struct ulp_bde64 bde;
9216 struct sli4_sge *sgl = NULL;
9217 struct lpfc_dmabuf *dmabuf;
9221 uint32_t offset = 0; /* accumulated offset in the sg request list */
9222 int inbound = 0; /* number of sg reply entries inbound from firmware */
9224 if (!piocbq || !sglq)
9227 sgl = (struct sli4_sge *)sglq->sgl;
9228 icmd = &piocbq->iocb;
9229 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
9230 return sglq->sli4_xritag;
9231 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9232 numBdes = icmd->un.genreq64.bdl.bdeSize /
9233 sizeof(struct ulp_bde64);
9234 /* The addrHigh and addrLow fields within the IOCB
9235 * have not been byteswapped yet so there is no
9236 * need to swap them back.
9238 if (piocbq->context3)
9239 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
9243 bpl = (struct ulp_bde64 *)dmabuf->virt;
9247 for (i = 0; i < numBdes; i++) {
9248 /* Should already be byte swapped. */
9249 sgl->addr_hi = bpl->addrHigh;
9250 sgl->addr_lo = bpl->addrLow;
9252 sgl->word2 = le32_to_cpu(sgl->word2);
9253 if ((i+1) == numBdes)
9254 bf_set(lpfc_sli4_sge_last, sgl, 1);
9256 bf_set(lpfc_sli4_sge_last, sgl, 0);
9257 /* swap the size field back to the cpu so we
9258 * can assign it to the sgl.
9260 bde.tus.w = le32_to_cpu(bpl->tus.w);
9261 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
9262 /* The offsets in the sgl need to be accumulated
9263 * separately for the request and reply lists.
9264 * The request is always first, the reply follows.
9266 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
9267 /* add up the reply sg entries */
9268 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
9270 /* first inbound? reset the offset */
9273 bf_set(lpfc_sli4_sge_offset, sgl, offset);
9274 bf_set(lpfc_sli4_sge_type, sgl,
9275 LPFC_SGE_TYPE_DATA);
9276 offset += bde.tus.f.bdeSize;
9278 sgl->word2 = cpu_to_le32(sgl->word2);
9282 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
9283 /* The addrHigh and addrLow fields of the BDE have not
9284 * been byteswapped yet so they need to be swapped
9285 * before putting them in the sgl.
9288 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
9290 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
9291 sgl->word2 = le32_to_cpu(sgl->word2);
9292 bf_set(lpfc_sli4_sge_last, sgl, 1);
9293 sgl->word2 = cpu_to_le32(sgl->word2);
9295 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
9297 return sglq->sli4_xritag;
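/*
 * Conversion sketch (illustrative, matching the loop above): each
 * ulp_bde64 in the BPL maps to one sli4_sge. Addresses are already
 * little-endian; word2 is swapped to host order for the bit-field
 * update and swapped back, and the BDE length is re-encoded as the
 * SGE length:
 *
 *	sgl->addr_hi = bpl->addrHigh;
 *	sgl->addr_lo = bpl->addrLow;
 *	sgl->word2 = le32_to_cpu(sgl->word2);
 *	bf_set(lpfc_sli4_sge_last, sgl, 1);
 *	sgl->word2 = cpu_to_le32(sgl->word2);
 *	bde.tus.w = le32_to_cpu(bpl->tus.w);
 *	sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
 */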
9301 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
9302 * @phba: Pointer to HBA context object.
9303 * @piocb: Pointer to command iocb.
9304 * @wqe: Pointer to the work queue entry.
9306 * This routine converts the iocb command to its Work Queue Entry
9307 * equivalent. The wqe pointer should not have any fields set when
9308 * this routine is called because it will memcpy over them.
9309 * This routine does not set the CQ_ID or the WQEC bits in the wqe.
9312 * Returns: 0 = Success, IOCB_ERROR = Failure.
9315 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9316 union lpfc_wqe128 *wqe)
9318 uint32_t xmit_len = 0, total_len = 0;
9322 uint8_t command_type = ELS_COMMAND_NON_FIP;
9325 uint16_t abrt_iotag;
9326 struct lpfc_iocbq *abrtiocbq;
9327 struct ulp_bde64 *bpl = NULL;
9328 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
9330 struct ulp_bde64 bde;
9331 struct lpfc_nodelist *ndlp;
9335 fip = phba->hba_flag & HBA_FIP_SUPPORT;
9336 /* The fcp commands will set command type */
9337 if (iocbq->iocb_flag & LPFC_IO_FCP)
9338 command_type = FCP_COMMAND;
9339 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
9340 command_type = ELS_COMMAND_FIP;
9342 command_type = ELS_COMMAND_NON_FIP;
9344 if (phba->fcp_embed_io)
9345 memset(wqe, 0, sizeof(union lpfc_wqe128));
9346 /* Some of the fields are in the right position already */
9347 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
9348 if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
9349 /* The ct field has moved so reset */
9350 wqe->generic.wqe_com.word7 = 0;
9351 wqe->generic.wqe_com.word10 = 0;
9354 abort_tag = (uint32_t) iocbq->iotag;
9355 xritag = iocbq->sli4_xritag;
9356 /* words0-2 bpl convert bde */
9357 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9358 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9359 sizeof(struct ulp_bde64);
9360 bpl = (struct ulp_bde64 *)
9361 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9365 /* Should already be byte swapped. */
9366 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
9367 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
9368 /* swap the size field back to the cpu so we
9369 * can assign it to the sgl.
9371 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
9372 xmit_len = wqe->generic.bde.tus.f.bdeSize;
9374 for (i = 0; i < numBdes; i++) {
9375 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9376 total_len += bde.tus.f.bdeSize;
9379 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
9381 iocbq->iocb.ulpIoTag = iocbq->iotag;
9382 cmnd = iocbq->iocb.ulpCommand;
9384 switch (iocbq->iocb.ulpCommand) {
9385 case CMD_ELS_REQUEST64_CR:
9386 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9387 ndlp = iocbq->context_un.ndlp;
9389 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9390 if (!iocbq->iocb.ulpLe) {
9391 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9392 "2007 Only Limited Edition cmd Format"
9393 " supported 0x%x\n",
9394 iocbq->iocb.ulpCommand);
9398 wqe->els_req.payload_len = xmit_len;
9399 /* Els_request64 has a TMO */
9400 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9401 iocbq->iocb.ulpTimeout);
9402 /* Need a VF for word 4; set the vf bit */
9403 bf_set(els_req64_vf, &wqe->els_req, 0);
9404 /* And a VFID for word 12 */
9405 bf_set(els_req64_vfid, &wqe->els_req, 0);
9406 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9407 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9408 iocbq->iocb.ulpContext);
9409 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9410 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
9411 /* CCP CCPE PV PRI in word10 were set in the memcpy */
9412 if (command_type == ELS_COMMAND_FIP)
9413 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9414 >> LPFC_FIP_ELS_ID_SHIFT);
9415 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9416 iocbq->context2)->virt);
9417 if_type = bf_get(lpfc_sli_intf_if_type,
9418 &phba->sli4_hba.sli_intf);
9419 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9420 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
9421 *pcmd == ELS_CMD_SCR ||
9422 *pcmd == ELS_CMD_RSCN_XMT ||
9423 *pcmd == ELS_CMD_FDISC ||
9424 *pcmd == ELS_CMD_LOGO ||
9425 *pcmd == ELS_CMD_PLOGI)) {
9426 bf_set(els_req64_sp, &wqe->els_req, 1);
9427 bf_set(els_req64_sid, &wqe->els_req,
9428 iocbq->vport->fc_myDID);
9429 if ((*pcmd == ELS_CMD_FLOGI) &&
9430 !(phba->fc_topology ==
9431 LPFC_TOPOLOGY_LOOP))
9432 bf_set(els_req64_sid, &wqe->els_req, 0);
9433 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9434 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9435 phba->vpi_ids[iocbq->vport->vpi]);
9436 } else if (pcmd && iocbq->context1) {
9437 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9438 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9439 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9442 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9443 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9444 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9445 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9446 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9447 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9448 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9449 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
9450 wqe->els_req.max_response_payload_len = total_len - xmit_len;
9452 case CMD_XMIT_SEQUENCE64_CX:
9453 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9454 iocbq->iocb.un.ulpWord[3]);
9455 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
9456 iocbq->iocb.unsli3.rcvsli3.ox_id);
9457 /* The entire sequence is transmitted for this IOCB */
9458 xmit_len = total_len;
9459 cmnd = CMD_XMIT_SEQUENCE64_CR;
9460 if (phba->link_flag & LS_LOOPBACK_MODE)
9461 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
9463 case CMD_XMIT_SEQUENCE64_CR:
9464 /* word3 iocb=io_tag32 wqe=reserved */
9465 wqe->xmit_sequence.rsvd3 = 0;
9466 /* word4 relative_offset memcpy */
9467 /* word5 r_ctl/df_ctl memcpy */
9468 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9469 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9470 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9471 LPFC_WQE_IOD_WRITE);
9472 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9473 LPFC_WQE_LENLOC_WORD12);
9474 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
9475 wqe->xmit_sequence.xmit_len = xmit_len;
9476 command_type = OTHER_COMMAND;
9478 case CMD_XMIT_BCAST64_CN:
9479 /* word3 iocb=iotag32 wqe=seq_payload_len */
9480 wqe->xmit_bcast64.seq_payload_len = xmit_len;
9481 /* word4 iocb=rsvd wqe=rsvd */
9482 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
9483 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
9484 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
9485 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9486 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9487 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9488 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9489 LPFC_WQE_LENLOC_WORD3);
9490 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
9492 case CMD_FCP_IWRITE64_CR:
9493 command_type = FCP_COMMAND_DATA_OUT;
9494 /* word3 iocb=iotag wqe=payload_offset_len */
9495 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9496 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9497 xmit_len + sizeof(struct fcp_rsp));
9498 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9500 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9501 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9502 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9503 iocbq->iocb.ulpFCP2Rcvy);
9504 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9505 /* Always open the exchange */
9506 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9507 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9508 LPFC_WQE_LENLOC_WORD4);
9509 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
9510 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
9511 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9512 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
9513 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9514 if (iocbq->priority) {
9515 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9516 (iocbq->priority << 1));
9518 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9519 (phba->cfg_XLanePriority << 1));
9522 /* Note, word 10 is already initialized to 0 */
9524 /* PBDE is governed only by lpfc_enable_pbde, not by perf hints */
9525 if (phba->cfg_enable_pbde)
9526 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9528 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9530 if (phba->fcp_embed_io) {
9531 struct lpfc_io_buf *lpfc_cmd;
9532 struct sli4_sge *sgl;
9533 struct fcp_cmnd *fcp_cmnd;
9536 /* 128 byte wqe support here */
9538 lpfc_cmd = iocbq->context1;
9539 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9540 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9542 /* Word 0-2 - FCP_CMND */
9543 wqe->generic.bde.tus.f.bdeFlags =
9544 BUFF_TYPE_BDE_IMMED;
9545 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9546 wqe->generic.bde.addrHigh = 0;
9547 wqe->generic.bde.addrLow = 88; /* Word 22 */
9549 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9550 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
9552 /* Word 22-29 FCP CMND Payload */
9553 ptr = &wqe->words[22];
9554 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9557 case CMD_FCP_IREAD64_CR:
9558 /* word3 iocb=iotag wqe=payload_offset_len */
9559 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9560 bf_set(payload_offset_len, &wqe->fcp_iread,
9561 xmit_len + sizeof(struct fcp_rsp));
9562 bf_set(cmd_buff_len, &wqe->fcp_iread,
9564 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9565 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9566 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9567 iocbq->iocb.ulpFCP2Rcvy);
9568 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
9569 /* Always open the exchange */
9570 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9571 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9572 LPFC_WQE_LENLOC_WORD4);
9573 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
9574 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
9575 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9576 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
9577 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9578 if (iocbq->priority) {
9579 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9580 (iocbq->priority << 1));
9582 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9583 (phba->cfg_XLanePriority << 1));
9586 /* Note, word 10 is already initialized to 0 */
9588 /* PBDE is governed only by lpfc_enable_pbde, not by perf hints */
9589 if (phba->cfg_enable_pbde)
9590 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9592 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9594 if (phba->fcp_embed_io) {
9595 struct lpfc_io_buf *lpfc_cmd;
9596 struct sli4_sge *sgl;
9597 struct fcp_cmnd *fcp_cmnd;
9600 /* 128 byte wqe support here */
9602 lpfc_cmd = iocbq->context1;
9603 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9604 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9606 /* Word 0-2 - FCP_CMND */
9607 wqe->generic.bde.tus.f.bdeFlags =
9608 BUFF_TYPE_BDE_IMMED;
9609 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9610 wqe->generic.bde.addrHigh = 0;
9611 wqe->generic.bde.addrLow = 88; /* Word 22 */
9613 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9614 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
9616 /* Word 22-29 FCP CMND Payload */
9617 ptr = &wqe->words[22];
9618 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9621 case CMD_FCP_ICMND64_CR:
9622 /* word3 iocb=iotag wqe=payload_offset_len */
9623 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9624 bf_set(payload_offset_len, &wqe->fcp_icmd,
9625 xmit_len + sizeof(struct fcp_rsp));
9626 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9628 /* word3 iocb=IO_TAG wqe=reserved */
9629 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
9630 /* Always open the exchange */
9631 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9632 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9633 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9634 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9635 LPFC_WQE_LENLOC_NONE);
9636 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9637 iocbq->iocb.ulpFCP2Rcvy);
9638 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9639 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
9640 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9641 if (iocbq->priority) {
9642 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9643 (iocbq->priority << 1));
9645 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9646 (phba->cfg_XLanePriority << 1));
9649 /* Note, word 10 is already initialized to 0 */
9651 if (phba->fcp_embed_io) {
9652 struct lpfc_io_buf *lpfc_cmd;
9653 struct sli4_sge *sgl;
9654 struct fcp_cmnd *fcp_cmnd;
9657 /* 128 byte wqe support here */
9659 lpfc_cmd = iocbq->context1;
9660 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9661 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9663 /* Word 0-2 - FCP_CMND */
9664 wqe->generic.bde.tus.f.bdeFlags =
9665 BUFF_TYPE_BDE_IMMED;
9666 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9667 wqe->generic.bde.addrHigh = 0;
9668 wqe->generic.bde.addrLow = 88; /* Word 22 */
9670 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
9671 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
9673 /* Word 22-29 FCP CMND Payload */
9674 ptr = &wqe->words[22];
9675 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9678 case CMD_GEN_REQUEST64_CR:
9679 /* For this command calculate the xmit length of the request bde (bpl). */
9683 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9684 sizeof(struct ulp_bde64);
9685 for (i = 0; i < numBdes; i++) {
9686 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9687 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9689 xmit_len += bde.tus.f.bdeSize;
9691 /* word3 iocb=IO_TAG wqe=request_payload_len */
9692 wqe->gen_req.request_payload_len = xmit_len;
9693 /* word4 iocb=parameter wqe=relative_offset memcpy */
9694 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
9695 /* word6 context tag copied in memcpy */
9696 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
9697 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9698 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9699 "2015 Invalid CT %x command 0x%x\n",
9700 ct, iocbq->iocb.ulpCommand);
9703 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9704 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9705 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9706 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9707 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9708 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9709 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9710 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
9711 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
9712 command_type = OTHER_COMMAND;
9714 case CMD_XMIT_ELS_RSP64_CX:
9715 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9716 /* words0-2 BDE memcpy */
9717 /* word3 iocb=iotag32 wqe=response_payload_len */
9718 wqe->xmit_els_rsp.response_payload_len = xmit_len;
9720 wqe->xmit_els_rsp.word4 = 0;
9721 /* word5 iocb=rsvd wqe=did */
9722 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
9723 iocbq->iocb.un.xseq64.xmit_els_remoteID);
9725 if_type = bf_get(lpfc_sli_intf_if_type,
9726 &phba->sli4_hba.sli_intf);
9727 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9728 if (iocbq->vport->fc_flag & FC_PT2PT) {
9729 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9730 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9731 iocbq->vport->fc_myDID);
9732 if (iocbq->vport->fc_myDID == Fabric_DID) {
9734 &wqe->xmit_els_rsp.wqe_dest, 0);
9738 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9739 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9740 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9741 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
9742 iocbq->iocb.unsli3.rcvsli3.ox_id);
9743 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
9744 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9745 phba->vpi_ids[iocbq->vport->vpi]);
9746 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9747 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9748 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9749 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9750 LPFC_WQE_LENLOC_WORD3);
9751 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
9752 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9753 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9754 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9755 iocbq->context2)->virt);
9756 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
9757 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9758 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9759 iocbq->vport->fc_myDID);
9760 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9761 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9762 phba->vpi_ids[phba->pport->vpi]);
9764 command_type = OTHER_COMMAND;
9766 case CMD_CLOSE_XRI_CN:
9767 case CMD_ABORT_XRI_CN:
9768 case CMD_ABORT_XRI_CX:
9769 /* words 0-2 memcpy should be 0 reserved */
9770 /* port will send abts */
9771 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9772 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9773 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9774 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9778 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
9780 * The link is down, or the command was ELS_FIP
9781 * so the fw does not need to send abts
9784 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9786 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9787 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
9788 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9789 wqe->abort_cmd.rsrvd5 = 0;
9790 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
9791 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9792 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
9794 * The abort handler will send us CMD_ABORT_XRI_CN or
9795 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
9797 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9798 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9799 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9800 LPFC_WQE_LENLOC_NONE);
9801 cmnd = CMD_ABORT_XRI_CX;
9802 command_type = OTHER_COMMAND;
9805 case CMD_XMIT_BLS_RSP64_CX:
9806 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9807 /* As BLS ABTS RSP WQE is very different from other WQEs,
9808 * we re-construct this WQE here based on information in
9809 * iocbq from scratch.
9811 memset(wqe, 0, sizeof(union lpfc_wqe));
9812 /* OX_ID is invariant regardless of who sent the ABTS to the CT exchange */
9813 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
9814 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
9815 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
9816 LPFC_ABTS_UNSOL_INT) {
9817 /* ABTS sent by initiator to CT exchange, the
9818 * RX_ID field will be filled with the newly
9819 * allocated responder XRI.
9821 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9822 iocbq->sli4_xritag);
9824 /* ABTS sent by responder to CT exchange, the
9825 * RX_ID field will be filled with the responder RX_ID from the ABTS.
9828 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9829 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
9831 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9832 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
9835 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9837 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9838 iocbq->iocb.ulpContext);
9839 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
9840 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
9841 phba->vpi_ids[phba->pport->vpi]);
9842 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9843 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9844 LPFC_WQE_LENLOC_NONE);
9845 /* Overwrite the pre-set command type with OTHER_COMMAND */
9846 command_type = OTHER_COMMAND;
9847 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9848 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9849 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9850 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9851 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9852 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9853 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9857 case CMD_SEND_FRAME:
9858 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9859 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9861 case CMD_XRI_ABORTED_CX:
9862 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
9863 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
9864 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
9865 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
9866 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
9868 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9869 "2014 Invalid command 0x%x\n",
9870 iocbq->iocb.ulpCommand);
9875 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9876 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9877 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9878 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9879 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9880 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9881 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9882 LPFC_IO_DIF_INSERT);
9883 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9884 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9885 wqe->generic.wqe_com.abort_tag = abort_tag;
9886 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9887 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9888 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9889 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
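/*
 * Bit-field sketch (illustrative): bf_set()/bf_get() are the generic
 * accessors used above to fill WQE words, so the common fields set at
 * the end apply to every command type, e.g.:
 *
 *	bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
 *	tag = bf_get(wqe_reqtag, &wqe->generic.wqe_com);
 */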
9894 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
9895 * @phba: Pointer to HBA context object.
9896 * @ring_number: SLI ring number to issue iocb on.
9897 * @piocb: Pointer to command iocb.
9898 * @flag: Flag indicating if this command can be put into txq.
9900 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
9901 * an iocb command to an HBA with SLI-4 interface spec.
9903 * This function is called with hbalock held. The function will return success
9904 * after it successfully submits the iocb to the firmware or after adding it to the txq.
9908 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9909 struct lpfc_iocbq *piocb, uint32_t flag)
9911 struct lpfc_sglq *sglq;
9912 union lpfc_wqe128 wqe;
9913 struct lpfc_queue *wq;
9914 struct lpfc_sli_ring *pring;
9917 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9918 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9919 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq;
9921 wq = phba->sli4_hba.els_wq;
9924 /* Get corresponding ring */
 * The WQE can be either 64 or 128 bytes.
 */
9931 lockdep_assert_held(&pring->ring_lock);
9933 if (piocb->sli4_xritag == NO_XRI) {
9934 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
9935 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
9938 if (!list_empty(&pring->txq)) {
9939 if (!(flag & SLI_IOCB_RET_IOCB)) {
9940 __lpfc_sli_ringtx_put(phba,
9942 return IOCB_SUCCESS;
9947 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
9949 if (!(flag & SLI_IOCB_RET_IOCB)) {
9950 __lpfc_sli_ringtx_put(phba,
9953 return IOCB_SUCCESS;
9959 } else if (piocb->iocb_flag & LPFC_IO_FCP)
/* These IOs already have an XRI and a mapped sgl. */
 * This is a continuation of a command (CX), so this
 * sglq is on the active list.
 */
9967 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
9973 piocb->sli4_lxritag = sglq->sli4_lxritag;
9974 piocb->sli4_xritag = sglq->sli4_xritag;
9975 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
9979 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
9982 if (lpfc_sli4_wq_put(wq, &wqe))
9984 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
 * __lpfc_sli_issue_iocb - Wrapper function of lockless version for issuing iocb
 *
 * This routine wraps the lockless version of the IOCB issue routine, invoked
 * through the function pointer in the lpfc_hba struct.
9996 * IOCB_ERROR - Error
9997 * IOCB_SUCCESS - Success
10001 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10002 struct lpfc_iocbq *piocb, uint32_t flag)
10004 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
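/* Hedged usage sketch of the two issue paths: the lockless routine above
 * requires the caller to hold the appropriate lock, while the
 * lpfc_sli_issue_iocb() wrapper later in this file acquires it internally.
 */
#if 0
	/* lockless path: SLI4 callers must already hold the ring lock */
	lockdep_assert_held(&pring->ring_lock);
	rc = __lpfc_sli_issue_iocb(phba, pring->ringno, piocb, flag);

	/* locked path: the wrapper picks the ring and takes the lock itself */
	rc = lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
#endif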
10008 * lpfc_sli_api_table_setup - Set up sli api function jump table
10009 * @phba: The hba struct for which this call is being executed.
10010 * @dev_grp: The HBA PCI-Device group number.
10012 * This routine sets up the SLI interface API function jump table in @phba
10014 * Returns: 0 - success, -ENODEV - failure.
10017 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10021 case LPFC_PCI_DEV_LP:
10022 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10023 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10025 case LPFC_PCI_DEV_OC:
10026 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10027 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10030 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10031 "1419 Invalid HBA PCI-device group: 0x%x\n",
10036 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
10041 * lpfc_sli4_calc_ring - Calculates which ring to use
10042 * @phba: Pointer to HBA context object.
10043 * @piocb: Pointer to command iocb.
 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
 * hba_wqidx, so we need to calculate the corresponding ring.
10047 * Since ABORTS must go on the same WQ of the command they are
10048 * aborting, we use command's hba_wqidx.
10050 struct lpfc_sli_ring *
10051 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10053 struct lpfc_io_buf *lpfc_cmd;
10055 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10056 if (unlikely(!phba->sli4_hba.hdwq))
10059 * for abort iocb hba_wqidx should already
10060 * be setup based on what work queue we used.
10062 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10063 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10064 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10066 return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring;
10068 if (unlikely(!phba->sli4_hba.els_wq))
10070 piocb->hba_wqidx = 0;
10071 return phba->sli4_hba.els_wq->pring;
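/* A hedged sketch of the pattern used by SLI4 abort paths in this file:
 * look up the ring with lpfc_sli4_calc_ring(), then issue under that ring's
 * own lock.
 */
#if 0
	pring = lpfc_sli4_calc_ring(phba, piocb);
	if (unlikely(!pring))
		return IOCB_ERROR;
	spin_lock_irqsave(&pring->ring_lock, iflags);
	rc = __lpfc_sli_issue_iocb(phba, pring->ringno, piocb, flag);
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
#endif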
10076 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
10077 * @phba: Pointer to HBA context object.
10078 * @pring: Pointer to driver SLI ring object.
10079 * @piocb: Pointer to command iocb.
10080 * @flag: Flag indicating if this command can be put into txq.
10082 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
10083 * function. This function gets the hbalock and calls
10084 * __lpfc_sli_issue_iocb function and will return the error returned
10085 * by __lpfc_sli_issue_iocb function. This wrapper is used by
10086 * functions which do not hold hbalock.
10089 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10090 struct lpfc_iocbq *piocb, uint32_t flag)
10092 struct lpfc_sli_ring *pring;
10093 unsigned long iflags;
10096 if (phba->sli_rev == LPFC_SLI_REV4) {
10097 pring = lpfc_sli4_calc_ring(phba, piocb);
10098 if (unlikely(pring == NULL))
10101 spin_lock_irqsave(&pring->ring_lock, iflags);
10102 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10103 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10105 /* For now, SLI2/3 will still use hbalock */
10106 spin_lock_irqsave(&phba->hbalock, iflags);
10107 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10108 spin_unlock_irqrestore(&phba->hbalock, iflags);
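/* Sketch of the @flag contract, inferred from the txq handling in
 * __lpfc_sli_issue_iocb_s4() above: without SLI_IOCB_RET_IOCB a busy ring
 * parks the iocb on the txq and reports success; with it, the caller keeps
 * ownership and may retry.
 */
#if 0
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb, 0);
	/* rc == IOCB_SUCCESS even if the iocb was only queued on the txq */

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb,
				 SLI_IOCB_RET_IOCB);
	if (rc == IOCB_BUSY)
		/* ring busy; piocb was returned to the caller */
		;
#endif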
10114 * lpfc_extra_ring_setup - Extra ring setup function
10115 * @phba: Pointer to HBA context object.
 * This function is called while the driver attaches to the
 * HBA to set up the extra ring. The extra ring is used
 * only when the driver needs to support target mode
 * or IP over FC functionality.
10122 * This function is called with no lock held. SLI3 only.
10125 lpfc_extra_ring_setup( struct lpfc_hba *phba)
10127 struct lpfc_sli *psli;
10128 struct lpfc_sli_ring *pring;
10132 /* Adjust cmd/rsp ring iocb entries more evenly */
10134 /* Take some away from the FCP ring */
10135 pring = &psli->sli3_ring[LPFC_FCP_RING];
10136 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10137 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10138 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10139 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10141 /* and give them to the extra ring */
10142 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
10144 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10145 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10146 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10147 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10149 /* Setup default profile for this ring */
10150 pring->iotag_max = 4096;
10151 pring->num_mask = 1;
10152 pring->prt[0].profile = 0; /* Mask 0 */
10153 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10154 pring->prt[0].type = phba->cfg_multi_ring_type;
10155 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
10159 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
10160 * @phba: Pointer to HBA context object.
10161 * @iocbq: Pointer to iocb object.
10163 * The async_event handler calls this routine when it receives
10164 * an ASYNC_STATUS_CN event from the port. The port generates
10165 * this event when an Abort Sequence request to an rport fails
10166 * twice in succession. The abort could be originated by the
10167 * driver or by the port. The ABTS could have been for an ELS
10168 * or FCP IO. The port only generates this event when an ABTS
10169 * fails to complete after one retry.
10172 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10173 struct lpfc_iocbq *iocbq)
10175 struct lpfc_nodelist *ndlp = NULL;
10176 uint16_t rpi = 0, vpi = 0;
10177 struct lpfc_vport *vport = NULL;
10179 /* The rpi in the ulpContext is vport-sensitive. */
10180 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10181 rpi = iocbq->iocb.ulpContext;
10183 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10184 "3092 Port generated ABTS async event "
10185 "on vpi %d rpi %d status 0x%x\n",
10186 vpi, rpi, iocbq->iocb.ulpStatus);
10188 vport = lpfc_find_vport_by_vpid(phba, vpi);
10191 ndlp = lpfc_findnode_rpi(vport, rpi);
10192 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
10195 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10196 lpfc_sli_abts_recover_port(vport, ndlp);
10200 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10201 "3095 Event Context not found, no "
10202 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
10203 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
10207 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
10208 * @phba: pointer to HBA context object.
10209 * @ndlp: nodelist pointer for the impacted rport.
10210 * @axri: pointer to the wcqe containing the failed exchange.
10212 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
10213 * port. The port generates this event when an abort exchange request to an
10214 * rport fails twice in succession with no reply. The abort could be originated
10215 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
10218 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10219 struct lpfc_nodelist *ndlp,
10220 struct sli4_wcqe_xri_aborted *axri)
10222 struct lpfc_vport *vport;
10223 uint32_t ext_status = 0;
10225 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
10226 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10227 "3115 Node Context not found, driver "
10228 "ignoring abts err event\n");
10232 vport = ndlp->vport;
10233 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10234 "3116 Port generated FCP XRI ABORT event on "
10235 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
10236 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
10237 bf_get(lpfc_wcqe_xa_xri, axri),
10238 bf_get(lpfc_wcqe_xa_status, axri),
10242 * Catch the ABTS protocol failure case. Older OCe FW releases returned
10243 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
10244 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
10246 ext_status = axri->parameter & IOERR_PARAM_MASK;
10247 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10248 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
10249 lpfc_sli_abts_recover_port(vport, ndlp);
10253 * lpfc_sli_async_event_handler - ASYNC iocb handler function
10254 * @phba: Pointer to HBA context object.
10255 * @pring: Pointer to driver SLI ring object.
10256 * @iocbq: Pointer to iocb object.
10258 * This function is called by the slow ring event handler
10259 * function when there is an ASYNC event iocb in the ring.
10260 * This function is called with no lock held.
10261 * Currently this function handles only temperature related
10262 * ASYNC events. The function decodes the temperature sensor
10263 * event message and posts events for the management applications.
10266 lpfc_sli_async_event_handler(struct lpfc_hba * phba,
10267 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
10271 struct temp_event temp_event_data;
10272 struct Scsi_Host *shost;
10275 icmd = &iocbq->iocb;
10276 evt_code = icmd->un.asyncstat.evt_code;
10278 switch (evt_code) {
10279 case ASYNC_TEMP_WARN:
10280 case ASYNC_TEMP_SAFE:
10281 temp_event_data.data = (uint32_t) icmd->ulpContext;
10282 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
10283 if (evt_code == ASYNC_TEMP_WARN) {
10284 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
10285 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10286 "0347 Adapter is very hot, please take "
10287 "corrective action. temperature : %d Celsius\n",
10288 (uint32_t) icmd->ulpContext);
10290 temp_event_data.event_code = LPFC_NORMAL_TEMP;
10291 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10292 "0340 Adapter temperature is OK now. "
10293 "temperature : %d Celsius\n",
10294 (uint32_t) icmd->ulpContext);
10297 /* Send temperature change event to applications */
10298 shost = lpfc_shost_from_vport(phba->pport);
10299 fc_host_post_vendor_event(shost, fc_get_event_number(),
10300 sizeof(temp_event_data), (char *) &temp_event_data,
10301 LPFC_NL_VENDOR_ID);
10303 case ASYNC_STATUS_CN:
10304 lpfc_sli_abts_err_handler(phba, iocbq);
10307 iocb_w = (uint32_t *) icmd;
10308 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10309 "0346 Ring %d handler: unexpected ASYNC_STATUS"
10311 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
10312 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
10313 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
10314 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
10315 pring->ringno, icmd->un.asyncstat.evt_code,
10316 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
10317 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
10318 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
10319 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
10327 * lpfc_sli4_setup - SLI ring setup function
10328 * @phba: Pointer to HBA context object.
 * lpfc_sli4_setup sets up rings of the SLI interface with
 * the number of iocbs per ring and iotags. This function is
 * called while the driver attaches to the HBA and before the
 * interrupts are enabled, so there is no need for locking.
10335 * This function always returns 0.
10338 lpfc_sli4_setup(struct lpfc_hba *phba)
10340 struct lpfc_sli_ring *pring;
10342 pring = phba->sli4_hba.els_wq->pring;
10343 pring->num_mask = LPFC_MAX_RING_MASK;
10344 pring->prt[0].profile = 0; /* Mask 0 */
10345 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10346 pring->prt[0].type = FC_TYPE_ELS;
10347 pring->prt[0].lpfc_sli_rcv_unsol_event =
10348 lpfc_els_unsol_event;
10349 pring->prt[1].profile = 0; /* Mask 1 */
10350 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10351 pring->prt[1].type = FC_TYPE_ELS;
10352 pring->prt[1].lpfc_sli_rcv_unsol_event =
10353 lpfc_els_unsol_event;
10354 pring->prt[2].profile = 0; /* Mask 2 */
10355 /* NameServer Inquiry */
10356 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10358 pring->prt[2].type = FC_TYPE_CT;
10359 pring->prt[2].lpfc_sli_rcv_unsol_event =
10360 lpfc_ct_unsol_event;
10361 pring->prt[3].profile = 0; /* Mask 3 */
10362 /* NameServer response */
10363 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10365 pring->prt[3].type = FC_TYPE_CT;
10366 pring->prt[3].lpfc_sli_rcv_unsol_event =
10367 lpfc_ct_unsol_event;
10372 * lpfc_sli_setup - SLI ring setup function
10373 * @phba: Pointer to HBA context object.
 * lpfc_sli_setup sets up rings of the SLI interface with
 * the number of iocbs per ring and iotags. This function is
 * called while the driver attaches to the HBA and before the
 * interrupts are enabled, so there is no need for locking.
10380 * This function always returns 0. SLI3 only.
10383 lpfc_sli_setup(struct lpfc_hba *phba)
10385 int i, totiocbsize = 0;
10386 struct lpfc_sli *psli = &phba->sli;
10387 struct lpfc_sli_ring *pring;
10389 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
10390 psli->sli_flag = 0;
10392 psli->iocbq_lookup = NULL;
10393 psli->iocbq_lookup_len = 0;
10394 psli->last_iotag = 0;
10396 for (i = 0; i < psli->num_rings; i++) {
10397 pring = &psli->sli3_ring[i];
10399 case LPFC_FCP_RING: /* ring 0 - FCP */
10400 /* numCiocb and numRiocb are used in config_port */
10401 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10402 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10403 pring->sli.sli3.numCiocb +=
10404 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10405 pring->sli.sli3.numRiocb +=
10406 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10407 pring->sli.sli3.numCiocb +=
10408 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10409 pring->sli.sli3.numRiocb +=
10410 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10411 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10412 SLI3_IOCB_CMD_SIZE :
10413 SLI2_IOCB_CMD_SIZE;
10414 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10415 SLI3_IOCB_RSP_SIZE :
10416 SLI2_IOCB_RSP_SIZE;
10417 pring->iotag_ctr = 0;
10419 (phba->cfg_hba_queue_depth * 2);
10420 pring->fast_iotag = pring->iotag_max;
10421 pring->num_mask = 0;
10423 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
10424 /* numCiocb and numRiocb are used in config_port */
10425 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10426 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10427 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10428 SLI3_IOCB_CMD_SIZE :
10429 SLI2_IOCB_CMD_SIZE;
10430 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10431 SLI3_IOCB_RSP_SIZE :
10432 SLI2_IOCB_RSP_SIZE;
10433 pring->iotag_max = phba->cfg_hba_queue_depth;
10434 pring->num_mask = 0;
10436 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
10437 /* numCiocb and numRiocb are used in config_port */
10438 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10439 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10440 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10441 SLI3_IOCB_CMD_SIZE :
10442 SLI2_IOCB_CMD_SIZE;
10443 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10444 SLI3_IOCB_RSP_SIZE :
10445 SLI2_IOCB_RSP_SIZE;
10446 pring->fast_iotag = 0;
10447 pring->iotag_ctr = 0;
10448 pring->iotag_max = 4096;
10449 pring->lpfc_sli_rcv_async_status =
10450 lpfc_sli_async_event_handler;
10451 pring->num_mask = LPFC_MAX_RING_MASK;
10452 pring->prt[0].profile = 0; /* Mask 0 */
10453 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10454 pring->prt[0].type = FC_TYPE_ELS;
10455 pring->prt[0].lpfc_sli_rcv_unsol_event =
10456 lpfc_els_unsol_event;
10457 pring->prt[1].profile = 0; /* Mask 1 */
10458 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10459 pring->prt[1].type = FC_TYPE_ELS;
10460 pring->prt[1].lpfc_sli_rcv_unsol_event =
10461 lpfc_els_unsol_event;
10462 pring->prt[2].profile = 0; /* Mask 2 */
10463 /* NameServer Inquiry */
10464 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10466 pring->prt[2].type = FC_TYPE_CT;
10467 pring->prt[2].lpfc_sli_rcv_unsol_event =
10468 lpfc_ct_unsol_event;
10469 pring->prt[3].profile = 0; /* Mask 3 */
10470 /* NameServer response */
10471 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10473 pring->prt[3].type = FC_TYPE_CT;
10474 pring->prt[3].lpfc_sli_rcv_unsol_event =
10475 lpfc_ct_unsol_event;
10478 totiocbsize += (pring->sli.sli3.numCiocb *
10479 pring->sli.sli3.sizeCiocb) +
10480 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
10482 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
10483 /* Too many cmd / rsp ring entries in SLI2 SLIM */
10484 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10485 "SLI2 SLIM Data: x%x x%lx\n",
10486 phba->brd_no, totiocbsize,
10487 (unsigned long) MAX_SLIM_IOCB_SIZE);
10489 if (phba->cfg_multi_ring_support == 2)
10490 lpfc_extra_ring_setup(phba);
10496 * lpfc_sli4_queue_init - Queue initialization function
10497 * @phba: Pointer to HBA context object.
10499 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
10500 * ring. This function also initializes ring indices of each ring.
10501 * This function is called during the initialization of the SLI
10502 * interface of an HBA.
 * This function is called with no lock held.
10507 lpfc_sli4_queue_init(struct lpfc_hba *phba)
10509 struct lpfc_sli *psli;
10510 struct lpfc_sli_ring *pring;
10514 spin_lock_irq(&phba->hbalock);
10515 INIT_LIST_HEAD(&psli->mboxq);
10516 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10517 /* Initialize list headers for txq and txcmplq as double linked lists */
10518 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10519 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
10521 pring->ringno = LPFC_FCP_RING;
10522 pring->txcmplq_cnt = 0;
10523 INIT_LIST_HEAD(&pring->txq);
10524 INIT_LIST_HEAD(&pring->txcmplq);
10525 INIT_LIST_HEAD(&pring->iocb_continueq);
10526 spin_lock_init(&pring->ring_lock);
10528 pring = phba->sli4_hba.els_wq->pring;
10530 pring->ringno = LPFC_ELS_RING;
10531 pring->txcmplq_cnt = 0;
10532 INIT_LIST_HEAD(&pring->txq);
10533 INIT_LIST_HEAD(&pring->txcmplq);
10534 INIT_LIST_HEAD(&pring->iocb_continueq);
10535 spin_lock_init(&pring->ring_lock);
10537 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10538 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10539 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
10541 pring->ringno = LPFC_FCP_RING;
10542 pring->txcmplq_cnt = 0;
10543 INIT_LIST_HEAD(&pring->txq);
10544 INIT_LIST_HEAD(&pring->txcmplq);
10545 INIT_LIST_HEAD(&pring->iocb_continueq);
10546 spin_lock_init(&pring->ring_lock);
10548 pring = phba->sli4_hba.nvmels_wq->pring;
10550 pring->ringno = LPFC_ELS_RING;
10551 pring->txcmplq_cnt = 0;
10552 INIT_LIST_HEAD(&pring->txq);
10553 INIT_LIST_HEAD(&pring->txcmplq);
10554 INIT_LIST_HEAD(&pring->iocb_continueq);
10555 spin_lock_init(&pring->ring_lock);
10558 spin_unlock_irq(&phba->hbalock);
10562 * lpfc_sli_queue_init - Queue initialization function
10563 * @phba: Pointer to HBA context object.
10565 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
10566 * ring. This function also initializes ring indices of each ring.
10567 * This function is called during the initialization of the SLI
10568 * interface of an HBA.
 * This function is called with no lock held.
10573 lpfc_sli_queue_init(struct lpfc_hba *phba)
10575 struct lpfc_sli *psli;
10576 struct lpfc_sli_ring *pring;
10580 spin_lock_irq(&phba->hbalock);
10581 INIT_LIST_HEAD(&psli->mboxq);
10582 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10583 /* Initialize list headers for txq and txcmplq as double linked lists */
10584 for (i = 0; i < psli->num_rings; i++) {
10585 pring = &psli->sli3_ring[i];
10587 pring->sli.sli3.next_cmdidx = 0;
10588 pring->sli.sli3.local_getidx = 0;
10589 pring->sli.sli3.cmdidx = 0;
10590 INIT_LIST_HEAD(&pring->iocb_continueq);
10591 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
10592 INIT_LIST_HEAD(&pring->postbufq);
10594 INIT_LIST_HEAD(&pring->txq);
10595 INIT_LIST_HEAD(&pring->txcmplq);
10596 spin_lock_init(&pring->ring_lock);
10598 spin_unlock_irq(&phba->hbalock);
10602 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
10603 * @phba: Pointer to HBA context object.
10605 * This routine flushes the mailbox command subsystem. It will unconditionally
10606 * flush all the mailbox commands in the three possible stages in the mailbox
10607 * command sub-system: pending mailbox command queue; the outstanding mailbox
10608 * command; and completed mailbox command queue. It is caller's responsibility
10609 * to make sure that the driver is in the proper state to flush the mailbox
10610 * command sub-system. Namely, the posting of mailbox commands into the
10611 * pending mailbox command queue from the various clients must be stopped;
10612 * either the HBA is in a state that it will never works on the outstanding
10613 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
10614 * mailbox command has been completed.
10617 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
10619 LIST_HEAD(completions);
10620 struct lpfc_sli *psli = &phba->sli;
10622 unsigned long iflag;
10624 /* Disable softirqs, including timers from obtaining phba->hbalock */
10625 local_bh_disable();
10627 /* Flush all the mailbox commands in the mbox system */
10628 spin_lock_irqsave(&phba->hbalock, iflag);
10630 /* The pending mailbox command queue */
10631 list_splice_init(&phba->sli.mboxq, &completions);
10632 /* The outstanding active mailbox command */
10633 if (psli->mbox_active) {
10634 list_add_tail(&psli->mbox_active->list, &completions);
10635 psli->mbox_active = NULL;
10636 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10638 /* The completed mailbox command queue */
10639 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
10640 spin_unlock_irqrestore(&phba->hbalock, iflag);
10642 /* Enable softirqs again, done with phba->hbalock */
10645 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
10646 while (!list_empty(&completions)) {
10647 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
10648 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
10649 if (pmb->mbox_cmpl)
10650 pmb->mbox_cmpl(phba, pmb);
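/* Sketch of what a mailbox completion handler observes after this flush:
 * every flushed command is completed with MBX_NOT_FINISHED. The handler name
 * below is hypothetical; mempool_free() into phba->mbox_mem_pool is the
 * usual release path in this driver.
 */
#if 0
static void example_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	if (pmb->u.mb.mbxStatus == MBX_NOT_FINISHED) {
		/* command was flushed, never executed by the port */
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}
	/* ... normal completion processing ... */
	mempool_free(pmb, phba->mbox_mem_pool);
}
#endif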
10655 * lpfc_sli_host_down - Vport cleanup function
10656 * @vport: Pointer to virtual port object.
10658 * lpfc_sli_host_down is called to clean up the resources
10659 * associated with a vport before destroying virtual
10660 * port data structures.
 * This function does the following operations:
 * - Free discovery resources associated with this virtual port.
 * - Free iocbs associated with this virtual port in the txq.
 * - Send abort for all iocb commands associated with this
 *   vport in txcmplq.
10669 * This function is called with no lock held and always returns 1.
10672 lpfc_sli_host_down(struct lpfc_vport *vport)
10674 LIST_HEAD(completions);
10675 struct lpfc_hba *phba = vport->phba;
10676 struct lpfc_sli *psli = &phba->sli;
10677 struct lpfc_queue *qp = NULL;
10678 struct lpfc_sli_ring *pring;
10679 struct lpfc_iocbq *iocb, *next_iocb;
10681 unsigned long flags = 0;
10682 uint16_t prev_pring_flag;
10684 lpfc_cleanup_discovery_resources(vport);
10686 spin_lock_irqsave(&phba->hbalock, flags);
10689 * Error everything on the txq since these iocbs
10690 * have not been given to the FW yet.
10691 * Also issue ABTS for everything on the txcmplq
10693 if (phba->sli_rev != LPFC_SLI_REV4) {
10694 for (i = 0; i < psli->num_rings; i++) {
10695 pring = &psli->sli3_ring[i];
10696 prev_pring_flag = pring->flag;
10697 /* Only slow rings */
10698 if (pring->ringno == LPFC_ELS_RING) {
10699 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10700 /* Set the lpfc data pending flag */
10701 set_bit(LPFC_DATA_READY, &phba->data_flags);
10703 list_for_each_entry_safe(iocb, next_iocb,
10704 &pring->txq, list) {
10705 if (iocb->vport != vport)
10707 list_move_tail(&iocb->list, &completions);
10709 list_for_each_entry_safe(iocb, next_iocb,
10710 &pring->txcmplq, list) {
10711 if (iocb->vport != vport)
10713 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10715 pring->flag = prev_pring_flag;
10718 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10722 if (pring == phba->sli4_hba.els_wq->pring) {
10723 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10724 /* Set the lpfc data pending flag */
10725 set_bit(LPFC_DATA_READY, &phba->data_flags);
10727 prev_pring_flag = pring->flag;
10728 spin_lock_irq(&pring->ring_lock);
10729 list_for_each_entry_safe(iocb, next_iocb,
10730 &pring->txq, list) {
10731 if (iocb->vport != vport)
10733 list_move_tail(&iocb->list, &completions);
10735 spin_unlock_irq(&pring->ring_lock);
10736 list_for_each_entry_safe(iocb, next_iocb,
10737 &pring->txcmplq, list) {
10738 if (iocb->vport != vport)
10740 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10742 pring->flag = prev_pring_flag;
10745 spin_unlock_irqrestore(&phba->hbalock, flags);
10747 /* Cancel all the IOCBs from the completions list */
10748 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10754 * lpfc_sli_hba_down - Resource cleanup function for the HBA
10755 * @phba: Pointer to HBA context object.
 * This function cleans up all iocbs, buffers, and mailbox commands
10758 * while shutting down the HBA. This function is called with no
10759 * lock held and always returns 1.
10760 * This function does the following to cleanup driver resources:
10761 * - Free discovery resources for each virtual port
10762 * - Cleanup any pending fabric iocbs
10763 * - Iterate through the iocb txq and free each entry
10765 * - Free up any buffer posted to the HBA
10766 * - Free mailbox commands in the mailbox queue.
10769 lpfc_sli_hba_down(struct lpfc_hba *phba)
10771 LIST_HEAD(completions);
10772 struct lpfc_sli *psli = &phba->sli;
10773 struct lpfc_queue *qp = NULL;
10774 struct lpfc_sli_ring *pring;
10775 struct lpfc_dmabuf *buf_ptr;
10776 unsigned long flags = 0;
10779 /* Shutdown the mailbox command sub-system */
10780 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
10782 lpfc_hba_down_prep(phba);
10784 /* Disable softirqs, including timers from obtaining phba->hbalock */
10785 local_bh_disable();
10787 lpfc_fabric_abort_hba(phba);
10789 spin_lock_irqsave(&phba->hbalock, flags);
10792 * Error everything on the txq since these iocbs
10793 * have not been given to the FW yet.
10795 if (phba->sli_rev != LPFC_SLI_REV4) {
10796 for (i = 0; i < psli->num_rings; i++) {
10797 pring = &psli->sli3_ring[i];
10798 /* Only slow rings */
10799 if (pring->ringno == LPFC_ELS_RING) {
10800 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10801 /* Set the lpfc data pending flag */
10802 set_bit(LPFC_DATA_READY, &phba->data_flags);
10804 list_splice_init(&pring->txq, &completions);
10807 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10811 spin_lock_irq(&pring->ring_lock);
10812 list_splice_init(&pring->txq, &completions);
10813 spin_unlock_irq(&pring->ring_lock);
10814 if (pring == phba->sli4_hba.els_wq->pring) {
10815 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10816 /* Set the lpfc data pending flag */
10817 set_bit(LPFC_DATA_READY, &phba->data_flags);
10821 spin_unlock_irqrestore(&phba->hbalock, flags);
10823 /* Cancel all the IOCBs from the completions list */
10824 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10827 spin_lock_irqsave(&phba->hbalock, flags);
10828 list_splice_init(&phba->elsbuf, &completions);
10829 phba->elsbuf_cnt = 0;
10830 phba->elsbuf_prev_cnt = 0;
10831 spin_unlock_irqrestore(&phba->hbalock, flags);
10833 while (!list_empty(&completions)) {
10834 list_remove_head(&completions, buf_ptr,
10835 struct lpfc_dmabuf, list);
10836 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
10840 /* Enable softirqs again, done with phba->hbalock */
10843 /* Return any active mbox cmds */
10844 del_timer_sync(&psli->mbox_tmo);
10846 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
10847 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
10848 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
10854 * lpfc_sli_pcimem_bcopy - SLI memory copy function
10855 * @srcp: Source memory pointer.
10856 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy; must be a multiple of sizeof(uint32_t).
 *
 * This function is used for copying data between driver memory
 * and the SLI memory. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without a lock held.
10866 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
10868 uint32_t *src = srcp;
10869 uint32_t *dest = destp;
10873 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
10875 ldata = le32_to_cpu(ldata);
10884 * lpfc_sli_bemem_bcopy - SLI memory copy function
10885 * @srcp: Source memory pointer.
10886 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy; must be a multiple of sizeof(uint32_t).
 *
 * This function is used for copying data from a big endian
 * data structure to the local endianness.
 * This function can be called with or without a lock held.
10894 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
10896 uint32_t *src = srcp;
10897 uint32_t *dest = destp;
10901 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10903 ldata = be32_to_cpu(ldata);
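/* A small worked example of the two copy helpers, assuming an 8-byte buffer
 * (@cnt is in bytes, stepped one 32-bit word at a time):
 */
#if 0
	uint32_t src[2] = { 0x11223344, 0x55667788 };
	uint32_t dst[2];

	/* le32_to_cpu() per word: a plain copy on little-endian hosts,
	 * byte-swapped (0x11223344 -> 0x44332211) on big-endian hosts.
	 */
	lpfc_sli_pcimem_bcopy(src, dst, sizeof(src));

	/* be32_to_cpu() per word: the mirror image, a plain copy on
	 * big-endian hosts and byte-swapped on little-endian hosts.
	 */
	lpfc_sli_bemem_bcopy(src, dst, sizeof(src));
#endif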
10911 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
10912 * @phba: Pointer to HBA context object.
10913 * @pring: Pointer to driver SLI ring object.
10914 * @mp: Pointer to driver buffer object.
10916 * This function is called with no lock held.
 * It always returns zero after adding the buffer to the postbufq.
10921 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10922 struct lpfc_dmabuf *mp)
/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
 * later.
 */
10926 spin_lock_irq(&phba->hbalock);
10927 list_add_tail(&mp->list, &pring->postbufq);
10928 pring->postbufq_cnt++;
10929 spin_unlock_irq(&phba->hbalock);
10934 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
10935 * @phba: Pointer to HBA context object.
10937 * When HBQ is enabled, buffers are searched based on tags. This function
10938 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
10939 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
10940 * does not conflict with tags of buffer posted for unsolicited events.
 * The function returns the allocated tag. The function is called with
 * no lock held.
10945 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10947 spin_lock_irq(&phba->hbalock);
10948 phba->buffer_tag_count++;
 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
 * a tag assigned by HBQ.
 */
10953 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
10954 spin_unlock_irq(&phba->hbalock);
10955 return phba->buffer_tag_count;
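/* Hedged usage sketch pairing the tag allocator with the postbufq helpers
 * (variable names are illustrative):
 */
#if 0
	struct lpfc_dmabuf *mp;		/* buffer posted via CMD_QUE_XRI64_CX */
	uint32_t tag;

	tag = lpfc_sli_get_buffer_tag(phba);	/* QUE_BUFTAG_BIT is set */
	mp->buffer_tag = tag;
	lpfc_sli_ringpostbuf_put(phba, pring, mp);

	/* later, when the CMD_IOCB_RET_XRI64_CX response arrives: */
	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
#endif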
10959 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
10960 * @phba: Pointer to HBA context object.
10961 * @pring: Pointer to driver SLI ring object.
10962 * @tag: Buffer tag.
10964 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
10965 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
10966 * iocb is posted to the response ring with the tag of the buffer.
10967 * This function searches the pring->postbufq list using the tag
 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
 * iocb. If the buffer is found, the lpfc_dmabuf object of the
10970 * buffer is returned to the caller else NULL is returned.
10971 * This function is called with no lock held.
10973 struct lpfc_dmabuf *
10974 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10977 struct lpfc_dmabuf *mp, *next_mp;
10978 struct list_head *slp = &pring->postbufq;
10980 /* Search postbufq, from the beginning, looking for a match on tag */
10981 spin_lock_irq(&phba->hbalock);
10982 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10983 if (mp->buffer_tag == tag) {
10984 list_del_init(&mp->list);
10985 pring->postbufq_cnt--;
10986 spin_unlock_irq(&phba->hbalock);
10991 spin_unlock_irq(&phba->hbalock);
10992 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10993 "0402 Cannot find virtual addr for buffer tag on "
10994 "ring %d Data x%lx x%p x%p x%x\n",
10995 pring->ringno, (unsigned long) tag,
10996 slp->next, slp->prev, pring->postbufq_cnt);
11002 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
11003 * @phba: Pointer to HBA context object.
11004 * @pring: Pointer to driver SLI ring object.
11005 * @phys: DMA address of the buffer.
11007 * This function searches the buffer list using the dma_address
11008 * of unsolicited event to find the driver's lpfc_dmabuf object
11009 * corresponding to the dma_address. The function returns the
11010 * lpfc_dmabuf object if a buffer is found else it returns NULL.
11011 * This function is called by the ct and els unsolicited event
 * handlers to get the buffer associated with the unsolicited event.
11015 * This function is called with no lock held.
11017 struct lpfc_dmabuf *
11018 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11021 struct lpfc_dmabuf *mp, *next_mp;
11022 struct list_head *slp = &pring->postbufq;
11024 /* Search postbufq, from the beginning, looking for a match on phys */
11025 spin_lock_irq(&phba->hbalock);
11026 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11027 if (mp->phys == phys) {
11028 list_del_init(&mp->list);
11029 pring->postbufq_cnt--;
11030 spin_unlock_irq(&phba->hbalock);
11035 spin_unlock_irq(&phba->hbalock);
11036 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11037 "0410 Cannot find virtual addr for mapped buf on "
11038 "ring %d Data x%llx x%p x%p x%x\n",
11039 pring->ringno, (unsigned long long)phys,
11040 slp->next, slp->prev, pring->postbufq_cnt);
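/* Sketch of how the CT/ELS unsolicited handlers resolve a received DMA
 * address back to the posted buffer; getPaddr() is the driver's helper for
 * combining the two BDE address words of the response iocb.
 */
#if 0
	dma_addr_t phys = getPaddr(irsp->un.cont64[0].addrHigh,
				   irsp->un.cont64[0].addrLow);
	struct lpfc_dmabuf *mp;

	mp = lpfc_sli_ringpostbuf_get(phba, pring, phys);
	if (!mp)
		return;		/* no match; the routine above logged it */
#endif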
11045 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
11046 * @phba: Pointer to HBA context object.
11047 * @cmdiocb: Pointer to driver command iocb object.
11048 * @rspiocb: Pointer to driver response iocb object.
11050 * This function is the completion handler for the abort iocbs for
11051 * ELS commands. This function is called from the ELS ring event
11052 * handler with no lock held. This function frees memory resources
11053 * associated with the abort iocb.
11056 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11057 struct lpfc_iocbq *rspiocb)
11059 IOCB_t *irsp = &rspiocb->iocb;
11060 uint16_t abort_iotag, abort_context;
11061 struct lpfc_iocbq *abort_iocb = NULL;
11063 if (irsp->ulpStatus) {
11066 * Assume that the port already completed and returned, or
 * will return the iocb. Just log the message.
11069 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
11070 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
11072 spin_lock_irq(&phba->hbalock);
11073 if (phba->sli_rev < LPFC_SLI_REV4) {
11074 if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
11075 irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11076 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
11077 spin_unlock_irq(&phba->hbalock);
11080 if (abort_iotag != 0 &&
11081 abort_iotag <= phba->sli.last_iotag)
11083 phba->sli.iocbq_lookup[abort_iotag];
11085 /* For sli4 the abort_tag is the XRI,
11086 * so the abort routine puts the iotag of the iocb
11087 * being aborted in the context field of the abort
11090 abort_iocb = phba->sli.iocbq_lookup[abort_context];
11092 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
11093 "0327 Cannot abort els iocb %p "
11094 "with tag %x context %x, abort status %x, "
11096 abort_iocb, abort_iotag, abort_context,
11097 irsp->ulpStatus, irsp->un.ulpWord[4]);
11099 spin_unlock_irq(&phba->hbalock);
11102 lpfc_sli_release_iocbq(phba, cmdiocb);
11107 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
11108 * @phba: Pointer to HBA context object.
11109 * @cmdiocb: Pointer to driver command iocb object.
11110 * @rspiocb: Pointer to driver response iocb object.
11112 * The function is called from SLI ring event handler with no
11113 * lock held. This function is the completion handler for ELS commands
11114 * which are aborted. The function frees memory resources used for
11115 * the aborted ELS commands.
11118 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11119 struct lpfc_iocbq *rspiocb)
11121 IOCB_t *irsp = &rspiocb->iocb;
11123 /* ELS cmd tag <ulpIoTag> completes */
11124 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11125 "0139 Ignoring ELS cmd tag x%x completion Data: "
11127 irsp->ulpIoTag, irsp->ulpStatus,
11128 irsp->un.ulpWord[4], irsp->ulpTimeout);
11129 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11130 lpfc_ct_free_iocb(phba, cmdiocb);
11132 lpfc_els_free_iocb(phba, cmdiocb);
11137 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
11138 * @phba: Pointer to HBA context object.
11139 * @pring: Pointer to driver SLI ring object.
11140 * @cmdiocb: Pointer to driver command iocb object.
11142 * This function issues an abort iocb for the provided command iocb down to
 * the port. Unless the outstanding command iocb is itself an abort
 * request, this function issues the abort unconditionally. This function is
11145 * called with hbalock held. The function returns 0 when it fails due to
11146 * memory allocation failure or when the command iocb is an abort request.
11149 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11150 struct lpfc_iocbq *cmdiocb)
11152 struct lpfc_vport *vport = cmdiocb->vport;
11153 struct lpfc_iocbq *abtsiocbp;
11154 IOCB_t *icmd = NULL;
11155 IOCB_t *iabt = NULL;
11157 unsigned long iflags;
11158 struct lpfc_nodelist *ndlp;
11160 lockdep_assert_held(&phba->hbalock);
11163 * There are certain command types we don't want to abort. And we
 * don't want to abort commands that are already in the process of
 * being aborted.
 */
11167 icmd = &cmdiocb->iocb;
11168 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11169 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11170 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11173 /* issue ABTS for this IOCB based on iotag */
11174 abtsiocbp = __lpfc_sli_get_iocbq(phba);
11175 if (abtsiocbp == NULL)
11178 /* This signals the response to set the correct status
11179 * before calling the completion handler
11181 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11183 iabt = &abtsiocbp->iocb;
11184 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11185 iabt->un.acxri.abortContextTag = icmd->ulpContext;
11186 if (phba->sli_rev == LPFC_SLI_REV4) {
11187 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
11188 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
11190 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
11191 if (pring->ringno == LPFC_ELS_RING) {
11192 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11193 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11197 iabt->ulpClass = icmd->ulpClass;
11199 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11200 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
11201 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
11202 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
11203 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11204 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
11206 if (phba->link_state >= LPFC_LINK_UP)
11207 iabt->ulpCommand = CMD_ABORT_XRI_CN;
11209 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
11211 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
11212 abtsiocbp->vport = vport;
11214 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11215 "0339 Abort xri x%x, original iotag x%x, "
11216 "abort cmd iotag x%x\n",
11217 iabt->un.acxri.abortIoTag,
11218 iabt->un.acxri.abortContextTag,
11221 if (phba->sli_rev == LPFC_SLI_REV4) {
11222 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11223 if (unlikely(pring == NULL))
11225 /* Note: both hbalock and ring_lock need to be set here */
11226 spin_lock_irqsave(&pring->ring_lock, iflags);
11227 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11229 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11231 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11236 __lpfc_sli_release_iocbq(phba, abtsiocbp);
11239 * Caller to this routine should check for IOCB_ERROR
 * and handle it properly. This routine no longer removes the
 * iocb from the txcmplq or calls the completion handler on IOCB_ERROR.
11247 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
11248 * @phba: Pointer to HBA context object.
11249 * @pring: Pointer to driver SLI ring object.
11250 * @cmdiocb: Pointer to driver command iocb object.
11252 * This function issues an abort iocb for the provided command iocb. In case
11253 * of unloading, the abort iocb will not be issued to commands on the ELS
 * ring. Instead, the completion callback of those commands is changed
 * so that nothing happens when they finish. This function is called with
11256 * hbalock held. The function returns 0 when the command iocb is an abort
11260 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11261 struct lpfc_iocbq *cmdiocb)
11263 struct lpfc_vport *vport = cmdiocb->vport;
11264 int retval = IOCB_ERROR;
11265 IOCB_t *icmd = NULL;
11267 lockdep_assert_held(&phba->hbalock);
11270 * There are certain command types we don't want to abort. And we
 * don't want to abort commands that are already in the process of
 * being aborted.
 */
11274 icmd = &cmdiocb->iocb;
11275 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11276 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11277 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11281 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11282 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11284 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11285 goto abort_iotag_exit;
11289 * If we're unloading, don't abort iocb on the ELS ring, but change
11290 * the callback so that nothing happens when it finishes.
11292 if ((vport->load_flag & FC_UNLOADING) &&
11293 (pring->ringno == LPFC_ELS_RING)) {
11294 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11295 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11297 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11298 goto abort_iotag_exit;
11301 /* Now, we try to issue the abort to the cmdiocb out */
11302 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
11306 * Caller to this routine should check for IOCB_ERROR
 * and handle it properly. This routine no longer removes the
 * iocb from the txcmplq or calls the completion handler on IOCB_ERROR.
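/* Hedged caller sketch honoring the locking contract documented above:
 * issue the abort with hbalock held and treat IOCB_ERROR as "abort not
 * issued", leaving the original iocb to complete or time out.
 */
#if 0
	spin_lock_irq(&phba->hbalock);
	retval = lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
	spin_unlock_irq(&phba->hbalock);
	if (retval == IOCB_ERROR)
		/* no ABTS went out; do not free cmdiocb here */
		;
#endif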
11314 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
11315 * @phba: pointer to lpfc HBA data structure.
11317 * This routine will abort all pending and outstanding iocbs to an HBA.
11320 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11322 struct lpfc_sli *psli = &phba->sli;
11323 struct lpfc_sli_ring *pring;
11324 struct lpfc_queue *qp = NULL;
11327 if (phba->sli_rev != LPFC_SLI_REV4) {
11328 for (i = 0; i < psli->num_rings; i++) {
11329 pring = &psli->sli3_ring[i];
11330 lpfc_sli_abort_iocb_ring(phba, pring);
11334 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11338 lpfc_sli_abort_iocb_ring(phba, pring);
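/* Sketch of the SLI4 iteration idiom used above: every WQ carries its own
 * pring, and walking lpfc_wq_list visits them all (NULL prings skipped).
 */
#if 0
	struct lpfc_queue *qp;

	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		/* ... per-ring work, e.g. lpfc_sli_abort_iocb_ring() ... */
	}
#endif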
11343 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
11344 * @iocbq: Pointer to driver iocb object.
11345 * @vport: Pointer to driver virtual port object.
11346 * @tgt_id: SCSI ID of the target.
11347 * @lun_id: LUN ID of the scsi device.
11348 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
11350 * This function acts as an iocb filter for functions which abort or count
11351 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
 * 0 if the filtering criteria are met for the given iocb and will return
 * 1 if the filtering criteria are not met.
11354 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
11355 * given iocb is for the SCSI device specified by vport, tgt_id and
11356 * lun_id parameter.
11357 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
11358 * given iocb is for the SCSI target specified by vport and tgt_id
11360 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
11361 * given iocb is for the SCSI host associated with the given vport.
11362 * This function is called with no locks held.
11365 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11366 uint16_t tgt_id, uint64_t lun_id,
11367 lpfc_ctx_cmd ctx_cmd)
11369 struct lpfc_io_buf *lpfc_cmd;
11372 if (iocbq->vport != vport)
11375 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11376 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
11379 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11381 if (lpfc_cmd->pCmd == NULL)
11386 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11387 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11388 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
11392 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11393 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
11396 case LPFC_CTX_HOST:
11400 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
11401 __func__, ctx_cmd);
11409 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
11410 * @vport: Pointer to virtual port.
11411 * @tgt_id: SCSI ID of the target.
11412 * @lun_id: LUN ID of the scsi device.
11413 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 * This function returns the number of FCP commands pending for the vport.
 * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
 * commands pending on the vport associated with the SCSI device specified
 * by the tgt_id and lun_id parameters.
 * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
 * commands pending on the vport associated with the SCSI target specified
 * by the tgt_id parameter.
 * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
 * commands pending on the vport.
11424 * This function returns the number of iocbs which satisfy the filter.
11425 * This function is called without any lock held.
11428 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11429 lpfc_ctx_cmd ctx_cmd)
11431 struct lpfc_hba *phba = vport->phba;
11432 struct lpfc_iocbq *iocbq;
11435 spin_lock_irq(&phba->hbalock);
11436 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11437 iocbq = phba->sli.iocbq_lookup[i];
11439 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
11443 spin_unlock_irq(&phba->hbalock);
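/* Usage sketch (pattern from the SCSI reset handlers, names assumed): poll
 * the counter until outstanding FCP commands for the LUN have drained.
 */
#if 0
	int cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);

	while (cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
	}
#endif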
11449 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
11450 * @phba: Pointer to HBA context object
11451 * @cmdiocb: Pointer to command iocb object.
11452 * @rspiocb: Pointer to response iocb object.
11454 * This function is called when an aborted FCP iocb completes. This
11455 * function is called by the ring event handler with no lock held.
11456 * This function frees the iocb.
11459 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11460 struct lpfc_iocbq *rspiocb)
11462 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11463 "3096 ABORT_XRI_CN completing on rpi x%x "
11464 "original iotag x%x, abort cmd iotag x%x "
11465 "status 0x%x, reason 0x%x\n",
11466 cmdiocb->iocb.un.acxri.abortContextTag,
11467 cmdiocb->iocb.un.acxri.abortIoTag,
11468 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11469 rspiocb->iocb.un.ulpWord[4]);
11470 lpfc_sli_release_iocbq(phba, cmdiocb);
11475 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
11476 * @vport: Pointer to virtual port.
11477 * @pring: Pointer to driver SLI ring object.
11478 * @tgt_id: SCSI ID of the target.
11479 * @lun_id: LUN ID of the scsi device.
11480 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11482 * This function sends an abort command for every SCSI command
11483 * associated with the given virtual port pending on the ring
11484 * filtered by lpfc_sli_validate_fcp_iocb function.
11485 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with the lun specified by the tgt_id and lun_id
 * parameters.
11488 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
11489 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11490 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
11491 * FCP iocbs associated with virtual port.
11492 * This function returns number of iocbs it failed to abort.
11493 * This function is called with no locks held.
11496 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11497 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
11499 struct lpfc_hba *phba = vport->phba;
11500 struct lpfc_iocbq *iocbq;
11501 struct lpfc_iocbq *abtsiocb;
11502 struct lpfc_sli_ring *pring_s4;
11503 IOCB_t *cmd = NULL;
11504 int errcnt = 0, ret_val = 0;
/* all I/Os are in the process of being flushed */
11508 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH)
11511 for (i = 1; i <= phba->sli.last_iotag; i++) {
11512 iocbq = phba->sli.iocbq_lookup[i];
11514 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11519 * If the iocbq is already being aborted, don't take a second
11520 * action, but do count it.
11522 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11525 /* issue ABTS for this IOCB based on iotag */
11526 abtsiocb = lpfc_sli_get_iocbq(phba);
11527 if (abtsiocb == NULL) {
11532 /* indicate the IO is being aborted by the driver. */
11533 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11535 cmd = &iocbq->iocb;
11536 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11537 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
11538 if (phba->sli_rev == LPFC_SLI_REV4)
11539 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11541 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
11542 abtsiocb->iocb.ulpLe = 1;
11543 abtsiocb->iocb.ulpClass = cmd->ulpClass;
11544 abtsiocb->vport = vport;
11546 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11547 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
11548 if (iocbq->iocb_flag & LPFC_IO_FCP)
11549 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
11550 if (iocbq->iocb_flag & LPFC_IO_FOF)
11551 abtsiocb->iocb_flag |= LPFC_IO_FOF;
11553 if (lpfc_is_link_up(phba))
11554 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11556 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11558 /* Setup callback routine and issue the command. */
11559 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11560 if (phba->sli_rev == LPFC_SLI_REV4) {
11561 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11564 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11567 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11569 if (ret_val == IOCB_ERROR) {
11570 lpfc_sli_release_iocbq(phba, abtsiocb);
11580 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
11581 * @vport: Pointer to virtual port.
11582 * @pring: Pointer to driver SLI ring object.
11583 * @tgt_id: SCSI ID of the target.
11584 * @lun_id: LUN ID of the scsi device.
11585 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11587 * This function sends an abort command for every SCSI command
11588 * associated with the given virtual port pending on the ring
11589 * filtered by lpfc_sli_validate_fcp_iocb function.
11590 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with the lun specified by the tgt_id and lun_id
 * parameters.
11593 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
11594 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11595 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
11596 * FCP iocbs associated with virtual port.
 * This function returns the number of iocbs it aborted.
 * This function is called with no locks held, right after a task
 * management command is sent.
11602 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11603 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
11605 struct lpfc_hba *phba = vport->phba;
11606 struct lpfc_io_buf *lpfc_cmd;
11607 struct lpfc_iocbq *abtsiocbq;
11608 struct lpfc_nodelist *ndlp;
11609 struct lpfc_iocbq *iocbq;
11611 int sum, i, ret_val;
11612 unsigned long iflags;
11613 struct lpfc_sli_ring *pring_s4 = NULL;
11615 spin_lock_irqsave(&phba->hbalock, iflags);
/* all I/Os are in the process of being flushed */
11618 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
11619 spin_unlock_irqrestore(&phba->hbalock, iflags);
11624 for (i = 1; i <= phba->sli.last_iotag; i++) {
11625 iocbq = phba->sli.iocbq_lookup[i];
11627 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
/* Guard against IO completion being called at the same time */
11632 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11633 spin_lock(&lpfc_cmd->buf_lock);
11635 if (!lpfc_cmd->pCmd) {
11636 spin_unlock(&lpfc_cmd->buf_lock);
11640 if (phba->sli_rev == LPFC_SLI_REV4) {
11642 phba->sli4_hba.hdwq[iocbq->hba_wqidx].fcp_wq->pring;
11644 spin_unlock(&lpfc_cmd->buf_lock);
11647 /* Note: both hbalock and ring_lock must be set here */
11648 spin_lock(&pring_s4->ring_lock);
11652 * If the iocbq is already being aborted, don't take a second
11653 * action, but do count it.
11655 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
11656 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
11657 if (phba->sli_rev == LPFC_SLI_REV4)
11658 spin_unlock(&pring_s4->ring_lock);
11659 spin_unlock(&lpfc_cmd->buf_lock);
11663 /* issue ABTS for this IOCB based on iotag */
11664 abtsiocbq = __lpfc_sli_get_iocbq(phba);
11666 if (phba->sli_rev == LPFC_SLI_REV4)
11667 spin_unlock(&pring_s4->ring_lock);
11668 spin_unlock(&lpfc_cmd->buf_lock);
11672 icmd = &iocbq->iocb;
11673 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11674 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
11675 if (phba->sli_rev == LPFC_SLI_REV4)
11676 abtsiocbq->iocb.un.acxri.abortIoTag =
11677 iocbq->sli4_xritag;
11679 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
11680 abtsiocbq->iocb.ulpLe = 1;
11681 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
11682 abtsiocbq->vport = vport;
11684 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11685 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
11686 if (iocbq->iocb_flag & LPFC_IO_FCP)
11687 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
11688 if (iocbq->iocb_flag & LPFC_IO_FOF)
11689 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
11691 ndlp = lpfc_cmd->rdata->pnode;
11693 if (lpfc_is_link_up(phba) &&
11694 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
11695 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11697 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11699 /* Setup callback routine and issue the command. */
11700 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11703 * Indicate the IO is being aborted by the driver and set
11704 * the caller's flag into the aborted IO.
11706 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11708 if (phba->sli_rev == LPFC_SLI_REV4) {
11709 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11711 spin_unlock(&pring_s4->ring_lock);
11713 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11717 spin_unlock(&lpfc_cmd->buf_lock);
11719 if (ret_val == IOCB_ERROR)
11720 __lpfc_sli_release_iocbq(phba, abtsiocbq);
11724 spin_unlock_irqrestore(&phba->hbalock, iflags);
11729 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
11730 * @phba: Pointer to HBA context object.
11731 * @cmdiocbq: Pointer to command iocb.
11732 * @rspiocbq: Pointer to response iocb.
11734 * This function is the completion handler for iocbs issued using
11735 * lpfc_sli_issue_iocb_wait function. This function is called by the
11736 * ring event handler function without any lock held. This function
11737 * can be called from both worker thread context and interrupt
 * context. This function can also be called from another thread that
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
11741 * response iocb memory object provided by the caller of
11742 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
11743 * sleeps for the iocb completion.
11746 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11747 struct lpfc_iocbq *cmdiocbq,
11748 struct lpfc_iocbq *rspiocbq)
11750 wait_queue_head_t *pdone_q;
11751 unsigned long iflags;
11752 struct lpfc_io_buf *lpfc_cmd;
11754 spin_lock_irqsave(&phba->hbalock, iflags);
11755 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
11758 * A timeout has occurred for the iocb. If a timeout
11759 * completion handler has been supplied, call it. Otherwise,
11760 * just free the iocbq.
11763 spin_unlock_irqrestore(&phba->hbalock, iflags);
11764 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
11765 cmdiocbq->wait_iocb_cmpl = NULL;
11766 if (cmdiocbq->iocb_cmpl)
11767 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
11768 else
11769 lpfc_sli_release_iocbq(phba, cmdiocbq);
11773 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
11774 if (cmdiocbq->context2 && rspiocbq)
11775 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
11776 &rspiocbq->iocb, sizeof(IOCB_t));
11778 /* Set the exchange busy flag for task management commands */
11779 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
11780 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
11781 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
11783 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
11786 pdone_q = cmdiocbq->context_un.wait_queue;
11787 if (pdone_q)
11788 wake_up(pdone_q);
11789 spin_unlock_irqrestore(&phba->hbalock, iflags);
11794 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
11795 * @phba: Pointer to HBA context object.
11796 * @piocbq: Pointer to command iocb.
11797 * @flag: Flag to test.
11799 * This routine grabs the hbalock and then tests the iocb_flag to
11800 * see if the passed in flag is set.
11802 * Returns 1 if the flag is set.
11803 * Returns 0 if the flag is not set.
11806 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11807 struct lpfc_iocbq *piocbq, uint32_t flag)
11809 unsigned long iflags;
11812 spin_lock_irqsave(&phba->hbalock, iflags);
11813 ret = piocbq->iocb_flag & flag;
11814 spin_unlock_irqrestore(&phba->hbalock, iflags);
11820 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
11821 * @phba: Pointer to HBA context object.
11822 * @pring: Pointer to sli ring.
11823 * @piocb: Pointer to command iocb.
11824 * @prspiocbq: Pointer to response iocb.
11825 * @timeout: Timeout in number of seconds.
11827 * This function issues the iocb to firmware and waits for the
11828 * iocb to complete. The iocb_cmpl field of the iocb shall be used
11829 * to handle iocbs which time out. If the field is NULL, the
11830 * function shall free the iocbq structure. If more clean up is
11831 * needed, the caller is expected to provide a completion function
11832 * that will provide the needed clean up. If the iocb command is
11833 * not completed within timeout seconds, the function will either
11834 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
11835 * completion function set in the iocb_cmpl field and then return
11836 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
11837 * resources if this function returns IOCB_TIMEDOUT.
11838 * The function waits for the iocb completion using a
11839 * non-interruptible wait.
11840 * This function will sleep while waiting for iocb completion.
11841 * So, this function should not be called from any context which
11842 * does not allow sleeping. Due to the same reason, this function
11843 * cannot be called with interrupts disabled.
11844 * This function assumes that iocb completions occur while
11845 * this function sleeps. So, this function cannot be called from
11846 * the thread which processes iocb completions for this ring.
11847 * This function clears the iocb_flag of the iocb object before
11848 * issuing the iocb and the iocb completion handler sets this
11849 * flag and wakes this thread when the iocb completes.
11850 * The contents of the response iocb will be copied to prspiocbq
11851 * by the completion handler when the command completes.
11852 * This function returns IOCB_SUCCESS when success.
11853 * This function is called with no lock held.
11856 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
11857 uint32_t ring_number,
11858 struct lpfc_iocbq *piocb,
11859 struct lpfc_iocbq *prspiocbq,
11862 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
11863 long timeleft, timeout_req = 0;
11864 int retval = IOCB_SUCCESS;
11866 struct lpfc_iocbq *iocb;
11868 int txcmplq_cnt = 0;
11869 struct lpfc_sli_ring *pring;
11870 unsigned long iflags;
11871 bool iocb_completed = true;
11873 if (phba->sli_rev >= LPFC_SLI_REV4)
11874 pring = lpfc_sli4_calc_ring(phba, piocb);
11876 pring = &phba->sli.sli3_ring[ring_number];
11878 * If the caller has provided a response iocbq buffer, then context2
11879 * must be NULL; otherwise it is an error.
11882 if (piocb->context2)
11883 return IOCB_ERROR;
11884 piocb->context2 = prspiocbq;
11887 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
11888 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
11889 piocb->context_un.wait_queue = &done_q;
11890 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
11892 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
11893 if (lpfc_readl(phba->HCregaddr, &creg_val))
11894 return IOCB_ERROR;
11895 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
11896 writel(creg_val, phba->HCregaddr);
11897 readl(phba->HCregaddr); /* flush */
11900 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
11901 SLI_IOCB_RET_IOCB);
11902 if (retval == IOCB_SUCCESS) {
11903 timeout_req = msecs_to_jiffies(timeout * 1000);
11904 timeleft = wait_event_timeout(done_q,
11905 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
11906 timeout_req);
11907 spin_lock_irqsave(&phba->hbalock, iflags);
11908 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
11911 * IOCB timed out. Inform the wake iocb wait
11912 * completion function and set local status
11915 iocb_completed = false;
11916 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
11918 spin_unlock_irqrestore(&phba->hbalock, iflags);
11919 if (iocb_completed) {
11920 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11921 "0331 IOCB wake signaled\n");
11922 /* Note: we are not indicating if the IOCB has a success
11923 * status or not - that's for the caller to check.
11924 * IOCB_SUCCESS means just that the command was sent and
11925 * completed. Not that it completed successfully.
11927 } else if (timeleft == 0) {
11928 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11929 "0338 IOCB wait timeout error - no "
11930 "wake response Data x%x\n", timeout);
11931 retval = IOCB_TIMEDOUT;
11933 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11934 "0330 IOCB wake NOT set, "
11935 "Data x%x x%lx\n",
11936 timeout, (timeleft / jiffies));
11937 retval = IOCB_TIMEDOUT;
11939 } else if (retval == IOCB_BUSY) {
11940 if (phba->cfg_log_verbose & LOG_SLI) {
11941 list_for_each_entry(iocb, &pring->txq, list) {
11942 txq_cnt++;
11943 }
11944 list_for_each_entry(iocb, &pring->txcmplq, list) {
11945 txcmplq_cnt++;
11946 }
11947 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11948 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
11949 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
11953 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11954 "0332 IOCB wait issue failed, Data x%x\n",
11956 retval = IOCB_ERROR;
11959 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
11960 if (lpfc_readl(phba->HCregaddr, &creg_val))
11961 return IOCB_ERROR;
11962 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
11963 writel(creg_val, phba->HCregaddr);
11964 readl(phba->HCregaddr); /* flush */
11968 piocb->context2 = NULL;
11970 piocb->context_un.wait_queue = NULL;
11971 piocb->iocb_cmpl = NULL;
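/*
 * Illustrative usage sketch (hypothetical, not part of the lpfc source):
 * one way a caller might use lpfc_sli_issue_iocb_wait() under the contract
 * documented above. The helper name and the 30 second timeout are made up;
 * note that context2 must be NULL on entry and that, per the rules above,
 * the caller must not free the iocb resources on IOCB_TIMEDOUT.
 */
#if 0
static int lpfc_example_issue_els_sync(struct lpfc_hba *phba,
				       struct lpfc_iocbq *cmdiocbq)
{
	struct lpfc_iocbq *rspiocbq;
	int rc, ret = 0;

	/* Response iocb that the wait path copies completion status into */
	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq)
		return -ENOMEM;

	cmdiocbq->context2 = NULL;	/* wait path stores rspiocbq here */

	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
				      rspiocbq, 30);
	if (rc == IOCB_TIMEDOUT)
		return -ETIMEDOUT;	/* do not free iocb resources here */

	/* IOCB_SUCCESS only means sent and completed; check ulpStatus too */
	if (rc != IOCB_SUCCESS || rspiocbq->iocb.ulpStatus != IOSTAT_SUCCESS)
		ret = -EIO;

	lpfc_sli_release_iocbq(phba, rspiocbq);
	return ret;
}
#endif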
11976 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
11977 * @phba: Pointer to HBA context object.
11978 * @pmboxq: Pointer to driver mailbox object.
11979 * @timeout: Timeout in number of seconds.
11981 * This function issues the mailbox to firmware and waits for the
11982 * mailbox command to complete. If the mailbox command is not
11983 * completed within timeout seconds, it returns MBX_TIMEOUT.
11984 * The function waits for the mailbox completion using a
11985 * non-interruptible timed wait. If the command does not complete
11986 * within the timeout, MBX_TIMEOUT is returned to the caller. The
11987 * caller must not free the mailbox resources if this function
11988 * returns MBX_TIMEOUT, since the command may still complete later.
11989 * This function will sleep while waiting for mailbox completion.
11990 * So, this function should not be called from any context which
11991 * does not allow sleeping. Due to the same reason, this function
11992 * cannot be called with interrupts disabled.
11993 * This function assumes that the mailbox completion occurs while
11994 * this function sleeps. So, this function cannot be called from
11995 * the worker thread which processes mailbox completion.
11996 * This function is called in the context of HBA management applications.
11998 * This function returns MBX_SUCCESS when successful.
11999 * This function is called with no lock held.
12002 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
12005 struct completion mbox_done;
12007 unsigned long flag;
12009 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
12010 /* Set up the wake routine as the mailbox completion callback */
12011 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
12013 /* Set up context3 to pass the completion pointer to the wake function */
12014 init_completion(&mbox_done);
12015 pmboxq->context3 = &mbox_done;
12016 /* now issue the command */
12017 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
12018 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
12019 wait_for_completion_timeout(&mbox_done,
12020 msecs_to_jiffies(timeout * 1000));
12022 spin_lock_irqsave(&phba->hbalock, flag);
12023 pmboxq->context3 = NULL;
12025 * If the LPFC_MBX_WAKE flag is set, the mailbox completed;
12026 * otherwise, do not free the resources.
12028 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
12029 retval = MBX_SUCCESS;
12031 retval = MBX_TIMEOUT;
12032 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12034 spin_unlock_irqrestore(&phba->hbalock, flag);
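/*
 * Illustrative usage sketch (hypothetical, not part of the lpfc source):
 * issuing a mailbox command synchronously with lpfc_sli_issue_mbox_wait().
 * The helper name is made up; the allocate/issue/free pattern and the rule
 * that the mailbox must not be freed on MBX_TIMEOUT (the command may still
 * complete later) follow the contract documented above.
 */
#if 0
static int lpfc_example_read_config_sync(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int rc;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	lpfc_read_config(phba, pmboxq);

	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
	if (rc == MBX_TIMEOUT)
		return -ETIMEDOUT;	/* do not free pmboxq on timeout */

	/* MBX_SUCCESS means completed; the status word says how it went */
	rc = pmboxq->u.mb.mbxStatus ? -EIO : 0;

	mempool_free(pmboxq, phba->mbox_mem_pool);
	return rc;
}
#endif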
12040 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
12041 * @phba: Pointer to HBA context.
12043 * This function is called to shut down the driver's mailbox sub-system.
12044 * It first marks the mailbox sub-system as blocked to prevent further
12045 * asynchronous mailbox commands from being issued off the pending mailbox
12046 * command queue. If the mailbox command sub-system shutdown is due to
12047 * HBA error conditions such as EEH or ERATT, this routine invokes the
12048 * mailbox sub-system flush routine to forcefully bring down the
12049 * mailbox sub-system. Otherwise, if it is due to a normal condition (such
12050 * as offline or HBA function reset), this routine waits for the
12051 * outstanding mailbox command to complete before invoking the mailbox
12052 * sub-system flush routine to gracefully bring down the mailbox sub-system.
12055 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
12057 struct lpfc_sli *psli = &phba->sli;
12058 unsigned long timeout;
12060 if (mbx_action == LPFC_MBX_NO_WAIT) {
12061 /* delay 100ms for port state */
12062 msleep(100);
12063 lpfc_sli_mbox_sys_flush(phba);
12066 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
12068 /* Disable softirqs, including timers from obtaining phba->hbalock */
12069 local_bh_disable();
12071 spin_lock_irq(&phba->hbalock);
12072 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
12074 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
12075 /* Determine how long we might wait for the active mailbox
12076 * command to be gracefully completed by firmware.
12078 if (phba->sli.mbox_active)
12079 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
12080 phba->sli.mbox_active) * 1000) + jiffies;
12082 spin_unlock_irq(&phba->hbalock);
12084 /* Enable softirqs again, done with phba->hbalock */
12085 local_bh_enable();
12087 while (phba->sli.mbox_active) {
12088 /* Check active mailbox completion status every 2ms */
12089 msleep(2);
12090 if (time_after(jiffies, timeout))
12091 /* Timeout: let the mailbox flush routine
12092 * forcefully release the active mailbox command
12097 spin_unlock_irq(&phba->hbalock);
12099 /* Enable softirqs again, done with phba->hbalock */
12100 local_bh_enable();
12103 lpfc_sli_mbox_sys_flush(phba);
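/*
 * Illustrative sketch (hypothetical, not part of the lpfc source): a
 * teardown path would pass LPFC_MBX_WAIT to drain the active mailbox
 * command gracefully, and LPFC_MBX_NO_WAIT to flush immediately under
 * error conditions, as documented above.
 */
#if 0
static void lpfc_example_mbox_teardown(struct lpfc_hba *phba, bool in_error)
{
	lpfc_sli_mbox_sys_shutdown(phba,
				   in_error ? LPFC_MBX_NO_WAIT : LPFC_MBX_WAIT);
}
#endif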
12107 * lpfc_sli_eratt_read - read sli-3 error attention events
12108 * @phba: Pointer to HBA context.
12110 * This function is called to read the SLI3 device error attention registers
12111 * for possible error attention events. The caller must hold the hbalock
12112 * with spin_lock_irq().
12114 * This function returns 1 when there is Error Attention in the Host Attention
12115 * Register and returns 0 otherwise.
12118 lpfc_sli_eratt_read(struct lpfc_hba *phba)
12122 /* Read chip Host Attention (HA) register */
12123 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12126 if (ha_copy & HA_ERATT) {
12127 /* Read host status register to retrieve error event */
12128 if (lpfc_sli_read_hs(phba))
12131 /* Check if a deferred error condition is active */
12132 if ((HS_FFER1 & phba->work_hs) &&
12133 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12134 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
12135 phba->hba_flag |= DEFER_ERATT;
12136 /* Clear all interrupt enable conditions */
12137 writel(0, phba->HCregaddr);
12138 readl(phba->HCregaddr);
12141 /* Set the driver HA work bitmap */
12142 phba->work_ha |= HA_ERATT;
12143 /* Indicate polling handles this ERATT */
12144 phba->hba_flag |= HBA_ERATT_HANDLED;
12150 /* Set the driver HS work bitmap */
12151 phba->work_hs |= UNPLUG_ERR;
12152 /* Set the driver HA work bitmap */
12153 phba->work_ha |= HA_ERATT;
12154 /* Indicate polling handles this ERATT */
12155 phba->hba_flag |= HBA_ERATT_HANDLED;
12160 * lpfc_sli4_eratt_read - read sli-4 error attention events
12161 * @phba: Pointer to HBA context.
12163 * This function is called to read the SLI4 device error attention registers
12164 * for possible error attention events. The caller must hold the hbalock
12165 * with spin_lock_irq().
12167 * This function returns 1 when there is Error Attention in the Host Attention
12168 * Register and returns 0 otherwise.
12171 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
12173 uint32_t uerr_sta_hi, uerr_sta_lo;
12174 uint32_t if_type, portsmphr;
12175 struct lpfc_register portstat_reg;
12178 * For now, use the SLI4 device internal unrecoverable error
12179 * registers for error attention. This can be changed later.
12181 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12183 case LPFC_SLI_INTF_IF_TYPE_0:
12184 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
12185 &uerr_sta_lo) ||
12186 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
12187 &uerr_sta_hi)) {
12188 phba->work_hs |= UNPLUG_ERR;
12189 phba->work_ha |= HA_ERATT;
12190 phba->hba_flag |= HBA_ERATT_HANDLED;
12193 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
12194 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
12195 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12196 "1423 HBA Unrecoverable error: "
12197 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
12198 "ue_mask_lo_reg=0x%x, "
12199 "ue_mask_hi_reg=0x%x\n",
12200 uerr_sta_lo, uerr_sta_hi,
12201 phba->sli4_hba.ue_mask_lo,
12202 phba->sli4_hba.ue_mask_hi);
12203 phba->work_status[0] = uerr_sta_lo;
12204 phba->work_status[1] = uerr_sta_hi;
12205 phba->work_ha |= HA_ERATT;
12206 phba->hba_flag |= HBA_ERATT_HANDLED;
12210 case LPFC_SLI_INTF_IF_TYPE_2:
12211 case LPFC_SLI_INTF_IF_TYPE_6:
12212 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
12213 &portstat_reg.word0) ||
12214 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
12215 &portsmphr)) {
12216 phba->work_hs |= UNPLUG_ERR;
12217 phba->work_ha |= HA_ERATT;
12218 phba->hba_flag |= HBA_ERATT_HANDLED;
12221 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
12222 phba->work_status[0] =
12223 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
12224 phba->work_status[1] =
12225 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
12226 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12227 "2885 Port Status Event: "
12228 "port status reg 0x%x, "
12229 "port smphr reg 0x%x, "
12230 "error 1=0x%x, error 2=0x%x\n",
12231 portstat_reg.word0, portsmphr,
12233 phba->work_status[0],
12234 phba->work_status[1]);
12235 phba->work_ha |= HA_ERATT;
12236 phba->hba_flag |= HBA_ERATT_HANDLED;
12240 case LPFC_SLI_INTF_IF_TYPE_1:
12242 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12243 "2886 HBA Error Attention on unsupported "
12244 "if type %d.", if_type);
12252 * lpfc_sli_check_eratt - check error attention events
12253 * @phba: Pointer to HBA context.
12255 * This function is called from timer soft interrupt context to check HBA's
12256 * error attention register bit for error attention events.
12258 * This function returns 1 when there is Error Attention in the Host Attention
12259 * Register and returns 0 otherwise.
12262 lpfc_sli_check_eratt(struct lpfc_hba *phba)
12266 /* If somebody is waiting to handle an eratt, don't process it
12267 * here. The brdkill function will do this.
12269 if (phba->link_flag & LS_IGNORE_ERATT)
12270 return 0;
12272 /* Check if interrupt handler handles this ERATT */
12273 spin_lock_irq(&phba->hbalock);
12274 if (phba->hba_flag & HBA_ERATT_HANDLED) {
12275 /* Interrupt handler has handled ERATT */
12276 spin_unlock_irq(&phba->hbalock);
12281 /* If there is deferred error attention, do not check for error attention here */
12284 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12285 spin_unlock_irq(&phba->hbalock);
12289 /* If PCI channel is offline, don't process it */
12290 if (unlikely(pci_channel_offline(phba->pcidev))) {
12291 spin_unlock_irq(&phba->hbalock);
12295 switch (phba->sli_rev) {
12296 case LPFC_SLI_REV2:
12297 case LPFC_SLI_REV3:
12298 /* Read chip Host Attention (HA) register */
12299 ha_copy = lpfc_sli_eratt_read(phba);
12301 case LPFC_SLI_REV4:
12302 /* Read device Unrecoverable Error (UERR) registers */
12303 ha_copy = lpfc_sli4_eratt_read(phba);
12306 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12307 "0299 Invalid SLI revision (%d)\n",
12312 spin_unlock_irq(&phba->hbalock);
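/*
 * Illustrative sketch (hypothetical, not part of the lpfc source): how a
 * periodic error-attention poll might consume lpfc_sli_check_eratt() from
 * timer context. The callback name is made up; the wake-or-rearm shape
 * mirrors the driver's eratt poll timer.
 */
#if 0
static void lpfc_example_eratt_poll(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, eratt_poll);

	if (lpfc_sli_check_eratt(phba))
		/* Error attention posted: let the worker thread handle it */
		lpfc_worker_wake_up(phba);
	else
		/* Nothing pending: rearm the poll timer */
		mod_timer(&phba->eratt_poll, jiffies +
			  msecs_to_jiffies(1000 * phba->eratt_poll_interval));
}
#endif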
12318 * lpfc_intr_state_check - Check device state for interrupt handling
12319 * @phba: Pointer to HBA context.
12321 * This inline routine checks whether a device or its PCI slot is in a
12322 * state in which the interrupt should be handled.
12324 * This function returns 0 if the device or the PCI slot is in a state in
12325 * which the interrupt should be handled; otherwise it returns -EIO.
12328 lpfc_intr_state_check(struct lpfc_hba *phba)
12330 /* If the pci channel is offline, ignore all the interrupts */
12331 if (unlikely(pci_channel_offline(phba->pcidev)))
12332 return -EIO;
12334 /* Update device level interrupt statistics */
12335 phba->sli.slistat.sli_intr++;
12337 /* Ignore all interrupts during initialization. */
12338 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12339 return -EIO;
12345 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
12346 * @irq: Interrupt number.
12347 * @dev_id: The device context pointer.
12349 * This function is directly called from the PCI layer as an interrupt
12350 * service routine when device with SLI-3 interface spec is enabled with
12351 * MSI-X multi-message interrupt mode and there are slow-path events in
12352 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
12353 * interrupt mode, this function is called as part of the device-level
12354 * interrupt handler. When the PCI slot is in error recovery or the HBA
12355 * is undergoing initialization, the interrupt handler will not process
12356 * the interrupt. The link attention and ELS ring attention events are
12357 * handled by the worker thread. The interrupt handler signals the worker
12358 * thread and returns for these events. This function is called without
12359 * any lock held. It gets the hbalock to access and update SLI data structures.
12362 * This function returns IRQ_HANDLED when the interrupt is handled, else it
12363 * returns IRQ_NONE.
12366 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
12368 struct lpfc_hba *phba;
12369 uint32_t ha_copy, hc_copy;
12370 uint32_t work_ha_copy;
12371 unsigned long status;
12372 unsigned long iflag;
12375 MAILBOX_t *mbox, *pmbox;
12376 struct lpfc_vport *vport;
12377 struct lpfc_nodelist *ndlp;
12378 struct lpfc_dmabuf *mp;
12383 * Get the driver's phba structure from the dev_id and
12384 * assume the HBA is not interrupting.
12386 phba = (struct lpfc_hba *)dev_id;
12388 if (unlikely(!phba))
12389 return IRQ_NONE;
12392 * Stuff needs to be attended to when this function is invoked as an
12393 * individual interrupt handler in MSI-X multi-message interrupt mode
12395 if (phba->intr_type == MSIX) {
12396 /* Check device state for handling interrupt */
12397 if (lpfc_intr_state_check(phba))
12398 return IRQ_NONE;
12399 /* Need to read HA REG for slow-path events */
12400 spin_lock_irqsave(&phba->hbalock, iflag);
12401 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12403 /* If somebody is waiting to handle an eratt don't process it
12404 * here. The brdkill function will do this.
12406 if (phba->link_flag & LS_IGNORE_ERATT)
12407 ha_copy &= ~HA_ERATT;
12408 /* Check the need for handling ERATT in interrupt handler */
12409 if (ha_copy & HA_ERATT) {
12410 if (phba->hba_flag & HBA_ERATT_HANDLED)
12411 /* ERATT polling has handled ERATT */
12412 ha_copy &= ~HA_ERATT;
12414 /* Indicate interrupt handler handles ERATT */
12415 phba->hba_flag |= HBA_ERATT_HANDLED;
12419 /* If there is deferred error attention, do not check for any interrupt */
12422 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12423 spin_unlock_irqrestore(&phba->hbalock, iflag);
12427 /* Clear only the attention sources related to the slow path */
12428 if (lpfc_readl(phba->HCregaddr, &hc_copy))
12431 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12432 HC_LAINT_ENA | HC_ERINT_ENA),
12434 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12436 writel(hc_copy, phba->HCregaddr);
12437 readl(phba->HAregaddr); /* flush */
12438 spin_unlock_irqrestore(&phba->hbalock, iflag);
12440 ha_copy = phba->ha_copy;
12442 work_ha_copy = ha_copy & phba->work_ha_mask;
12444 if (work_ha_copy) {
12445 if (work_ha_copy & HA_LATT) {
12446 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
12448 * Turn off Link Attention interrupts
12449 * until CLEAR_LA done
12451 spin_lock_irqsave(&phba->hbalock, iflag);
12452 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
12453 if (lpfc_readl(phba->HCregaddr, &control))
12455 control &= ~HC_LAINT_ENA;
12456 writel(control, phba->HCregaddr);
12457 readl(phba->HCregaddr); /* flush */
12458 spin_unlock_irqrestore(&phba->hbalock, iflag);
12461 work_ha_copy &= ~HA_LATT;
12464 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
12466 * Turn off Slow Ring interrupts; LPFC_ELS_RING is
12467 * the only slow ring.
12469 status = (work_ha_copy &
12470 (HA_RXMASK << (4*LPFC_ELS_RING)));
12471 status >>= (4*LPFC_ELS_RING);
12472 if (status & HA_RXMASK) {
12473 spin_lock_irqsave(&phba->hbalock, iflag);
12474 if (lpfc_readl(phba->HCregaddr, &control))
12477 lpfc_debugfs_slow_ring_trc(phba,
12478 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
12479 control, status,
12480 (uint32_t)phba->sli.slistat.sli_intr);
12482 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
12483 lpfc_debugfs_slow_ring_trc(phba,
12484 "ISR Disable ring:"
12485 "pwork:x%x hawork:x%x wait:x%x",
12486 phba->work_ha, work_ha_copy,
12487 (uint32_t)((unsigned long)
12488 &phba->work_waitq));
12491 control &= ~(HC_R0INT_ENA << LPFC_ELS_RING);
12492 writel(control, phba->HCregaddr);
12493 readl(phba->HCregaddr); /* flush */
12496 lpfc_debugfs_slow_ring_trc(phba,
12497 "ISR slow ring: pwork:"
12498 "x%x hawork:x%x wait:x%x",
12499 phba->work_ha, work_ha_copy,
12500 (uint32_t)((unsigned long)
12501 &phba->work_waitq));
12503 spin_unlock_irqrestore(&phba->hbalock, iflag);
12506 spin_lock_irqsave(&phba->hbalock, iflag);
12507 if (work_ha_copy & HA_ERATT) {
12508 if (lpfc_sli_read_hs(phba))
12511 /* Check if a deferred error condition is active */
12514 if ((HS_FFER1 & phba->work_hs) &&
12515 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12516 HS_FFER6 | HS_FFER7 | HS_FFER8) &
12518 phba->hba_flag |= DEFER_ERATT;
12519 /* Clear all interrupt enable conditions */
12520 writel(0, phba->HCregaddr);
12521 readl(phba->HCregaddr);
12525 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
12526 pmb = phba->sli.mbox_active;
12527 pmbox = &pmb->u.mb;
12529 vport = pmb->vport;
12531 /* First check out the status word */
12532 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12533 if (pmbox->mbxOwner != OWN_HOST) {
12534 spin_unlock_irqrestore(&phba->hbalock, iflag);
12536 * Stray Mailbox Interrupt, mbxCommand <cmd>
12537 * mbxStatus <status>
12539 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
12541 "(%d):0304 Stray Mailbox "
12542 "Interrupt mbxCommand x%x "
12544 (vport ? vport->vpi : 0),
12547 /* clear mailbox attention bit */
12548 work_ha_copy &= ~HA_MBATT;
12550 phba->sli.mbox_active = NULL;
12551 spin_unlock_irqrestore(&phba->hbalock, iflag);
12552 phba->last_completion_time = jiffies;
12553 del_timer(&phba->sli.mbox_tmo);
12554 if (pmb->mbox_cmpl) {
12555 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(MAILBOX_t));
12557 if (pmb->out_ext_byte_len &&
12558 pmb->ctx_buf)
12559 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
12561 pmb->ctx_buf,
12562 pmb->out_ext_byte_len);
12564 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12565 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12567 lpfc_debugfs_disc_trc(vport,
12568 LPFC_DISC_TRC_MBOX_VPORT,
12569 "MBOX dflt rpi: : "
12570 "status:x%x rpi:x%x",
12571 (uint32_t)pmbox->mbxStatus,
12572 pmbox->un.varWords[0], 0);
12574 if (!pmbox->mbxStatus) {
12575 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
12577 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
12580 /* Reg_LOGIN of dflt RPI was
12581 * successful. Now let's get
12582 * rid of the RPI using the
12583 * same mbox buffer.
12585 lpfc_unreg_login(phba, vport->vpi,
12587 pmbox->un.varWords[0], pmb);
12590 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
12592 pmb->ctx_ndlp = ndlp;
12593 pmb->vport = vport;
12594 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
12597 if (rc != MBX_BUSY)
12598 lpfc_printf_log(phba,
12600 LOG_MBOX | LOG_SLI,
12601 "0350 rc should have"
12602 "been MBX_BUSY\n");
12603 if (rc != MBX_NOT_FINISHED)
12604 goto send_current_mbox;
12607 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
12610 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
12612 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
12615 lpfc_mbox_cmpl_put(phba, pmb);
12618 spin_unlock_irqrestore(&phba->hbalock, iflag);
12620 if ((work_ha_copy & HA_MBATT) &&
12621 (phba->sli.mbox_active == NULL)) {
12623 /* Process next mailbox command if there is one */
12625 rc = lpfc_sli_issue_mbox(phba, NULL,
12627 } while (rc == MBX_NOT_FINISHED);
12628 if (rc != MBX_SUCCESS)
12629 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12630 LOG_SLI, "0349 rc should be "
12634 spin_lock_irqsave(&phba->hbalock, iflag);
12635 phba->work_ha |= work_ha_copy;
12636 spin_unlock_irqrestore(&phba->hbalock, iflag);
12637 lpfc_worker_wake_up(phba);
12639 return IRQ_HANDLED;
12641 spin_unlock_irqrestore(&phba->hbalock, iflag);
12642 return IRQ_HANDLED;
12644 } /* lpfc_sli_sp_intr_handler */
12647 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
12648 * @irq: Interrupt number.
12649 * @dev_id: The device context pointer.
12651 * This function is directly called from the PCI layer as an interrupt
12652 * service routine when device with SLI-3 interface spec is enabled with
12653 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
12654 * ring event in the HBA. However, when the device is enabled with either
12655 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12656 * device-level interrupt handler. When the PCI slot is in error recovery
12657 * or the HBA is undergoing initialization, the interrupt handler will not
12658 * process the interrupt. The SCSI FCP fast-path ring events are handled in
12659 * the interrupt context. This function is called without any lock held.
12660 * It gets the hbalock to access and update SLI data structures.
12662 * This function returns IRQ_HANDLED when interrupt is handled else it
12663 * returns IRQ_NONE.
12666 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
12668 struct lpfc_hba *phba;
12670 unsigned long status;
12671 unsigned long iflag;
12672 struct lpfc_sli_ring *pring;
12674 /* Get the driver's phba structure from the dev_id and
12675 * assume the HBA is not interrupting.
12677 phba = (struct lpfc_hba *) dev_id;
12679 if (unlikely(!phba))
12680 return IRQ_NONE;
12683 * Stuff needs to be attended to when this function is invoked as an
12684 * individual interrupt handler in MSI-X multi-message interrupt mode
12686 if (phba->intr_type == MSIX) {
12687 /* Check device state for handling interrupt */
12688 if (lpfc_intr_state_check(phba))
12689 return IRQ_NONE;
12690 /* Need to read HA REG for FCP ring and other ring events */
12691 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12692 return IRQ_HANDLED;
12693 /* Clear only the attention sources related to the fast path */
12694 spin_lock_irqsave(&phba->hbalock, iflag);
12696 /* If there is deferred error attention, do not check for any interrupt */
12699 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12700 spin_unlock_irqrestore(&phba->hbalock, iflag);
12703 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
12705 readl(phba->HAregaddr); /* flush */
12706 spin_unlock_irqrestore(&phba->hbalock, iflag);
12708 ha_copy = phba->ha_copy;
12711 * Process all events on FCP ring. Take the optimized path for FCP IO.
12713 ha_copy &= ~(phba->work_ha_mask);
12715 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12716 status >>= (4*LPFC_FCP_RING);
12717 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12718 if (status & HA_RXMASK)
12719 lpfc_sli_handle_fast_ring_event(phba, pring, status);
12721 if (phba->cfg_multi_ring_support == 2) {
12723 * Process all events on extra ring. Take the optimized path
12724 * for extra ring IO.
12726 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12727 status >>= (4*LPFC_EXTRA_RING);
12728 if (status & HA_RXMASK) {
12729 lpfc_sli_handle_fast_ring_event(phba,
12730 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
12734 return IRQ_HANDLED;
12735 } /* lpfc_sli_fp_intr_handler */
12738 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
12739 * @irq: Interrupt number.
12740 * @dev_id: The device context pointer.
12742 * This function is the HBA device-level interrupt handler to device with
12743 * SLI-3 interface spec, called from the PCI layer when either MSI or
12744 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
12745 * requires driver attention. This function invokes the slow-path interrupt
12746 * attention handling function and fast-path interrupt attention handling
12747 * function in turn to process the relevant HBA attention events. This
12748 * function is called without any lock held. It gets the hbalock to access
12749 * and update SLI data structures.
12751 * This function returns IRQ_HANDLED when interrupt is handled, else it
12752 * returns IRQ_NONE.
12755 lpfc_sli_intr_handler(int irq, void *dev_id)
12757 struct lpfc_hba *phba;
12758 irqreturn_t sp_irq_rc, fp_irq_rc;
12759 unsigned long status1, status2;
12763 * Get the driver's phba structure from the dev_id and
12764 * assume the HBA is not interrupting.
12766 phba = (struct lpfc_hba *) dev_id;
12768 if (unlikely(!phba))
12769 return IRQ_NONE;
12771 /* Check device state for handling interrupt */
12772 if (lpfc_intr_state_check(phba))
12773 return IRQ_NONE;
12775 spin_lock(&phba->hbalock);
12776 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
12777 spin_unlock(&phba->hbalock);
12778 return IRQ_HANDLED;
12781 if (unlikely(!phba->ha_copy)) {
12782 spin_unlock(&phba->hbalock);
12783 return IRQ_NONE;
12784 } else if (phba->ha_copy & HA_ERATT) {
12785 if (phba->hba_flag & HBA_ERATT_HANDLED)
12786 /* ERATT polling has handled ERATT */
12787 phba->ha_copy &= ~HA_ERATT;
12789 /* Indicate interrupt handler handles ERATT */
12790 phba->hba_flag |= HBA_ERATT_HANDLED;
12794 /* If there is deferred error attention, do not check for any interrupt */
12796 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12797 spin_unlock(&phba->hbalock);
12801 /* Clear attention sources except link and error attentions */
12802 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
12803 spin_unlock(&phba->hbalock);
12804 return IRQ_HANDLED;
12806 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
12807 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
12809 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
12810 writel(hc_copy, phba->HCregaddr);
12811 readl(phba->HAregaddr); /* flush */
12812 spin_unlock(&phba->hbalock);
12815 * Invokes slow-path host attention interrupt handling as appropriate.
12818 /* status of events with mailbox and link attention */
12819 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
12821 /* status of events with ELS ring */
12822 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
12823 status2 >>= (4*LPFC_ELS_RING);
12825 if (status1 || (status2 & HA_RXMASK))
12826 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
12828 sp_irq_rc = IRQ_NONE;
12831 * Invoke fast-path host attention interrupt handling as appropriate.
12834 /* status of events with FCP ring */
12835 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12836 status1 >>= (4*LPFC_FCP_RING);
12838 /* status of events with extra ring */
12839 if (phba->cfg_multi_ring_support == 2) {
12840 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12841 status2 >>= (4*LPFC_EXTRA_RING);
12845 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
12846 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
12848 fp_irq_rc = IRQ_NONE;
12850 /* Return device-level interrupt handling status */
12851 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
12852 } /* lpfc_sli_intr_handler */
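/*
 * Illustrative sketch (hypothetical, not part of the lpfc source): how a
 * device-level handler such as lpfc_sli_intr_handler() is typically
 * registered for INTx/MSI operation. The function name is made up; the
 * request_irq() shape mirrors the driver's interrupt enable path.
 */
#if 0
static int lpfc_example_enable_intx(struct lpfc_hba *phba)
{
	int rc;

	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
	if (!rc)
		phba->intr_type = INTx;
	return rc;
}
#endif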
12855 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
12856 * @phba: pointer to lpfc hba data structure.
12858 * This routine is invoked by the worker thread to process all the pending
12859 * SLI4 els abort xri events.
12861 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
12863 struct lpfc_cq_event *cq_event;
12865 /* First, declare the els xri abort event has been handled */
12866 spin_lock_irq(&phba->hbalock);
12867 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
12868 spin_unlock_irq(&phba->hbalock);
12869 /* Now, handle all the els xri abort events */
12870 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
12871 /* Get the first event from the head of the event queue */
12872 spin_lock_irq(&phba->hbalock);
12873 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
12874 cq_event, struct lpfc_cq_event, list);
12875 spin_unlock_irq(&phba->hbalock);
12876 /* Notify aborted XRI for ELS work queue */
12877 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12878 /* Free the event processed back to the free pool */
12879 lpfc_sli4_cq_event_release(phba, cq_event);
12884 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
12885 * @phba: pointer to lpfc hba data structure
12886 * @pIocbIn: pointer to the rspiocbq
12887 * @pIocbOut: pointer to the cmdiocbq
12888 * @wcqe: pointer to the completion wcqe
12890 * This routine transfers the fields of a command iocbq to a response iocbq
12891 * by copying all the IOCB fields from the command iocbq and transferring the
12892 * completion status information from the completion wcqe.
12895 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
12896 struct lpfc_iocbq *pIocbIn,
12897 struct lpfc_iocbq *pIocbOut,
12898 struct lpfc_wcqe_complete *wcqe)
12901 unsigned long iflags;
12902 uint32_t status, max_response;
12903 struct lpfc_dmabuf *dmabuf;
12904 struct ulp_bde64 *bpl, bde;
12905 size_t offset = offsetof(struct lpfc_iocbq, iocb);
12907 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
12908 sizeof(struct lpfc_iocbq) - offset);
12909 /* Map WCQE parameters into irspiocb parameters */
12910 status = bf_get(lpfc_wcqe_c_status, wcqe);
12911 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
12912 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
12913 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
12914 pIocbIn->iocb.un.fcpi.fcpi_parm =
12915 pIocbOut->iocb.un.fcpi.fcpi_parm -
12916 wcqe->total_data_placed;
12917 else
12918 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12919 else
12920 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12921 switch (pIocbOut->iocb.ulpCommand) {
12922 case CMD_ELS_REQUEST64_CR:
12923 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12924 bpl = (struct ulp_bde64 *)dmabuf->virt;
12925 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
12926 max_response = bde.tus.f.bdeSize;
12928 case CMD_GEN_REQUEST64_CR:
12930 if (!pIocbOut->context3)
12931 break;
12932 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
12933 sizeof(struct ulp_bde64);
12934 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12935 bpl = (struct ulp_bde64 *)dmabuf->virt;
12936 for (i = 0; i < numBdes; i++) {
12937 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
12938 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
12939 max_response += bde.tus.f.bdeSize;
12943 max_response = wcqe->total_data_placed;
12946 if (max_response < wcqe->total_data_placed)
12947 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
12948 else
12949 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
12950 wcqe->total_data_placed;
12953 /* Convert BG errors for completion status */
12954 if (status == CQE_STATUS_DI_ERROR) {
12955 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
12957 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
12958 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
12959 else
12960 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
12962 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
12963 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
12964 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12965 BGS_GUARD_ERR_MASK;
12966 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
12967 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12968 BGS_APPTAG_ERR_MASK;
12969 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
12970 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12971 BGS_REFTAG_ERR_MASK;
12973 /* Check to see if there was any good data before the error */
12974 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
12975 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12976 BGS_HI_WATER_MARK_PRESENT_MASK;
12977 pIocbIn->iocb.unsli3.sli3_bg.bghm =
12978 wcqe->total_data_placed;
12982 * Set ALL the error bits to indicate we don't know what
12983 * type of error it is.
12985 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
12986 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12987 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
12988 BGS_GUARD_ERR_MASK);
12991 /* Pick up HBA exchange busy condition */
12992 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
12993 spin_lock_irqsave(&phba->hbalock, iflags);
12994 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
12995 spin_unlock_irqrestore(&phba->hbalock, iflags);
13000 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
13001 * @phba: Pointer to HBA context object.
13002 * @irspiocbq: Pointer to the response iocbq that embeds the ELS WCQE.
13004 * This routine handles an ELS work-queue completion event and constructs
13005 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
13006 * discovery engine to handle.
13008 * Return: Pointer to the receive IOCBQ, NULL otherwise.
13010 static struct lpfc_iocbq *
13011 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
13012 struct lpfc_iocbq *irspiocbq)
13014 struct lpfc_sli_ring *pring;
13015 struct lpfc_iocbq *cmdiocbq;
13016 struct lpfc_wcqe_complete *wcqe;
13017 unsigned long iflags;
13019 pring = lpfc_phba_elsring(phba);
13020 if (unlikely(!pring))
13023 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
13024 pring->stats.iocb_event++;
13025 /* Look up the ELS command IOCB and create pseudo response IOCB */
13026 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13027 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13028 if (unlikely(!cmdiocbq)) {
13029 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13030 "0386 ELS complete with no corresponding "
13031 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13032 wcqe->word0, wcqe->total_data_placed,
13033 wcqe->parameter, wcqe->word3);
13034 lpfc_sli_release_iocbq(phba, irspiocbq);
13038 spin_lock_irqsave(&pring->ring_lock, iflags);
13039 /* Put the iocb back on the txcmplq */
13040 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13041 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13043 /* Fake the irspiocbq and copy necessary response information */
13044 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
13049 inline struct lpfc_cq_event *
13050 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13052 struct lpfc_cq_event *cq_event;
13054 /* Allocate a new internal CQ_EVENT entry */
13055 cq_event = lpfc_sli4_cq_event_alloc(phba);
13056 if (!cq_event) {
13057 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13058 "0602 Failed to alloc CQ_EVENT entry\n");
13059 return NULL;
13060 }
13062 /* Move the CQE into the event */
13063 memcpy(&cq_event->cqe, entry, size);
13068 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
13069 * @phba: Pointer to HBA context object.
13070 * @mcqe: Pointer to mailbox completion queue entry.
13072 * This routine processes a mailbox completion queue entry carrying an asynchronous event.
13075 * Return: true if work posted to worker thread, otherwise false.
13078 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13080 struct lpfc_cq_event *cq_event;
13081 unsigned long iflags;
13083 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13084 "0392 Async Event: word0:x%x, word1:x%x, "
13085 "word2:x%x, word3:x%x\n", mcqe->word0,
13086 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13088 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13089 if (!cq_event)
13090 return false;
13091 spin_lock_irqsave(&phba->hbalock, iflags);
13092 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13093 /* Set the async event flag */
13094 phba->hba_flag |= ASYNC_EVENT;
13095 spin_unlock_irqrestore(&phba->hbalock, iflags);
13101 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
13102 * @phba: Pointer to HBA context object.
13103 * @mcqe: Pointer to mailbox completion queue entry.
13105 * This routine processes a mailbox completion queue entry with a mailbox
13106 * completion event.
13108 * Return: true if work posted to worker thread, otherwise false.
13111 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13113 uint32_t mcqe_status;
13114 MAILBOX_t *mbox, *pmbox;
13115 struct lpfc_mqe *mqe;
13116 struct lpfc_vport *vport;
13117 struct lpfc_nodelist *ndlp;
13118 struct lpfc_dmabuf *mp;
13119 unsigned long iflags;
13121 bool workposted = false;
13124 /* If not a mailbox completion MCQE, bail out and just check the consumed flag */
13125 if (!bf_get(lpfc_trailer_completed, mcqe))
13126 goto out_no_mqe_complete;
13128 /* Get the reference to the active mbox command */
13129 spin_lock_irqsave(&phba->hbalock, iflags);
13130 pmb = phba->sli.mbox_active;
13131 if (unlikely(!pmb)) {
13132 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13133 "1832 No pending MBOX command to handle\n");
13134 spin_unlock_irqrestore(&phba->hbalock, iflags);
13135 goto out_no_mqe_complete;
13137 spin_unlock_irqrestore(&phba->hbalock, iflags);
13139 pmbox = (MAILBOX_t *)&pmb->u.mqe;
13141 vport = pmb->vport;
13143 /* Reset heartbeat timer */
13144 phba->last_completion_time = jiffies;
13145 del_timer(&phba->sli.mbox_tmo);
13147 /* Move mbox data to caller's mailbox region, do endian swapping */
13148 if (pmb->mbox_cmpl && mbox)
13149 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
13152 * For mcqe errors, conditionally move a modified error code to
13153 * the mbox so that the error will not be missed.
13155 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13156 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13157 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13158 bf_set(lpfc_mqe_status, mqe,
13159 (LPFC_MBX_ERROR_RANGE | mcqe_status));
13161 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13162 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13163 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13164 "MBOX dflt rpi: status:x%x rpi:x%x",
13166 pmbox->un.varWords[0], 0);
13167 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
13168 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13169 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
13170 /* Reg_LOGIN of dflt RPI was successful. Now let's get
13171 * rid of the RPI using the same mbox buffer.
13173 lpfc_unreg_login(phba, vport->vpi,
13174 pmbox->un.varWords[0], pmb);
13175 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
13177 pmb->ctx_ndlp = ndlp;
13178 pmb->vport = vport;
13179 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
13180 if (rc != MBX_BUSY)
13181 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
13182 LOG_SLI, "0385 rc should "
13183 "have been MBX_BUSY\n");
13184 if (rc != MBX_NOT_FINISHED)
13185 goto send_current_mbox;
13188 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
13189 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
13190 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
13192 /* There is mailbox completion work to do */
13193 spin_lock_irqsave(&phba->hbalock, iflags);
13194 __lpfc_mbox_cmpl_put(phba, pmb);
13195 phba->work_ha |= HA_MBATT;
13196 spin_unlock_irqrestore(&phba->hbalock, iflags);
13200 spin_lock_irqsave(&phba->hbalock, iflags);
13201 /* Release the mailbox command posting token */
13202 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13203 /* Setting active mailbox pointer need to be in sync to flag clear */
13204 phba->sli.mbox_active = NULL;
13205 spin_unlock_irqrestore(&phba->hbalock, iflags);
13206 /* Wake up worker thread to post the next pending mailbox command */
13207 lpfc_worker_wake_up(phba);
13208 out_no_mqe_complete:
13209 if (bf_get(lpfc_trailer_consumed, mcqe))
13210 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13215 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
13216 * @phba: Pointer to HBA context object.
13217 * @cqe: Pointer to mailbox completion queue entry.
13219 * This routine processes a mailbox completion queue entry; it invokes the
13220 * proper mailbox completion or asynchronous event handling routine
13221 * according to the MCQE's async bit.
13223 * Return: true if work posted to worker thread, otherwise false.
13226 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13227 struct lpfc_cqe *cqe)
13229 struct lpfc_mcqe mcqe;
13234 /* Copy the mailbox MCQE and convert endian order as needed */
13235 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
13237 /* Invoke the proper event handling routine */
13238 if (!bf_get(lpfc_trailer_async, &mcqe))
13239 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
13240 else
13241 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
13246 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
13247 * @phba: Pointer to HBA context object.
13248 * @cq: Pointer to associated CQ
13249 * @wcqe: Pointer to work-queue completion queue entry.
13251 * This routine handles an ELS work-queue completion event.
13253 * Return: true if work posted to worker thread, otherwise false.
13256 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13257 struct lpfc_wcqe_complete *wcqe)
13259 struct lpfc_iocbq *irspiocbq;
13260 unsigned long iflags;
13261 struct lpfc_sli_ring *pring = cq->pring;
13263 int txcmplq_cnt = 0;
13264 int fcp_txcmplq_cnt = 0;
13266 /* Check for response status */
13267 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13268 /* Log the error status */
13269 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13270 "0357 ELS CQE error: status=x%x: "
13271 "CQE: %08x %08x %08x %08x\n",
13272 bf_get(lpfc_wcqe_c_status, wcqe),
13273 wcqe->word0, wcqe->total_data_placed,
13274 wcqe->parameter, wcqe->word3);
13277 /* Get an irspiocbq for later ELS response processing use */
13278 irspiocbq = lpfc_sli_get_iocbq(phba);
13280 if (!list_empty(&pring->txq))
13282 if (!list_empty(&pring->txcmplq))
13284 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13285 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
13286 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
13287 txq_cnt, phba->iocb_cnt,
13293 /* Save off the slow-path queue event for work thread to process */
13294 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
13295 spin_lock_irqsave(&phba->hbalock, iflags);
13296 list_add_tail(&irspiocbq->cq_event.list,
13297 &phba->sli4_hba.sp_queue_event);
13298 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13299 spin_unlock_irqrestore(&phba->hbalock, iflags);
13305 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
13306 * @phba: Pointer to HBA context object.
13307 * @wcqe: Pointer to work-queue completion queue entry.
13309 * This routine handles a slow-path WQ entry consumed event by invoking the
13310 * proper WQ release routine for the slow-path WQ.
13313 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13314 struct lpfc_wcqe_release *wcqe)
13316 /* sanity check on queue memory */
13317 if (unlikely(!phba->sli4_hba.els_wq))
13319 /* Check for the slow-path ELS work queue */
13320 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13321 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13322 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13324 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13325 "2579 Slow-path wqe consume event carries "
13326 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13327 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
13328 phba->sli4_hba.els_wq->queue_id);
13332 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
13333 * @phba: Pointer to HBA context object.
13334 * @cq: Pointer to a WQ completion queue.
13335 * @wcqe: Pointer to work-queue completion queue entry.
13337 * This routine handles an XRI abort event.
13339 * Return: true if work posted to worker thread, otherwise false.
13342 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13343 struct lpfc_queue *cq,
13344 struct sli4_wcqe_xri_aborted *wcqe)
13346 bool workposted = false;
13347 struct lpfc_cq_event *cq_event;
13348 unsigned long iflags;
13350 switch (cq->subtype) {
13352 lpfc_sli4_fcp_xri_aborted(phba, wcqe, cq->hdwq);
13353 workposted = false;
13355 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
13357 cq_event = lpfc_cq_event_setup(
13358 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
13361 cq_event->hdwq = cq->hdwq;
13362 spin_lock_irqsave(&phba->hbalock, iflags);
13363 list_add_tail(&cq_event->list,
13364 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13365 /* Set the els xri abort event flag */
13366 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13367 spin_unlock_irqrestore(&phba->hbalock, iflags);
13371 /* Notify aborted XRI for NVME work queue */
13372 if (phba->nvmet_support)
13373 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13374 else
13375 lpfc_sli4_nvme_xri_aborted(phba, wcqe, cq->hdwq);
13377 workposted = false;
13380 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13381 "0603 Invalid CQ subtype %d: "
13382 "%08x %08x %08x %08x\n",
13383 cq->subtype, wcqe->word0, wcqe->parameter,
13384 wcqe->word2, wcqe->word3);
13385 workposted = false;
13391 #define FC_RCTL_MDS_DIAGS 0xF4
13394 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
13395 * @phba: Pointer to HBA context object.
13396 * @rcqe: Pointer to receive-queue completion queue entry.
13398 * This routine processes a receive-queue completion queue entry.
13400 * Return: true if work posted to worker thread, otherwise false.
13403 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13405 bool workposted = false;
13406 struct fc_frame_header *fc_hdr;
13407 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13408 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
13409 struct lpfc_nvmet_tgtport *tgtp;
13410 struct hbq_dmabuf *dma_buf;
13411 uint32_t status, rq_id;
13412 unsigned long iflags;
13414 /* sanity check on queue memory */
13415 if (unlikely(!hrq) || unlikely(!drq))
13418 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13419 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13420 else
13421 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13422 if (rq_id != hrq->queue_id)
13425 status = bf_get(lpfc_rcqe_status, rcqe);
13427 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13428 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13429 "2537 Receive Frame Truncated!!\n");
13431 case FC_STATUS_RQ_SUCCESS:
13432 spin_lock_irqsave(&phba->hbalock, iflags);
13433 lpfc_sli4_rq_release(hrq, drq);
13434 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13435 if (!dma_buf) {
13436 hrq->RQ_no_buf_found++;
13437 spin_unlock_irqrestore(&phba->hbalock, iflags);
13441 hrq->RQ_buf_posted--;
13442 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
13444 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13446 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
13447 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
13448 spin_unlock_irqrestore(&phba->hbalock, iflags);
13449 /* Handle MDS Loopback frames */
13450 lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
13454 /* save off the frame for the work thread to process */
13455 list_add_tail(&dma_buf->cq_event.list,
13456 &phba->sli4_hba.sp_queue_event);
13457 /* Frame received */
13458 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13459 spin_unlock_irqrestore(&phba->hbalock, iflags);
13462 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13463 if (phba->nvmet_support) {
13464 tgtp = phba->targetport->private;
13465 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13466 "6402 RQE Error x%x, posted %d err_cnt "
13468 status, hrq->RQ_buf_posted,
13469 hrq->RQ_no_posted_buf,
13470 atomic_read(&tgtp->rcv_fcp_cmd_in),
13471 atomic_read(&tgtp->rcv_fcp_cmd_out),
13472 atomic_read(&tgtp->xmt_fcp_release));
13476 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13477 hrq->RQ_no_posted_buf++;
13478 /* Post more buffers if possible */
13479 spin_lock_irqsave(&phba->hbalock, iflags);
13480 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13481 spin_unlock_irqrestore(&phba->hbalock, iflags);
13490 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
13491 * @phba: Pointer to HBA context object.
13492 * @cq: Pointer to the completion queue.
13493 * @cqe: Pointer to a completion queue entry.
13495 * This routine processes a slow-path work-queue or receive-queue completion queue entry.
13498 * Return: true if work posted to worker thread, otherwise false.
13501 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13502 struct lpfc_cqe *cqe)
13504 struct lpfc_cqe cqevt;
13505 bool workposted = false;
13507 /* Copy the work queue CQE and convert endian order if needed */
13508 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
13510 /* Check the WCQE code and dispatch to the proper handler */
13511 switch (bf_get(lpfc_cqe_code, &cqevt)) {
13512 case CQE_CODE_COMPL_WQE:
13513 /* Process the WQ/RQ complete event */
13514 phba->last_completion_time = jiffies;
13515 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
13516 (struct lpfc_wcqe_complete *)&cqevt);
13518 case CQE_CODE_RELEASE_WQE:
13519 /* Process the WQ release event */
13520 lpfc_sli4_sp_handle_rel_wcqe(phba,
13521 (struct lpfc_wcqe_release *)&cqevt);
13523 case CQE_CODE_XRI_ABORTED:
13524 /* Process the WQ XRI abort event */
13525 phba->last_completion_time = jiffies;
13526 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13527 (struct sli4_wcqe_xri_aborted *)&cqevt);
13529 case CQE_CODE_RECEIVE:
13530 case CQE_CODE_RECEIVE_V1:
13531 /* Process the RQ event */
13532 phba->last_completion_time = jiffies;
13533 workposted = lpfc_sli4_sp_handle_rcqe(phba,
13534 (struct lpfc_rcqe *)&cqevt);
13537 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13538 "0388 Not a valid WCQE code: x%x\n",
13539 bf_get(lpfc_cqe_code, &cqevt));
13546 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
13547 * @phba: Pointer to HBA context object.
13548 * @eqe: Pointer to fast-path event queue entry.
13550 * This routine processes an event queue entry from the slow-path event
13551 * queue. It checks the MajorCode and MinorCode to determine whether this is
13552 * for a completion event on a completion queue; if not, an error is logged
13553 * and the routine just returns. Otherwise, it gets the corresponding
13554 * completion queue, processes all the entries on that completion queue,
13555 * rearms the completion queue, and then returns.
13559 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13560 struct lpfc_queue *speq)
13562 struct lpfc_queue *cq = NULL, *childq;
13565 /* Get the reference to the corresponding CQ */
13566 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13568 list_for_each_entry(childq, &speq->child_list, list) {
13569 if (childq->queue_id == cqid) {
13574 if (unlikely(!cq)) {
13575 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13576 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13577 "0365 Slow-path CQ identifier "
13578 "(%d) does not exist\n", cqid);
13582 /* Save EQ associated with this CQ */
13583 cq->assoc_qp = speq;
13585 if (!queue_work_on(cq->chann, phba->wq, &cq->spwork))
13586 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13587 "0390 Cannot schedule soft IRQ "
13588 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13589 cqid, cq->queue_id, raw_smp_processor_id());

/**
 * __lpfc_sli4_process_cq - Process elements of a CQ
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to CQ to be processed
 * @handler: Routine to process each cqe
 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
 *
 * This routine processes completion queue entries in a CQ. While a valid
 * queue element is found, the handler is called. During processing, checks
 * are made for periodic doorbell writes to let the hardware know of
 * element consumption.
 *
 * If the max limit on cqes to process is hit, or there are no more valid
 * entries, the loop stops. If we processed a sufficient number of elements,
 * meaning there is sufficient load, rather than rearming and generating
 * another interrupt, a cq rescheduling delay will be set. A delay of 0
 * indicates no rescheduling.
 *
 * Returns True if work scheduled, False otherwise.
 **/
static bool
__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
	bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
	struct lpfc_cqe *), unsigned long *delay)
{
	struct lpfc_cqe *cqe;
	bool workposted = false;
	int count = 0, consumed = 0;
	bool arm = true;

	/* default - no reschedule */
	*delay = 0;

	if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	/* Process all the entries to the CQ */
	cq->q_flag = 0;
	cqe = lpfc_sli4_cq_get(cq);
	while (cqe) {
		workposted |= handler(phba, cq, cqe);
		__lpfc_sli4_consume_cqe(phba, cq, cqe);

		consumed++;
		if (!(++count % cq->max_proc_limit))
			break;

		if (!(count % cq->notify_interval)) {
			phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
						LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		if (count == LPFC_NVMET_CQ_NOTIFY)
			cq->q_flag |= HBA_NVMET_CQ_NOTIFY;

		cqe = lpfc_sli4_cq_get(cq);
	}
	if (count >= phba->cfg_cq_poll_threshold) {
		*delay = 1;
		arm = false;
	}

	/* Track the max number of CQEs processed in 1 EQ */
	if (count > cq->CQ_max_cqe)
		cq->CQ_max_cqe = count;

	cq->assoc_qp->EQ_cqe_cnt += count;

	/* Catch the no cq entry condition */
	if (unlikely(count == 0))
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0369 No entry from completion queue "
				"qid=%d\n", cq->queue_id);

	cq->queue_claimed = 0;

rearm_and_exit:
	phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
			arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);

	return workposted;
}
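
/*
 * Editor's sketch (illustrative only, not driver code): a handler passed to
 * __lpfc_sli4_process_cq() must match the callback type above and return
 * true only when it has posted work to the worker thread. The name
 * lpfc_example_cqe_handler is hypothetical:
 *
 *	static bool
 *	lpfc_example_cqe_handler(struct lpfc_hba *phba, struct lpfc_queue *cq,
 *				 struct lpfc_cqe *cqe)
 *	{
 *		uint32_t code = bf_get(lpfc_cqe_code, cqe);
 *
 *		return code == CQE_CODE_COMPL_WQE;
 *	}
 *
 * The caller consumes each entry and writes the CQ doorbell itself, so a
 * handler must do neither.
 */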

/**
 * __lpfc_sli4_sp_process_cq - Process a slow-path completion queue
 * @cq: pointer to CQ to process
 *
 * This routine calls the cq processing routine with a handler specific
 * to the type of queue bound to it.
 *
 * The CQ routine returns two values: the first is the calling status,
 * which indicates whether work was queued to the background discovery
 * thread. If true, the routine should wake up the discovery thread;
 * the second is the delay parameter. If non-zero, rather than rearming
 * the CQ and yet another interrupt, the CQ handler should be queued so
 * that it is processed in a subsequent polling action. The value of
 * the delay indicates when to reschedule it.
 **/
static void
__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
{
	struct lpfc_hba *phba = cq->phba;
	unsigned long delay;
	bool workposted = false;

	/* Process and rearm the CQ */
	switch (cq->type) {
	case LPFC_MCQ:
		workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_sp_handle_mcqe,
						&delay);
		break;
	case LPFC_WCQ:
		if (cq->subtype == LPFC_FCP || cq->subtype == LPFC_NVME)
			workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_fp_handle_cqe,
						&delay);
		else
			workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_sp_handle_cqe,
						&delay);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0370 Invalid completion queue type (%d)\n",
				cq->type);
		return;
	}

	if (delay) {
		if (!queue_delayed_work_on(cq->chann, phba->wq,
					   &cq->sched_spwork, delay))
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0394 Cannot schedule soft IRQ "
				"for cqid=%d on CPU %d\n",
				cq->queue_id, cq->chann);
	}

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
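
/*
 * Editor's note (illustrative): the delay returned by
 * __lpfc_sli4_process_cq() is passed straight to queue_delayed_work_on(),
 * so it is expressed in jiffies; the value 1 set under heavy load defers
 * the next CQ sweep by one tick instead of rearming the CQ for an
 * immediate interrupt:
 *
 *	unsigned long delay = 0;
 *
 *	workposted = __lpfc_sli4_process_cq(phba, cq, handler, &delay);
 *	if (delay)
 *		queue_delayed_work_on(cq->chann, phba->wq,
 *				      &cq->sched_spwork, delay);
 */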

/**
 * lpfc_sli4_sp_process_cq - slow-path work handler when started by workqueue
 * @work: pointer to work element
 *
 * translates from the work handler and calls the slow-path handler.
 **/
static void
lpfc_sli4_sp_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);

	__lpfc_sli4_sp_process_cq(cq);
}

/**
 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
 * @work: pointer to work element
 *
 * translates from the work handler and calls the slow-path handler.
 **/
static void
lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(to_delayed_work(work),
					struct lpfc_queue, sched_spwork);

	__lpfc_sli4_sp_process_cq(cq);
}
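
/*
 * Editor's note (illustrative): to_delayed_work() is the standard kernel
 * helper that maps the work_struct embedded inside a delayed_work back to
 * the delayed_work, and container_of() then recovers the lpfc_queue that
 * embeds it. Expanded, the lookup above is equivalent to:
 *
 *	struct delayed_work *dwork = to_delayed_work(work);
 *	struct lpfc_queue *cq = container_of(dwork, struct lpfc_queue,
 *					     sched_spwork);
 */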

/**
 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from a
 * fast-path event queue for FCP command response completion.
 **/
static void
lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_sli_ring *pring = cq->pring;
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq irspiocbq;
	unsigned long iflags;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* If resource errors reported from HBA, reduce queue
		 * depth of the SCSI device.
		 */
		if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
		     IOSTAT_LOCAL_REJECT) &&
		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES))
			phba->lpfc_rampdown_queue_depth(phba);

		/* Log the error status */
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0373 FCP CQE error: status=x%x: "
				"CQE: %08x %08x %08x %08x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
	}

	/* Look up the FCP command IOCB and create pseudo response IOCB */
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0374 FCP complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	cmdiocbq->isr_timestamp = cq->isr_timestamp;
#endif
	if (cmdiocbq->iocb_cmpl == NULL) {
		if (cmdiocbq->wqe_cmpl) {
			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
				spin_lock_irqsave(&phba->hbalock, iflags);
				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
				spin_unlock_irqrestore(&phba->hbalock, iflags);
			}

			/* Pass the cmd_iocb and the wcqe to the upper layer */
			(cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
			return;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0375 FCP cmdiocb not callback function "
				"iotag: (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}

	/* Fake the irspiocb and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);

	if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Pass the cmd_iocb and the rsp state to the upper layer */
	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}
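
/*
 * Editor's note (illustrative): completed I/Os carry one of two callback
 * styles. New-style fast-path commands set wqe_cmpl and are handed the raw
 * WCQE; legacy paths set iocb_cmpl and expect an IOCB-shaped response,
 * which is why a pseudo iocbq is faked from the WCQE above. In sketch form:
 *
 *	if (!cmdiocbq->iocb_cmpl && cmdiocbq->wqe_cmpl)
 *		cmdiocbq->wqe_cmpl(phba, cmdiocbq, wcqe);
 *	else if (cmdiocbq->iocb_cmpl)
 *		cmdiocbq->iocb_cmpl(phba, cmdiocbq, &irspiocbq);
 */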

/**
 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a fast-path WQ entry consumed event by invoking the
 * proper WQ release routine on the matching WQ.
 **/
static void
lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_release *wcqe)
{
	struct lpfc_queue *childwq;
	bool wqid_matched = false;
	uint16_t hba_wqid;

	/* Check for fast-path FCP work queue release */
	hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
	list_for_each_entry(childwq, &cq->child_list, list) {
		if (childwq->queue_id == hba_wqid) {
			lpfc_sli4_wq_release(childwq,
					bf_get(lpfc_wcqe_r_wqe_index, wcqe));
			if (childwq->q_flag & HBA_NVMET_WQFULL)
				lpfc_nvmet_wqfull_process(phba, childwq);
			wqid_matched = true;
			break;
		}
	}
	/* Report warning log message if no match found */
	if (wqid_matched != true)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2580 Fast-path wqe consume event carries "
				"miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
}

/**
 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes a receive-queue completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			    struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;
	struct rqb_dmabuf *dma_buf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, rq_id;
	unsigned long iflags;
	uint32_t fctl, idx;

	if ((phba->nvmet_support == 0) ||
	    (phba->sli4_hba.nvmet_cqset == NULL))
		return workposted;

	idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
	hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
	drq = phba->sli4_hba.nvmet_mrq_data[idx];

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);

	if ((phba->nvmet_support == 0) ||
	    (rq_id != hrq->queue_id))
		return workposted;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"6126 Receive Frame Truncated!!\n");
		/* fall through */
	case FC_STATUS_RQ_SUCCESS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

		/* Just some basic sanity checks on FCP Command frame */
		fctl = (fc_hdr->fh_f_ctl[0] << 16 |
			fc_hdr->fh_f_ctl[1] << 8 |
			fc_hdr->fh_f_ctl[2]);
		if (((fctl &
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
		    (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
			goto drop;

		if (fc_hdr->fh_type == FC_TYPE_FCP) {
			dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
			lpfc_nvmet_unsol_fcp_event(
				phba, idx, dma_buf, cq->isr_timestamp,
				cq->q_flag & HBA_NVMET_CQ_NOTIFY);
			return false;
		}
drop:
		lpfc_rq_buf_free(phba, &dma_buf->hbuf);
		break;
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		if (phba->nvmet_support) {
			tgtp = phba->targetport->private;
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
					"6401 RQE Error x%x, posted %d err_cnt "
					"%d: %x %x %x\n",
					status, hrq->RQ_buf_posted,
					hrq->RQ_no_posted_buf,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));
		}
		/* fall through */
	case FC_STATUS_INSUFF_BUF_NEED_BUF:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		break;
	}
out:
	return workposted;
}
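
/*
 * Editor's note (illustrative): F_CTL is a 24-bit field carried as three
 * bytes in the FC frame header, which is why it is reassembled by shifting
 * above. The sanity check accepts only single-sequence command frames that
 * also transfer sequence initiative:
 *
 *	u32 mask = FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
 *	u32 fctl = (fc_hdr->fh_f_ctl[0] << 16) |
 *		   (fc_hdr->fh_f_ctl[1] << 8) |
 *		    fc_hdr->fh_f_ctl[2];
 *	bool ok  = ((fctl & mask) == mask) && fc_hdr->fh_seq_cnt == 0;
 */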

/**
 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
 * @phba: adapter with cq
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to fast-path completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from a
 * fast-path event queue for FCP command response completion.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	struct lpfc_wcqe_release wcqe;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
	case CQE_CODE_COMPL_WQE:
	case CQE_CODE_NVME_ERSP:
		cq->CQ_wq++;
		/* Process the WQ complete event */
		phba->last_completion_time = jiffies;
		if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&wcqe);
		if (cq->subtype == LPFC_NVME_LS)
			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&wcqe);
		break;
	case CQE_CODE_RELEASE_WQE:
		cq->CQ_release_wqe++;
		/* Process the WQ release event */
		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
				(struct lpfc_wcqe_release *)&wcqe);
		break;
	case CQE_CODE_XRI_ABORTED:
		cq->CQ_xri_aborted++;
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&wcqe);
		break;
	case CQE_CODE_RECEIVE_V1:
	case CQE_CODE_RECEIVE:
		phba->last_completion_time = jiffies;
		if (cq->subtype == LPFC_NVMET) {
			workposted = lpfc_sli4_nvmet_handle_rcqe(
				phba, cq, (struct lpfc_rcqe *)&wcqe);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0144 Not a valid CQE code: x%x\n",
				bf_get(lpfc_wcqe_c_code, &wcqe));
		break;
	}
	return workposted;
}

/**
 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eq: Pointer to the fast-path event queue.
 * @eqe: Pointer to fast-path event queue entry.
 *
 * This routine processes an event queue entry from the fast-path event queue.
 * It will check the MajorCode and MinorCode to determine whether this is for
 * a completion event on a completion queue; if not, an error shall be logged
 * and the routine just returns. Otherwise, it will get to the corresponding
 * completion queue and process all the entries on the completion queue, rearm
 * the completion queue, and then return.
 **/
static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			 struct lpfc_eqe *eqe)
{
	struct lpfc_queue *cq = NULL;
	uint32_t qidx = eq->hdwq;
	uint16_t cqid, id;

	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0366 Not a valid completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get_le32(lpfc_eqe_major_code, eqe),
				bf_get_le32(lpfc_eqe_minor_code, eqe));
		return;
	}

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	/* Use the fast lookup method first */
	if (cqid <= phba->sli4_hba.cq_max) {
		cq = phba->sli4_hba.cq_lookup[cqid];
		if (cq)
			goto work_cq;
	}

	/* Next check for NVMET completion */
	if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
		id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
		if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
			/* Process NVMET unsol rcv */
			cq = phba->sli4_hba.nvmet_cqset[cqid - id];
			goto process_cq;
		}
	}

	if (phba->sli4_hba.nvmels_cq &&
	    (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
		/* Process NVME unsol rcv */
		cq = phba->sli4_hba.nvmels_cq;
	}

	/* Otherwise this is a Slow path event */
	if (cq == NULL) {
		lpfc_sli4_sp_handle_eqe(phba, eqe,
					phba->sli4_hba.hdwq[qidx].hba_eq);
		return;
	}

process_cq:
	if (unlikely(cqid != cq->queue_id)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0368 Miss-matched fast-path completion "
				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
				cqid, cq->queue_id);
		return;
	}

work_cq:
#if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
	if (phba->ktime_on)
		cq->isr_timestamp = ktime_get_ns();
	else
		cq->isr_timestamp = 0;
#endif
	if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0363 Cannot schedule soft IRQ "
				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
				cqid, cq->queue_id, raw_smp_processor_id());
}
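
/*
 * Editor's note (illustrative): cq_lookup[] is a flat table indexed by the
 * CQID reported in the EQE, giving O(1) resolution for fast-path CQs; the
 * range checks above are only fallbacks for queues not entered in the table
 * (NVMET MRQ sets, NVME LS, slow path). In sketch form:
 *
 *	if (cqid <= phba->sli4_hba.cq_max &&
 *	    (cq = phba->sli4_hba.cq_lookup[cqid]) != NULL)
 *		queue_work_on(cq->chann, phba->wq, &cq->irqwork);
 */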

/**
 * __lpfc_sli4_hba_process_cq - Process a fast-path completion queue
 * @cq: Pointer to CQ to be processed
 *
 * This routine calls the cq processing routine with the handler for
 * fast-path CQEs.
 *
 * The CQ routine returns two values: the first is the calling status,
 * which indicates whether work was queued to the background discovery
 * thread. If true, the routine should wake up the discovery thread;
 * the second is the delay parameter. If non-zero, rather than rearming
 * the CQ and yet another interrupt, the CQ handler should be queued so
 * that it is processed in a subsequent polling action. The value of
 * the delay indicates when to reschedule it.
 **/
static void
__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
{
	struct lpfc_hba *phba = cq->phba;
	unsigned long delay;
	bool workposted = false;

	/* process and rearm the CQ */
	workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
					     &delay);

	if (delay) {
		if (!queue_delayed_work_on(cq->chann, phba->wq,
					   &cq->sched_irqwork, delay))
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0367 Cannot schedule soft IRQ "
				"for cqid=%d on CPU %d\n",
				cq->queue_id, cq->chann);
	}

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_sli4_hba_process_cq - fast-path work handler when started by workqueue
 * @work: pointer to work element
 *
 * translates from the work handler and calls the fast-path handler.
 **/
static void
lpfc_sli4_hba_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);

	__lpfc_sli4_hba_process_cq(cq);
}

/**
 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
 * @work: pointer to work element
 *
 * translates from the work handler and calls the fast-path handler.
 **/
static void
lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(to_delayed_work(work),
					struct lpfc_queue, sched_irqwork);

	__lpfc_sli4_hba_process_cq(cq);
}

/**
 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when a device with an SLI-4 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures. Note that
 * the FCP EQ to FCP CQ mapping is one-to-one, such that the FCP EQ index is
 * equal to that of the FCP CQ index.
 *
 * The link attention and ELS ring attention events are handled
 * by the worker thread. The interrupt handler signals the worker thread
 * and returns for these events. This function is called without any lock
 * held. It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_hba_eq_hdl *hba_eq_hdl;
	struct lpfc_queue *fpeq;
	unsigned long iflag;
	int ecount = 0;
	int hba_eqidx;
	struct lpfc_eq_intr_info *eqi;
	uint32_t icnt;

	/* Get the driver's phba structure from the dev_id */
	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
	phba = hba_eq_hdl->phba;
	hba_eqidx = hba_eq_hdl->idx;

	if (unlikely(!phba))
		return IRQ_NONE;
	if (unlikely(!phba->sli4_hba.hdwq))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
	if (unlikely(!fpeq))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eq_flush(phba, fpeq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return IRQ_NONE;
	}

	eqi = phba->sli4_hba.eq_info;
	icnt = this_cpu_inc_return(eqi->icnt);
	fpeq->last_cpu = raw_smp_processor_id();

	if (icnt > LPFC_EQD_ISR_TRIGGER &&
	    phba->cfg_irq_chann == 1 &&
	    phba->cfg_auto_imax &&
	    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
	    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
		lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);

	/* process and rearm the EQ */
	ecount = lpfc_sli4_process_eq(phba, fpeq);

	if (unlikely(ecount == 0)) {
		fpeq->EQ_no_entry++;
		if (phba->intr_type == MSIX)
			/* MSI-X treated interrupt served as no EQ share INT */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0358 MSI-X interrupt with no EQE\n");
		else
			/* Non MSI-X treated on interrupt as EQ share INT */
			return IRQ_NONE;
	}

	return IRQ_HANDLED;
} /* lpfc_sli4_hba_intr_handler */
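
/*
 * Editor's note (illustrative): eq_info is a per-cpu allocation, so
 * this_cpu_inc_return() bumps and reads this CPU's interrupt count without
 * locking; when that count crosses LPFC_EQD_ISR_TRIGGER, the EQ delay is
 * raised to its maximum to throttle the interrupt rate. The pattern:
 *
 *	struct lpfc_eq_intr_info *eqi = phba->sli4_hba.eq_info;
 *	uint32_t icnt = this_cpu_inc_return(eqi->icnt);
 */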

/**
 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the device-level interrupt handler to device with SLI-4
 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
 * interrupt mode is enabled and there is an event in the HBA which requires
 * driver attention. This function invokes the slow-path interrupt attention
 * handling function and fast-path interrupt attention handling function in
 * turn to process the relevant HBA attention events. This function is called
 * without any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t hba_irq_rc;
	bool hba_handled = false;
	int qidx;

	/* Get the driver's phba structure from the dev_id */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */
	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
		hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
					&phba->sli4_hba.hba_eq_hdl[qidx]);
		if (hba_irq_rc == IRQ_HANDLED)
			hba_handled |= true;
	}

	return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */

/**
 * lpfc_sli4_queue_free - free a queue structure and associated memory
 * @queue: The queue structure to free.
 *
 * This function frees a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called after destroying the
 * queue on the HBA.
 **/
void
lpfc_sli4_queue_free(struct lpfc_queue *queue)
{
	struct lpfc_dmabuf *dmabuf;

	if (!queue)
		return;

	if (!list_empty(&queue->wq_list))
		list_del(&queue->wq_list);

	while (!list_empty(&queue->page_list)) {
		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
				 list);
		dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	if (queue->rqbp) {
		lpfc_free_rq_buffer(queue->phba, queue);
		kfree(queue->rqbp);
	}

	if (!list_empty(&queue->cpu_list))
		list_del(&queue->cpu_list);

	kfree(queue);
}

/**
 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
 * @phba: The HBA that this queue is being created on.
 * @page_size: The size of a queue page
 * @entry_size: The size of each queue entry for this queue.
 * @entry_count: The number of entries that this queue will handle.
 * @cpu: The cpu that will primarily utilize this queue.
 *
 * This function allocates a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called before creating the
 * queue on the HBA.
 **/
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
		      uint32_t entry_size, uint32_t entry_count, int cpu)
{
	struct lpfc_queue *queue;
	struct lpfc_dmabuf *dmabuf;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	uint16_t x, pgcnt;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = page_size;

	pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;

	/* If needed, adjust page count to match the max the adapter supports */
	if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
		pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;

	queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!queue)
		return NULL;

	INIT_LIST_HEAD(&queue->list);
	INIT_LIST_HEAD(&queue->wq_list);
	INIT_LIST_HEAD(&queue->wqfull_list);
	INIT_LIST_HEAD(&queue->page_list);
	INIT_LIST_HEAD(&queue->child_list);
	INIT_LIST_HEAD(&queue->cpu_list);

	/* Set queue parameters now. If the system cannot provide memory
	 * resources, the free routine needs to know what was allocated.
	 */
	queue->page_count = pgcnt;
	queue->q_pgs = (void **)&queue[1];
	queue->entry_cnt_per_pg = hw_page_size / entry_size;
	queue->entry_size = entry_size;
	queue->entry_count = entry_count;
	queue->page_size = hw_page_size;
	queue->phba = phba;

	for (x = 0; x < queue->page_count; x++) {
		dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
				      dev_to_node(&phba->pcidev->dev));
		if (!dmabuf)
			goto out_fail;
		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
						  hw_page_size, &dmabuf->phys,
						  GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			goto out_fail;
		}
		dmabuf->buffer_tag = x;
		list_add_tail(&dmabuf->list, &queue->page_list);
		/* use lpfc_sli4_qe to index a particular entry in this page */
		queue->q_pgs[x] = dmabuf->virt;
	}
	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
	INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
	INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
	INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);

	/* notify_interval will be set during q creation */

	return queue;
out_fail:
	lpfc_sli4_queue_free(queue);
	return NULL;
}
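
/*
 * Editor's note (illustrative): ALIGN() rounds the byte footprint of the
 * queue up to a whole number of hardware pages. For example, with
 * entry_size = 64, entry_count = 1024 and a 4 KiB hardware page:
 *
 *	pgcnt = ALIGN(64 * 1024, 4096) / 4096;	yields 16 pages
 *
 * and entry_cnt_per_pg = 4096 / 64 = 64 entries per page.
 */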

/**
 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
 * @phba: HBA structure that indicates port to create a queue on.
 * @pci_barset: PCI BAR set flag.
 *
 * This function shall perform iomap of the specified PCI BAR address to host
 * memory address if not already done so and return it. The returned host
 * memory address can be NULL.
 **/
static void __iomem *
lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
{
	if (!phba->pcidev)
		return NULL;

	switch (pci_barset) {
	case WQ_PCI_BAR_0_AND_1:
		return phba->pci_bar0_memmap_p;
	case WQ_PCI_BAR_2_AND_3:
		return phba->pci_bar2_memmap_p;
	case WQ_PCI_BAR_4_AND_5:
		return phba->pci_bar4_memmap_p;
	default:
		break;
	}
	return NULL;
}

/**
 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
 * @phba: HBA structure that EQs are on.
 * @startq: The starting EQ index to modify
 * @numq: The number of EQs (consecutive indexes) to modify
 * @usdelay: amount of delay
 *
 * This function revises the EQ delay on 1 or more EQs. The EQ delay
 * is set either by writing to a register (if supported by the SLI Port)
 * or by mailbox command. The mailbox command allows several EQs to be
 * updated at once.
 *
 * The @phba struct is used to send a mailbox command to HBA. The @startq
 * is used to get the starting EQ index to change. The @numq value is
 * used to specify how many consecutive EQ indexes, starting at EQ index,
 * are to be changed. This function is asynchronous and will wait for any
 * mailbox commands to finish before returning.
 *
 * On success this function will return a zero. If unable to allocate
 * enough memory this function will return -ENOMEM. If a mailbox command
 * fails this function will return -ENXIO. Note: on ENXIO, some EQs may
 * have had their delay multiplier changed.
 **/
void
lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
			 uint32_t numq, uint32_t usdelay)
{
	struct lpfc_mbx_modify_eq_delay *eq_delay;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_queue *eq;
	int cnt = 0, rc, length;
	uint32_t shdr_status, shdr_add_status;
	uint32_t dmult;
	int qidx;
	union lpfc_sli4_cfg_shdr *shdr;

	if (startq >= phba->cfg_irq_chann)
		return;

	if (usdelay > 0xFFFF) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
				"6429 usdelay %d too large. Scaled down to "
				"0xFFFF.\n", usdelay);
		usdelay = 0xFFFF;
	}

	/* set values by EQ_DELAY register if supported */
	if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
		for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
			if (!eq)
				continue;

			lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);

			if (++cnt >= numq)
				break;
		}
		return;
	}

	/* Otherwise, set values by mailbox cmd */

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME,
				"6428 Failed allocating mailbox cmd buffer."
				" EQ delay was not set.\n");
		return;
	}
	length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_delay = &mbox->u.mqe.un.eq_delay;

	/* Calculate delay multiplier from maximum interrupt per second */
	dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
	if (dmult)
		dmult--;
	if (dmult > LPFC_DMULT_MAX)
		dmult = LPFC_DMULT_MAX;

	for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
		eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
		if (!eq)
			continue;
		eq->q_mode = usdelay;
		eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
		eq_delay->u.request.eq[cnt].phase = 0;
		eq_delay->u.request.eq[cnt].delay_multi = dmult;

		if (++cnt >= numq)
			break;
	}
	eq_delay->u.request.num_eq = cnt;

	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->ctx_buf = NULL;
	mbox->ctx_ndlp = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2512 MODIFY_EQ_DELAY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
	}
	mempool_free(mbox, phba->mbox_mem_pool);
}
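
/*
 * Editor's note (illustrative): the delay multiplier encodes microseconds
 * of coalescing as a fraction of the port's EQ timer base. With the
 * constants from lpfc_hw4.h (LPFC_DMULT_CONST and LPFC_SEC_TO_USEC):
 *
 *	dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
 *
 * so a larger usdelay yields a larger multiplier, clamped to
 * LPFC_DMULT_MAX before being written into each eq_delay request entry.
 */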

/**
 * lpfc_eq_create - Create an Event Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @eq: The queue structure to use to create the event queue.
 * @imax: The maximum interrupt per second limit.
 *
 * This function creates an event queue, as detailed in @eq, on a port,
 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @eq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the EQ_CREATE mailbox command to the HBA to setup the
 * event queue. This function is asynchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int
lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
{
	struct lpfc_mbx_eq_create *eq_create;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	struct lpfc_dmabuf *dmabuf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint16_t dmult;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!eq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_create = &mbox->u.mqe.un.eq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
	       eq->page_count);
	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
	       LPFC_EQE_SIZE);
	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);

	/* Use version 2 of CREATE_EQ if eqav is set */
	if (phba->sli4_hba.pc_sli4_params.eqav) {
		bf_set(lpfc_mbox_hdr_version, &shdr->request,
		       LPFC_Q_CREATE_VERSION_2);
		bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
		       phba->sli4_hba.pc_sli4_params.eqav);
	}

	/* don't setup delay multiplier using EQ_CREATE */
	dmult = 0;
	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
	       dmult);
	switch (eq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0360 Unsupported EQ count. (%d)\n",
				eq->entry_count);
		if (eq->entry_count < 256) {
			status = -EINVAL;
			goto out;
		}
		/* fall through - otherwise default to smallest count */
	case 256:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_1024);
		break;
	case 2048:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_2048);
		break;
	case 4096:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_4096);
		break;
	}
	list_for_each_entry(dmabuf, &eq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->ctx_buf = NULL;
	mbox->ctx_ndlp = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2500 EQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	eq->type = LPFC_EQ;
	eq->subtype = LPFC_NONE;
	eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
	if (eq->queue_id == 0xFFFF)
		status = -ENXIO;
	eq->host_index = 0;
	eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
	eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
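
/*
 * Editor's note (illustrative): the EQ_CREATE context does not take a raw
 * entry count; it takes one of the discrete LPFC_EQ_CNT_* encodings, which
 * is why the switch above only accepts 256/512/1024/2048/4096 and rejects
 * anything smaller than 256. A caller sizing an EQ therefore picks from
 * those steps; hypothetically:
 *
 *	eq = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 *				   phba->sli4_hba.eq_esize, 1024, cpu);
 *	rc = lpfc_eq_create(phba, eq, imax);
 */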

/**
 * lpfc_cq_create - Create a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @cq: The queue structure to use to create the completion queue.
 * @eq: The event queue to bind this completion queue to.
 *
 * This function creates a completion queue, as detailed in @cq, on a port,
 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @cq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @eq
 * is used to indicate which event queue to bind this completion queue to. This
 * function will send the CQ_CREATE mailbox command to the HBA to setup the
 * completion queue. This function is asynchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int
lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
	       struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
{
	struct lpfc_mbx_cq_create *cq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!cq || !eq)
		return -ENODEV;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	cq_create = &mbox->u.mqe.un.cq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
	       cq->page_count);
	bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
	bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.cqv);
	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
		       (cq->page_size / SLI4_PAGE_SIZE));
		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
		       eq->queue_id);
		bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
		       phba->sli4_hba.pc_sli4_params.cqav);
	} else {
		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
		       eq->queue_id);
	}
	switch (cq->entry_count) {
	case 2048:
	case 4096:
		if (phba->sli4_hba.pc_sli4_params.cqv ==
		    LPFC_Q_CREATE_VERSION_2) {
			cq_create->u.request.context.lpfc_cq_context_count =
				cq->entry_count;
			bf_set(lpfc_cq_context_count,
			       &cq_create->u.request.context,
			       LPFC_CQ_CNT_WORD7);
			break;
		}
		/* fall through */
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0361 Unsupported CQ count: "
				"entry cnt %d sz %d pg cnt %d\n",
				cq->entry_count, cq->entry_size,
				cq->page_count);
		if (cq->entry_count < 256) {
			status = -EINVAL;
			goto out;
		}
		/* fall through - otherwise default to smallest count */
	case 256:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_1024);
		break;
	}
	list_for_each_entry(dmabuf, &cq->page_list, list) {
		memset(dmabuf->virt, 0, cq->page_size);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2501 CQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	if (cq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	/* link the cq onto the parent eq child list */
	list_add_tail(&cq->list, &eq->child_list);
	/* Set up completion queue's type and subtype */
	cq->type = type;
	cq->subtype = subtype;
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	cq->assoc_qid = eq->queue_id;
	cq->assoc_qp = eq;
	cq->host_index = 0;
	cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
	cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);

	if (cq->queue_id > phba->sli4_hba.cq_max)
		phba->sli4_hba.cq_max = cq->queue_id;
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
 * @phba: HBA structure that indicates port to create a queue on.
 * @cqp: The queue structure array to use to create the completion queues.
 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
 *
 * This function creates a set of completion queues to support MRQ, as
 * detailed in @cqp, on a port described by @phba, by sending a
 * CREATE_CQ_SET mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @cqp entries
 * are used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @hdwq
 * array supplies the event queue to bind each completion queue to. This
 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the
 * completion queues. This function is asynchronous and will wait for the
 * mailbox command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int
lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
		   struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
		   uint32_t subtype)
{
	struct lpfc_queue *cq;
	struct lpfc_queue *eq;
	struct lpfc_mbx_cq_create_set *cq_set;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, alloclen, status = 0;
	int cnt, idx, numcq, page_idx = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	numcq = phba->cfg_nvmet_mrq;
	if (!cqp || !hdwq || !numcq)
		return -ENODEV;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	length = sizeof(struct lpfc_mbx_cq_create_set);
	length += ((numcq * cqp[0]->page_count) *
		   sizeof(struct dma_address));
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
			LPFC_SLI4_MBX_NEMBED);
	if (alloclen < length) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3098 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, length);
		status = -ENOMEM;
		goto out;
	}
	cq_set = mbox->sge_array->addr[0];
	shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);

	for (idx = 0; idx < numcq; idx++) {
		cq = cqp[idx];
		eq = hdwq[idx].hba_eq;
		if (!cq || !eq) {
			status = -ENOMEM;
			goto out;
		}
		if (!phba->sli4_hba.pc_sli4_params.supported)
			hw_page_size = cq->page_size;

		switch (idx) {
		case 0:
			bf_set(lpfc_mbx_cq_create_set_page_size,
			       &cq_set->u.request,
			       (hw_page_size / SLI4_PAGE_SIZE));
			bf_set(lpfc_mbx_cq_create_set_num_pages,
			       &cq_set->u.request, cq->page_count);
			bf_set(lpfc_mbx_cq_create_set_evt,
			       &cq_set->u.request, 1);
			bf_set(lpfc_mbx_cq_create_set_valid,
			       &cq_set->u.request, 1);
			bf_set(lpfc_mbx_cq_create_set_cqe_size,
			       &cq_set->u.request, 0);
			bf_set(lpfc_mbx_cq_create_set_num_cq,
			       &cq_set->u.request, numcq);
			bf_set(lpfc_mbx_cq_create_set_autovalid,
			       &cq_set->u.request,
			       phba->sli4_hba.pc_sli4_params.cqav);
			switch (cq->entry_count) {
			case 2048:
			case 4096:
				if (phba->sli4_hba.pc_sli4_params.cqv ==
				    LPFC_Q_CREATE_VERSION_2) {
					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
					       &cq_set->u.request,
					       cq->entry_count);
					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
					       &cq_set->u.request,
					       LPFC_CQ_CNT_WORD7);
					break;
				}
				/* fall through */
			default:
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"3118 Bad CQ count. (%d)\n",
						cq->entry_count);
				if (cq->entry_count < 256) {
					status = -EINVAL;
					goto out;
				}
				/* fall through - otherwise default to smallest */
			case 256:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_256);
				break;
			case 512:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_512);
				break;
			case 1024:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_1024);
				break;
			}
			bf_set(lpfc_mbx_cq_create_set_eq_id0,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 1:
			bf_set(lpfc_mbx_cq_create_set_eq_id1,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 2:
			bf_set(lpfc_mbx_cq_create_set_eq_id2,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 3:
			bf_set(lpfc_mbx_cq_create_set_eq_id3,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 4:
			bf_set(lpfc_mbx_cq_create_set_eq_id4,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 5:
			bf_set(lpfc_mbx_cq_create_set_eq_id5,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 6:
			bf_set(lpfc_mbx_cq_create_set_eq_id6,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 7:
			bf_set(lpfc_mbx_cq_create_set_eq_id7,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 8:
			bf_set(lpfc_mbx_cq_create_set_eq_id8,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 9:
			bf_set(lpfc_mbx_cq_create_set_eq_id9,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 10:
			bf_set(lpfc_mbx_cq_create_set_eq_id10,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 11:
			bf_set(lpfc_mbx_cq_create_set_eq_id11,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 12:
			bf_set(lpfc_mbx_cq_create_set_eq_id12,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 13:
			bf_set(lpfc_mbx_cq_create_set_eq_id13,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 14:
			bf_set(lpfc_mbx_cq_create_set_eq_id14,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 15:
			bf_set(lpfc_mbx_cq_create_set_eq_id15,
			       &cq_set->u.request, eq->queue_id);
			break;
		}

		/* link the cq onto the parent eq child list */
		list_add_tail(&cq->list, &eq->child_list);
		/* Set up completion queue's type and subtype */
		cq->type = type;
		cq->subtype = subtype;
		cq->assoc_qid = eq->queue_id;
		cq->assoc_qp = eq;
		cq->host_index = 0;
		cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
		cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
					 cq->entry_count);
		cq->chann = idx;

		rc = 0;
		list_for_each_entry(dmabuf, &cq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			cq_set->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			cq_set->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3119 CQ_CREATE_SET mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
	if (rc == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	for (idx = 0; idx < numcq; idx++) {
		cq = cqp[idx];
		cq->queue_id = rc + idx;
		if (cq->queue_id > phba->sli4_hba.cq_max)
			phba->sli4_hba.cq_max = cq->queue_id;
	}

out:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}
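
/*
 * Editor's note (illustrative): CREATE_CQ_SET carries one eq_idN field per
 * CQ in the set, which is why the switch above unrolls idx 0..15 instead of
 * indexing an array; each field lives at a different bit offset in the
 * request. Conceptually the loop performs, for every idx:
 *
 *	bind cqp[idx] to hdwq[idx].hba_eq->queue_id
 *
 * and on success the port returns a base CQID, with cqp[idx]->queue_id
 * assigned base + idx.
 */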

/**
 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 * @cq: The completion queue to associate with this mq.
 *
 * This function provides fallback (fb) functionality when the
 * mq_create_ext fails on older FW generations. Its purpose is identical
 * to mq_create_ext otherwise.
 *
 * This routine cannot fail as all attributes were previously accessed and
 * initialized in mq_create_ext.
 **/
static void
lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_dmabuf *dmabuf;
	int length;

	length = (sizeof(struct lpfc_mbx_mq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	mq_create = &mbox->u.mqe.un.mq_create;
	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
	       mq->page_count);
	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
	switch (mq->entry_count) {
	case 16:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}
}

/**
 * lpfc_mq_create - Create a mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mq.
 * @subtype: The queue's subtype.
 *
 * This function creates a mailbox queue, as detailed in @mq, on a port,
 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @cq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the MQ_CREATE mailbox command to the HBA to setup the
 * mailbox queue. This function is asynchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int32_t
lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_mbx_mq_create_ext *mq_create_ext;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!mq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
			 length, LPFC_SLI4_MBX_EMBED);

	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
	bf_set(lpfc_mbx_mq_create_ext_num_pages,
	       &mq_create_ext->u.request, mq->page_count);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.mqv);
	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
		       cq->queue_id);
	else
		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
		       cq->queue_id);
	switch (mq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0362 Unsupported MQ count. (%d)\n",
				mq->entry_count);
		if (mq->entry_count < 16) {
			status = -EINVAL;
			goto out;
		}
		/* fall through - otherwise default to smallest count */
	case 16:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
			      &mq_create_ext->u.response);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2795 MQ_CREATE_EXT failed with "
				"status x%x. Failback to MQ_CREATE.\n",
				rc);
		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
		mq_create = &mbox->u.mqe.un.mq_create;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
				      &mq_create->u.response);
	}

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2502 MQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	if (mq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	mq->type = LPFC_MQ;
	mq->assoc_qid = cq->queue_id;
	mq->subtype = subtype;
	mq->host_index = 0;
	mq->hba_index = 0;

	/* link the mq onto the parent cq child list */
	list_add_tail(&mq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_wq_create - Create a Work Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @wq: The queue structure to use to create the work queue.
 * @cq: The completion queue to bind this work queue to.
 * @subtype: The subtype of the work queue indicating its functionality.
 *
 * This function creates a work queue, as detailed in @wq, on a port, described
 * by @phba, by sending a WQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send the mailbox command to the HBA. The @wq
 * struct is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @cq
 * is used to indicate which completion queue to bind this work queue to. This
 * function will send the WQ_CREATE mailbox command to the HBA to set up the
 * work queue. The function is synchronous and will not return until the
 * mailbox command completes.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_wq_create *wq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	struct dma_address *page;
	void __iomem *bar_memmap_p;
	uint32_t db_offset;
	uint16_t pci_barset;
	uint8_t dpp_barset;
	uint32_t dpp_offset;
	unsigned long pg_addr;
	uint8_t wq_create_version;

	/* sanity check on queue memory */
	if (!wq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = wq->page_size;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	wq_create = &mbox->u.mqe.un.wq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
	       wq->page_count);
	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
	       cq->queue_id);

	/* wqv is the earliest version supported, NOT the latest */
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.wqv);

	if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
	    (wq->page_size > SLI4_PAGE_SIZE))
		wq_create_version = LPFC_Q_CREATE_VERSION_1;
	else
		wq_create_version = LPFC_Q_CREATE_VERSION_0;

	switch (wq_create_version) {
	case LPFC_Q_CREATE_VERSION_1:
		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
		       wq->entry_count);
		bf_set(lpfc_mbox_hdr_version, &shdr->request,
		       LPFC_Q_CREATE_VERSION_1);

		switch (wq->entry_size) {
		default:
		case 64:
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_64);
			break;
		case 128:
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_128);
			break;
		}
		/* Request DPP by default */
		bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
		bf_set(lpfc_mbx_wq_create_page_size,
		       &wq_create->u.request_1,
		       (wq->page_size / SLI4_PAGE_SIZE));
		page = wq_create->u.request_1.page;
		break;
	default:
		page = wq_create->u.request.page;
		break;
	}

	list_for_each_entry(dmabuf, &wq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
	}

	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2503 WQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}

	if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
		wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
				      &wq_create->u.response);
	else
		wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
				      &wq_create->u.response_1);

	if (wq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	wq->db_format = LPFC_DB_LIST_FORMAT;
	if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
		if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
			wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
					       &wq_create->u.response);
			if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
			    (wq->db_format != LPFC_DB_RING_FORMAT)) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3265 WQ[%d] doorbell format "
						"not supported: x%x\n",
						wq->queue_id, wq->db_format);
				status = -EINVAL;
				goto out;
			}
			pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
					    &wq_create->u.response);
			bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
								   pci_barset);
			if (!bar_memmap_p) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3263 WQ[%d] failed to memmap "
						"pci barset:x%x\n",
						wq->queue_id, pci_barset);
				status = -ENOMEM;
				goto out;
			}
			db_offset = wq_create->u.response.doorbell_offset;
			if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
			    (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3252 WQ[%d] doorbell offset "
						"not supported: x%x\n",
						wq->queue_id, db_offset);
				status = -EINVAL;
				goto out;
			}
			wq->db_regaddr = bar_memmap_p + db_offset;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3264 WQ[%d]: barset:x%x, offset:x%x, "
					"format:x%x\n", wq->queue_id,
					pci_barset, db_offset, wq->db_format);
		} else
			wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
	} else {
		/* Check if DPP was honored by the firmware */
		wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
					&wq_create->u.response_1);
		if (wq->dpp_enable) {
			pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
					    &wq_create->u.response_1);
			bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
								   pci_barset);
			if (!bar_memmap_p) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3267 WQ[%d] failed to memmap "
						"pci barset:x%x\n",
						wq->queue_id, pci_barset);
				status = -ENOMEM;
				goto out;
			}
			db_offset = wq_create->u.response_1.doorbell_offset;
			wq->db_regaddr = bar_memmap_p + db_offset;
			wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
					    &wq_create->u.response_1);
			dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
					    &wq_create->u.response_1);
			bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
								   dpp_barset);
			if (!bar_memmap_p) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3268 WQ[%d] failed to memmap "
						"pci barset:x%x\n",
						wq->queue_id, dpp_barset);
				status = -ENOMEM;
				goto out;
			}
			dpp_offset = wq_create->u.response_1.dpp_offset;
			wq->dpp_regaddr = bar_memmap_p + dpp_offset;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3271 WQ[%d]: barset:x%x, offset:x%x, "
					"dpp_id:x%x dpp_barset:x%x "
					"dpp_offset:x%x\n",
					wq->queue_id, pci_barset, db_offset,
					wq->dpp_id, dpp_barset, dpp_offset);

			/* Enable combined writes for DPP aperture */
			pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
#ifdef CONFIG_X86
			rc = set_memory_wc(pg_addr, 1);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3272 Cannot setup Combined "
					"Write on WQ[%d] - disable DPP\n",
					wq->queue_id);
				phba->cfg_enable_dpp = 0;
			}
#else
			phba->cfg_enable_dpp = 0;
#endif
		} else
			wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
	}
	wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
	if (wq->pring == NULL) {
		status = -ENOMEM;
		goto out;
	}
	wq->type = LPFC_WQ;
	wq->assoc_qid = cq->queue_id;
	wq->subtype = subtype;
	wq->host_index = 0;
	wq->hba_index = 0;
	wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;

	/* link the wq onto the parent cq child list */
	list_add_tail(&wq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
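/*
 * Editor's note -- illustrative sketch, not part of the driver: a typical
 * caller pairs a WQ with the CQ that will receive its completions. The
 * helper name is hypothetical and LPFC_ELS is only an example subtype;
 * queue allocation is assumed to have happened elsewhere.
 */
static int __maybe_unused
lpfc_sketch_bind_wq(struct lpfc_hba *phba, struct lpfc_queue *wq,
		    struct lpfc_queue *cq)
{
	int rc;

	/* WQ_CREATE is issued with MBX_POLL inside the call */
	rc = lpfc_wq_create(phba, wq, cq, LPFC_ELS);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"sketch: WQ create failed rc=%d\n", rc);
	return rc;
}
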
/**
 * lpfc_rq_create - Create a Receive Queue pair on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrq: The queue structure to use to create the header receive queue.
 * @drq: The queue structure to use to create the data receive queue.
 * @cq: The completion queue to bind these receive queues to.
 * @subtype: The subtype of the receive queues indicating their functionality.
 *
 * This function creates a receive buffer queue pair, as detailed in @hrq and
 * @drq, on a port, described by @phba, by sending an RQ_CREATE mailbox command
 * to the HBA.
 *
 * The @phba struct is used to send the mailbox command to the HBA. The @hrq
 * and @drq structs are used to get the entry count that is necessary to
 * determine the number of pages to use for each queue. The @cq indicates
 * which completion queue the buffers posted to these queues are bound to.
 * This function will send the RQ_CREATE mailbox command to the HBA to set up
 * the receive queue pair. The function is synchronous and will not return
 * until the mailbox command completes.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_rq_create *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	void __iomem *bar_memmap_p;
	uint32_t db_offset;
	uint16_t pci_barset;

	/* sanity check on queue memory */
	if (!hrq || !drq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	rq_create = &mbox->u.mqe.un.rq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context,
		       hrq->entry_count);
		rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size,
		       &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size,
		       &rq_create->u.request.context,
		       LPFC_RQ_PAGE_SIZE_4096);
	} else {
		switch (hrq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2535 Unsupported RQ count. (%d)\n",
					hrq->entry_count);
			if (hrq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			/* fall through - otherwise default to smallest count */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
		       LPFC_HDR_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       hrq->page_count);
	list_for_each_entry(dmabuf, &hrq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2504 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (hrq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
		hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
					&rq_create->u.response);
		if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
		    (hrq->db_format != LPFC_DB_RING_FORMAT)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3262 RQ [%d] doorbell format not "
					"supported: x%x\n", hrq->queue_id,
					hrq->db_format);
			status = -EINVAL;
			goto out;
		}

		pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
				    &rq_create->u.response);
		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
		if (!bar_memmap_p) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3269 RQ[%d] failed to memmap pci "
					"barset:x%x\n", hrq->queue_id,
					pci_barset);
			status = -ENOMEM;
			goto out;
		}

		db_offset = rq_create->u.response.doorbell_offset;
		if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
		    (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3270 RQ[%d] doorbell offset not "
					"supported: x%x\n", hrq->queue_id,
					db_offset);
			status = -EINVAL;
			goto out;
		}
		hrq->db_regaddr = bar_memmap_p + db_offset;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
				"format:x%x\n", hrq->queue_id, pci_barset,
				db_offset, hrq->db_format);
	} else {
		hrq->db_format = LPFC_DB_RING_FORMAT;
		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
	}
	hrq->type = LPFC_HRQ;
	hrq->assoc_qid = cq->queue_id;
	hrq->subtype = subtype;
	hrq->host_index = 0;
	hrq->hba_index = 0;
	hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

	/* now create the data queue */
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context, hrq->entry_count);
		if (subtype == LPFC_NVMET)
			rq_create->u.request.context.buffer_size =
				LPFC_NVMET_DATA_BUF_SIZE;
		else
			rq_create->u.request.context.buffer_size =
				LPFC_DATA_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
		       (PAGE_SIZE/SLI4_PAGE_SIZE));
	} else {
		switch (drq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2536 Unsupported RQ count. (%d)\n",
					drq->entry_count);
			if (drq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			/* fall through - otherwise default to smallest count */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		if (subtype == LPFC_NVMET)
			bf_set(lpfc_rq_context_buf_size,
			       &rq_create->u.request.context,
			       LPFC_NVMET_DATA_BUF_SIZE);
		else
			bf_set(lpfc_rq_context_buf_size,
			       &rq_create->u.request.context,
			       LPFC_DATA_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       drq->page_count);
	list_for_each_entry(dmabuf, &drq->page_list, list) {
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		status = -ENXIO;
		goto out;
	}
	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (drq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	drq->type = LPFC_DRQ;
	drq->assoc_qid = cq->queue_id;
	drq->subtype = subtype;
	drq->host_index = 0;
	drq->hba_index = 0;
	drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

	/* link the header and data RQs onto the parent cq child list */
	list_add_tail(&hrq->list, &cq->child_list);
	list_add_tail(&drq->list, &cq->child_list);

out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
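/*
 * Editor's note -- illustrative sketch, not part of the driver: receive
 * queues always come as a header/data pair bound to one CQ, and both
 * queues must be sized identically or lpfc_rq_create() fails. The helper
 * name is hypothetical and LPFC_USOL is only an example subtype.
 */
static int __maybe_unused
lpfc_sketch_bind_rq_pair(struct lpfc_hba *phba, struct lpfc_queue *hrq,
			 struct lpfc_queue *drq, struct lpfc_queue *cq)
{
	/* mirrors the entry-count check done inside lpfc_rq_create() */
	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;
	return lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
}
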
/**
 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrqp: The queue structure array to use to create the header receive queues.
 * @drqp: The queue structure array to use to create the data receive queues.
 * @cqp: The completion queue array to bind these receive queues to.
 * @subtype: The subtype of the receive queues indicating their functionality.
 *
 * This function creates a set of receive buffer queue pairs, as detailed in
 * @hrqp and @drqp, on a port, described by @phba, by sending an RQ_CREATE
 * mailbox command to the HBA.
 *
 * The @phba struct is used to send the mailbox command to the HBA. The @drqp
 * and @hrqp structs are used to get the entry counts that are necessary to
 * determine the number of pages to use for each queue. The @cqp array
 * indicates which completion queue the buffers posted to each queue pair are
 * bound to. This function will send the RQ_CREATE mailbox command to the HBA
 * to set up the receive queue pairs. The function is synchronous and will
 * not return until the mailbox command completes.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
		struct lpfc_queue **drqp, struct lpfc_queue **cqp,
		uint32_t subtype)
{
	struct lpfc_queue *hrq, *drq, *cq;
	struct lpfc_mbx_rq_create_v2 *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, alloclen, status = 0;
	int cnt, idx, numrq, page_idx = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	numrq = phba->cfg_nvmet_mrq;
	/* sanity check on array memory */
	if (!hrqp || !drqp || !cqp || !numrq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	length = sizeof(struct lpfc_mbx_rq_create_v2);
	length += ((2 * numrq * hrqp[0]->page_count) *
		   sizeof(struct dma_address));

	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
				    LPFC_SLI4_MBX_NEMBED);
	if (alloclen < length) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3099 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, length);
		status = -ENOMEM;
		goto out;
	}

	rq_create = mbox->sge_array->addr[0];
	shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;

	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
	cnt = 0;

	for (idx = 0; idx < numrq; idx++) {
		hrq = hrqp[idx];
		drq = drqp[idx];
		cq  = cqp[idx];

		/* sanity check on queue memory */
		if (!hrq || !drq || !cq) {
			status = -ENODEV;
			goto out;
		}

		if (hrq->entry_count != drq->entry_count) {
			status = -EINVAL;
			goto out;
		}

		if (idx == 0) {
			bf_set(lpfc_mbx_rq_create_num_pages,
			       &rq_create->u.request,
			       hrq->page_count);
			bf_set(lpfc_mbx_rq_create_rq_cnt,
			       &rq_create->u.request, (numrq * 2));
			bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
			       1);
			bf_set(lpfc_rq_context_base_cq,
			       &rq_create->u.request.context,
			       cq->queue_id);
			bf_set(lpfc_rq_context_data_size,
			       &rq_create->u.request.context,
			       LPFC_NVMET_DATA_BUF_SIZE);
			bf_set(lpfc_rq_context_hdr_size,
			       &rq_create->u.request.context,
			       LPFC_HDR_BUF_SIZE);
			bf_set(lpfc_rq_context_rqe_count_1,
			       &rq_create->u.request.context,
			       hrq->entry_count);
			bf_set(lpfc_rq_context_rqe_size,
			       &rq_create->u.request.context,
			       LPFC_RQE_SIZE_8);
			bf_set(lpfc_rq_context_page_size,
			       &rq_create->u.request.context,
			       (PAGE_SIZE/SLI4_PAGE_SIZE));
		}
		rc = 0;
		list_for_each_entry(dmabuf, &hrq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			rq_create->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			rq_create->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;

		rc = 0;
		list_for_each_entry(dmabuf, &drq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			rq_create->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			rq_create->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;

		hrq->db_format = LPFC_DB_RING_FORMAT;
		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
		hrq->type = LPFC_HRQ;
		hrq->assoc_qid = cq->queue_id;
		hrq->subtype = subtype;
		hrq->host_index = 0;
		hrq->hba_index = 0;
		hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

		drq->db_format = LPFC_DB_RING_FORMAT;
		drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
		drq->type = LPFC_DRQ;
		drq->assoc_qid = cq->queue_id;
		drq->subtype = subtype;
		drq->host_index = 0;
		drq->hba_index = 0;
		drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

		list_add_tail(&hrq->list, &cq->child_list);
		list_add_tail(&drq->list, &cq->child_list);
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3120 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (rc == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	/* Initialize all RQs with associated queue id */
	for (idx = 0; idx < numrq; idx++) {
		hrq = hrqp[idx];
		hrq->queue_id = rc + (2 * idx);
		drq = drqp[idx];
		drq->queue_id = rc + (2 * idx) + 1;
	}

out:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}
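/*
 * Editor's note -- illustrative sketch, not part of the driver: MRQ
 * create reports a single base queue id for the whole set; the header
 * and data queue ids of pair @idx interleave from that base, exactly as
 * the assignment loop above computes them.
 */
static void __maybe_unused
lpfc_sketch_mrq_ids(uint32_t base_qid, int idx,
		    uint32_t *hrq_id, uint32_t *drq_id)
{
	*hrq_id = base_qid + (2 * idx);		/* header RQ of pair idx */
	*drq_id = base_qid + (2 * idx) + 1;	/* data RQ of pair idx */
}
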
/**
 * lpfc_eq_destroy - Destroy an event Queue on the HBA
 * @phba: HBA structure that indicates port to destroy the queue on.
 * @eq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @eq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @eq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!eq)
		return -ENODEV;

	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
	       eq->queue_id);
	mbox->vport = eq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;

	rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2505 EQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}

	/* Remove eq from any list */
	list_del_init(&eq->list);
	mempool_free(mbox, eq->phba->mbox_mem_pool);
	return status;
}
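/*
 * Editor's note -- illustrative sketch, not part of the driver: the
 * *_destroy routines in this family all share one shape -- an embedded
 * config mailbox naming the queue id, issued with MBX_POLL, after which
 * the queue is unlinked from its parent's child list. The helper name
 * is hypothetical.
 */
static void __maybe_unused
lpfc_sketch_teardown_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	if (eq && lpfc_eq_destroy(phba, eq))
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"sketch: EQ[%d] destroy failed\n",
				eq->queue_id);
}
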
/**
 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to destroy the queue on.
 * @cq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @cq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @cq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!cq)
		return -ENODEV;
	mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
	       cq->queue_id);
	mbox->vport = cq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2506 CQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove cq from any list */
	list_del_init(&cq->list);
	mempool_free(mbox, cq->phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to destroy the queue on.
 * @mq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @mq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @mq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!mq)
		return -ENODEV;
	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
	       mq->queue_id);
	mbox->vport = mq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2507 MQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove mq from any list */
	list_del_init(&mq->list);
	mempool_free(mbox, mq->phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
 * @phba: HBA structure that indicates port to destroy the queue on.
 * @wq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @wq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @wq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!wq)
		return -ENODEV;
	mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
	       wq->queue_id);
	mbox->vport = wq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2508 WQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove wq from any list */
	list_del_init(&wq->list);
	kfree(wq->pring);
	wq->pring = NULL;
	mempool_free(mbox, wq->phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_rq_destroy - Destroy a Receive Queue pair on the HBA
 * @phba: HBA structure that indicates port to destroy the queues on.
 * @hrq: The queue structure associated with the header receive queue.
 * @drq: The queue structure associated with the data receive queue.
 *
 * This function destroys the header and data receive queues, as detailed in
 * @hrq and @drq, by sending a mailbox command, specific to the type of queue,
 * to the HBA.
 *
 * The @hrq and @drq structs are used to get the queue IDs of the queues to
 * destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		struct lpfc_queue *drq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!hrq || !drq)
		return -ENODEV;
	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       hrq->queue_id);
	mbox->vport = hrq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2509 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, hrq->phba->mbox_mem_pool);
		return -ENXIO;
	}
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       drq->queue_id);
	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2510 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	list_del_init(&hrq->list);
	list_del_init(&drq->list);
	mempool_free(mbox, hrq->phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
 * @phba: pointer to lpfc hba data structure.
 * @pdma_phys_addr0: Physical address of the 1st SGL page.
 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
 * @xritag: the xritag that ties this io to the SGL pages.
 *
 * This routine will post the sgl pages for the IO that has the xritag
 * that is in the iocbq structure. The xritag is assigned during iocbq
 * creation and persists for as long as the driver is loaded.
 * If the caller has fewer than 256 scatter gather segments to map then
 * pdma_phys_addr1 should be 0.
 * If the caller needs to map more than 256 scatter gather segments then
 * pdma_phys_addr1 should be a valid physical address.
 * Physical addresses for SGLs must be 64 byte aligned.
 * If you are going to map 2 SGLs then the first one must have 256 entries
 * and the second sgl can have between 1 and 256 entries.
 *
 * Return codes:
 *	0 - Success
 *	-ENXIO, -ENOMEM - Failure
 **/
int
lpfc_sli4_post_sgl(struct lpfc_hba *phba,
		   dma_addr_t pdma_phys_addr0,
		   dma_addr_t pdma_phys_addr1,
		   uint16_t xritag)
{
	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
	LPFC_MBOXQ_t *mbox;
	int rc;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;

	if (xritag == NO_XRI) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0364 Invalid param:\n");
		return -EINVAL;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
			sizeof(struct lpfc_mbx_post_sgl_pages) -
			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
				&mbox->u.mqe.un.post_sgl_pages;
	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2511 POST_SGL mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		return -ENXIO;
	}
	return 0;
}
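/*
 * Editor's note -- illustrative sketch, not part of the driver: posting
 * the SGL for one xri. Per the rules documented above, the second page
 * address stays zero unless more than 256 scatter gather entries must
 * be mapped. The helper name is hypothetical.
 */
static int __maybe_unused
lpfc_sketch_post_one_sgl(struct lpfc_hba *phba, dma_addr_t sgl_phys,
			 uint16_t xritag)
{
	/* single-page SGL, so pdma_phys_addr1 is 0 */
	return lpfc_sli4_post_sgl(phba, sgl_phys, 0, xritag);
}
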
/**
 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate an available xri from the driver's
 * xri bitmask. Because the xri is a logical index, the search always
 * starts at bit 0 and the first free bit is claimed under the hbalock.
 *
 * Returns
 *	A nonzero xri defined as 0 <= xri < max_xri if successful.
 *	NO_XRI if no xris are available.
 **/
uint16_t
lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
{
	uint16_t xri;

	/*
	 * Fetch the next logical xri. Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irq(&phba->hbalock);
	xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
				 phba->sli4_hba.max_cfg_param.max_xri, 0);
	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
		spin_unlock_irq(&phba->hbalock);
		return NO_XRI;
	} else {
		set_bit(xri, phba->sli4_hba.xri_bmask);
		phba->sli4_hba.max_cfg_param.xri_used++;
	}
	spin_unlock_irq(&phba->hbalock);
	return xri;
}
/**
 * __lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver. The caller is expected
 * to hold the hbalock.
 **/
static void
__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
		phba->sli4_hba.max_cfg_param.xri_used--;
	}
}

/**
 * lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver.
 **/
void
lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_xri(phba, xri);
	spin_unlock_irq(&phba->hbalock);
}
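/*
 * Editor's note -- illustrative sketch, not part of the driver: the
 * allocate/release discipline for xris. lpfc_sli4_alloc_xri() returns
 * NO_XRI when the bitmask is exhausted, and every xri taken must
 * eventually go back through lpfc_sli4_free_xri().
 */
static int __maybe_unused
lpfc_sketch_xri_cycle(struct lpfc_hba *phba)
{
	uint16_t xri = lpfc_sli4_alloc_xri(phba);

	if (xri == NO_XRI)
		return -ENOSPC;		/* pool exhausted */
	/* ... the xri would tie an IO to its SGL pages here ... */
	lpfc_sli4_free_xri(phba, xri);
	return 0;
}
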
/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function gets an xritag for the iocb. If no unused xritag is
 * available it logs a warning and returns NO_XRI (0xffff); otherwise it
 * returns the newly allocated xritag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
	uint16_t xri_index;

	xri_index = lpfc_sli4_alloc_xri(phba);
	if (xri_index == NO_XRI)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2004 Failed to allocate XRI. Last XRITAG is %d"
				" Max XRI is %d, Used XRI is %d\n",
				xri_index,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.xri_used);

	return xri_index;
}
/**
 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
 * @phba: pointer to lpfc hba data structure.
 * @post_sgl_list: pointer to els sgl entry list.
 * @post_cnt: number of els sgl entries on the list.
 *
 * This routine is invoked to post a block of driver's sgl pages to the
 * HBA using non-embedded mailbox command. No Lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
 **/
static int
lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
			struct list_head *post_sgl_list,
			int post_cnt)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2559 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
		return -ENOMEM;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
				    reqlen, LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0285 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	/* Set up the SGL pages in the non-embedded DMA pages */
	viraddr = mbox->sge_array->addr[0];
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(0));
		sgl_pg_pairs->sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(0));

		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = sglq_entry->sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}

	/* Complete initialization and perform endian conversion. */
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2513 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
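/*
 * Editor's note -- illustrative sketch, not part of the driver: the
 * non-embedded POST_SGL_PAGES payload must fit in one SLI4 page, which
 * is why the routines above and below reject oversized requests. This
 * hypothetical helper derives the resulting ceiling on page pairs per
 * mailbox from the same reqlen arithmetic.
 */
static uint32_t __maybe_unused
lpfc_sketch_max_sgl_pairs(void)
{
	return (SLI4_PAGE_SIZE - sizeof(union lpfc_sli4_cfg_shdr) -
		sizeof(uint32_t)) / sizeof(struct sgl_page_pairs);
}
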
/**
 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
 * @phba: pointer to lpfc hba data structure.
 * @nblist: pointer to nvme buffer list.
 * @count: number of nvme buffers on the list.
 *
 * This routine is invoked to post a block of @count nvme buffer sgl pages
 * from the nvme buffer list @nblist to the HBA using non-embedded mailbox
 * command. No Lock is held.
 **/
static int
lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
			    int count)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	dma_addr_t pdma_phys_bpl1;
	union lpfc_sli4_cfg_shdr *shdr;

	/* Calculate the requested length of the dma memory */
	reqlen = count * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"6118 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6119 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
				    reqlen, LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6120 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}

	/* Get the first SGE entry from the non-embedded DMA memory */
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry(lpfc_ncmd, nblist, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
			cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
		sgl_pg_pairs->sgl_pg0_addr_hi =
			cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
					 SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		sgl_pg_pairs->sgl_pg1_addr_lo =
			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
		sgl_pg_pairs->sgl_pg1_addr_hi =
			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable) {
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	} else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"6125 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
/**
 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_nblist: pointer to the nvme buffer list.
 * @sb_count: number of nvme buffers on the list.
 *
 * This routine walks a list of nvme buffers that was passed in. It attempts
 * to construct blocks of nvme buffer sgls which contain contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post to the port.
 * For a single NVME buffer sgl with a non-contiguous xri, if any, it shall
 * use the embedded SGL post mailbox command for posting. The @post_nblist
 * passed in must be a local list, thus no lock is needed when manipulating
 * the list.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 **/
int
lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
			   struct list_head *post_nblist, int sb_count)
{
	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
	int status, sgl_size;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_sgl1;
	int last_xritag = NO_XRI;
	int cur_xritag;
	LIST_HEAD(prep_nblist);
	LIST_HEAD(blck_nblist);
	LIST_HEAD(nvme_nblist);

	/* sanity check */
	if (sb_count <= 0)
		return -EINVAL;

	sgl_size = phba->cfg_sg_dma_buf_size;
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
		list_del_init(&lpfc_ncmd->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_nblist, &blck_nblist);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for NVME buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block */
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri */
				if (sgl_size > SGL_PAGE_SIZE)
					pdma_phys_sgl1 =
						lpfc_ncmd->dma_phys_sgl +
						SGL_PAGE_SIZE;
				else
					pdma_phys_sgl1 = 0;
				cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
				status = lpfc_sli4_post_sgl(
						phba, lpfc_ncmd->dma_phys_sgl,
						pdma_phys_sgl1, cur_xritag);
				if (status) {
					/* Post error. Buffer unavailable. */
					lpfc_ncmd->flags |=
						LPFC_SBUF_NOT_POSTED;
				} else {
					/* Post success. Buffer available. */
					lpfc_ncmd->flags &=
						~LPFC_SBUF_NOT_POSTED;
					lpfc_ncmd->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* put on NVME buffer sgl list */
				list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of NVME buffer list sgls */
		status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
						     post_cnt);

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset NVME buffer post count for next round of posting */
		post_cnt = 0;

		/* put posted NVME buffer-sgl posted on NVME buffer sgl list */
		while (!list_empty(&blck_nblist)) {
			list_remove_head(&blck_nblist, lpfc_ncmd,
					 struct lpfc_io_buf, list);
			if (status) {
				/* Post error. Mark buffer unavailable. */
				lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
			} else {
				/* Post success. Mark buffer available. */
				lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
				lpfc_ncmd->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
		}
	}

	/* Push NVME buffers with sgl posted to the available list */
	lpfc_io_buf_replenish(phba, &nvme_nblist);

	return num_posted;
}
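/*
 * Editor's note -- illustrative sketch, not part of the driver: the
 * batching rule applied in lpfc_sli4_post_io_sgl_list() above. Buffers
 * accumulate while their xris remain contiguous; a hole in the xri
 * numbering or hitting the non-embedded mailbox capacity closes the
 * current posting block.
 */
static bool __maybe_unused
lpfc_sketch_close_block(int last_xritag, int cur_xritag, int block_cnt)
{
	return (last_xritag != NO_XRI && cur_xritag != last_xritag + 1) ||
	       block_cnt == LPFC_NEMBED_MBOX_SGL_CNT;
}
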
/**
 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
 * @phba: pointer to lpfc_hba struct that the frame was received on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function checks the fields in the @fc_hdr to see if the FC frame is a
 * valid type of frame that the LPFC driver will handle. This function will
 * return a zero if the frame is a valid frame or a non-zero value when the
 * frame does not pass the check.
 **/
static int
lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
{
	struct fc_vft_header *fc_vft_hdr;
	uint32_t *header = (uint32_t *) fc_hdr;

	switch (fc_hdr->fh_r_ctl) {
	case FC_RCTL_DD_UNCAT:		/* uncategorized information */
	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
	case FC_RCTL_DD_CMD_STATUS:	/* command status */
	case FC_RCTL_ELS_REQ:		/* extended link services request */
	case FC_RCTL_ELS_REP:		/* extended link services reply */
	case FC_RCTL_ELS4_REQ:		/* FC-4 ELS request */
	case FC_RCTL_ELS4_REP:		/* FC-4 ELS reply */
	case FC_RCTL_BA_NOP:		/* basic link service NOP */
	case FC_RCTL_BA_ABTS:		/* basic link service abort */
	case FC_RCTL_BA_RMC:		/* remove connection */
	case FC_RCTL_BA_ACC:		/* basic accept */
	case FC_RCTL_BA_RJT:		/* basic reject */
	case FC_RCTL_BA_PRMT:
	case FC_RCTL_ACK_1:		/* acknowledge_1 */
	case FC_RCTL_ACK_0:		/* acknowledge_0 */
	case FC_RCTL_P_RJT:		/* port reject */
	case FC_RCTL_F_RJT:		/* fabric reject */
	case FC_RCTL_P_BSY:		/* port busy */
	case FC_RCTL_F_BSY:		/* fabric busy to data frame */
	case FC_RCTL_F_BSYL:		/* fabric busy to link control frame */
	case FC_RCTL_LCR:		/* link credit reset */
	case FC_RCTL_MDS_DIAGS:		/* MDS Diagnostics */
	case FC_RCTL_END:		/* end */
		break;
	case FC_RCTL_VFTH:		/* Virtual Fabric tagging Header */
		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
		return lpfc_fc_frame_check(phba, fc_hdr);
	default:
		goto drop;
	}

	switch (fc_hdr->fh_type) {
	case FC_TYPE_BLS:
	case FC_TYPE_ELS:
	case FC_TYPE_FCP:
	case FC_TYPE_CT:
	case FC_TYPE_NVME:
		break;
	case FC_TYPE_IP:
	case FC_TYPE_ILS:
	default:
		goto drop;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"2538 Received frame rctl:x%x, type:x%x, "
			"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
			fc_hdr->fh_r_ctl, fc_hdr->fh_type,
			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
			be32_to_cpu(header[4]), be32_to_cpu(header[5]),
			be32_to_cpu(header[6]));
	return 0;
drop:
	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
			"2539 Dropped frame rctl:x%x type:x%x\n",
			fc_hdr->fh_r_ctl, fc_hdr->fh_type);
	return 1;
}
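/*
 * Editor's note -- illustrative sketch, not part of the driver: the
 * receive path runs every frame through the filter above before any
 * sequence reassembly; a zero return means the r_ctl/type pair is one
 * the driver handles.
 */
static bool __maybe_unused
lpfc_sketch_frame_ok(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
{
	return lpfc_fc_frame_check(phba, fc_hdr) == 0;
}
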
/**
 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function processes the FC header to retrieve the VFI from the VF
 * header, if one exists. This function will return the VFI if one exists
 * or zero if no VF header exists.
 **/
static uint32_t
lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
{
	struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;

	if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
		return 0;
	return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
}
/**
 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
 * @phba: Pointer to the HBA structure to search for the vport on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 * @fcfi: The FC Fabric ID that the frame came from
 * @did: Destination ID of the frame
 *
 * This function searches the @phba for a vport that matches the content of the
 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
 * returns the matching vport pointer or NULL if unable to match the frame to
 * a vport.
 **/
static struct lpfc_vport *
lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
		       uint16_t fcfi, uint32_t did)
{
	struct lpfc_vport **vports;
	struct lpfc_vport *vport = NULL;
	int i;

	if (did == Fabric_DID)
		return phba->pport;
	if ((phba->pport->fc_flag & FC_PT2PT) &&
	    !(phba->link_state == LPFC_HBA_READY))
		return phba->pport;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			if (phba->fcf.fcfi == fcfi &&
			    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
			    vports[i]->fc_myDID == did) {
				vport = vports[i];
				break;
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
	return vport;
}
/**
 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
 * @vport: The vport to work on.
 *
 * This function updates the receive sequence time stamp for this vport. The
 * receive sequence time stamp indicates the time that the last frame of the
 * sequence that has been idle for the longest amount of time was received.
 * The driver uses this time stamp to indicate if any received sequences have
 * timed out.
 **/
static void
lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *dmabuf = NULL;

	/* get the oldest sequence on the rcv list */
	h_buf = list_get_first(&vport->rcv_buffer_list,
			       struct lpfc_dmabuf, list);
	if (!h_buf)
		return;
	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
}
/**
 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function cleans up all outstanding received sequences. This is called
 * by the driver when a link event or user action invalidates all the received
 * sequences.
 **/
void
lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;

	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
}
/**
 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function determines whether any received sequences have timed out by
 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
 * indicates that there is at least one timed out sequence this routine will
 * go through the received sequences one at a time from most inactive to most
 * active to determine which ones need to be cleaned up. Once it has determined
 * that a sequence needs to be cleaned up it will simply free up the resources
 * without sending an abort.
 **/
void
lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;
	unsigned long timeout;
	int abort_count = 0;

	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
		   vport->rcv_buffer_time_stamp);
	if (list_empty(&vport->rcv_buffer_list) ||
	    time_before(jiffies, timeout))
		return;
	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
			   dmabuf->time_stamp);
		if (time_before(jiffies, timeout))
			break;
		abort_count++;
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
	if (abort_count)
		lpfc_update_rcv_time_stamp(vport);
}
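/*
 * Editor's note -- illustrative sketch, not part of the driver: a
 * sequence is treated as timed out once E_D_TOV milliseconds have
 * passed since its newest frame arrived -- the same jiffies arithmetic
 * used by lpfc_rcv_seq_check_edtov() above.
 */
static bool __maybe_unused
lpfc_sketch_seq_timed_out(struct lpfc_vport *vport, unsigned long time_stamp)
{
	unsigned long deadline = time_stamp +
				 msecs_to_jiffies(vport->phba->fc_edtov);

	return time_after_eq(jiffies, deadline);
}
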
17159 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: The vport on which the frame was received.
17160 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
17162 * This function searches through the existing incomplete sequences that have
17163 * been sent to this @vport. If the frame matches one of the incomplete
17164 * sequences then the dbuf in the @dmabuf is added to the list of frames that
17165 * make up that sequence. If no sequence is found that matches this frame then
17166 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
17167 * This function returns a pointer to the first dmabuf in the sequence list that
17168 * the frame was linked to.
17170 static struct hbq_dmabuf *
17171 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17173 struct fc_frame_header *new_hdr;
17174 struct fc_frame_header *temp_hdr;
17175 struct lpfc_dmabuf *d_buf;
17176 struct lpfc_dmabuf *h_buf;
17177 struct hbq_dmabuf *seq_dmabuf = NULL;
17178 struct hbq_dmabuf *temp_dmabuf = NULL;
17181 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17182 dmabuf->time_stamp = jiffies;
17183 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17185 /* Use the hdr_buf to find the sequence that this frame belongs to */
17186 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17187 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17188 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17189 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17190 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17192 /* found a pending sequence that matches this frame */
17193 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17198 * This indicates first frame received for this sequence.
17199 * Queue the buffer on the vport's rcv_buffer_list.
17201 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17202 lpfc_update_rcv_time_stamp(vport);
17205 temp_hdr = seq_dmabuf->hbuf.virt;
17206 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
17207 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17208 list_del_init(&seq_dmabuf->hbuf.list);
17209 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17210 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17211 lpfc_update_rcv_time_stamp(vport);
17214 /* move this sequence to the tail to indicate a young sequence */
17215 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
17216 seq_dmabuf->time_stamp = jiffies;
17217 lpfc_update_rcv_time_stamp(vport);
17218 if (list_empty(&seq_dmabuf->dbuf.list)) {
17219 temp_hdr = dmabuf->hbuf.virt;
17220 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17223 /* find the correct place in the sequence to insert this frame */
17224 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
17226 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17227 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
17229 * If the frame's sequence count is greater than the frame on
17230 * the list then insert the frame right after this frame
17232 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
17233 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17234 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
17239 if (&d_buf->list == &seq_dmabuf->dbuf.list)
 break;
17241 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
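/*
 * Illustrative sketch (not part of the driver): a frame belongs to a
 * pending sequence when its (SEQ_ID, OX_ID, S_ID) tuple matches, which is
 * exactly the test used in the list walks above and below. The helper
 * name is hypothetical.
 */
static inline bool example_frame_matches_seq(const struct fc_frame_header *a,
					     const struct fc_frame_header *b)
{
	return a->fh_seq_id == b->fh_seq_id &&
	       a->fh_ox_id == b->fh_ox_id &&
	       !memcmp(a->fh_s_id, b->fh_s_id, 3);
}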
17250 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
17251 * @vport: pointer to a virtual port
17252 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17254 * This function tries to abort the partially assembled sequence described
17255 * by the information from the basic abort @dmabuf. It checks whether such a
17256 * partially assembled sequence is held by the driver. If so, it frees up all
17257 * the frames from the partially assembled sequence.
17260 * true -- if a matching partially assembled sequence is present and all
17261 * of its frames were freed with the sequence;
17262 * false -- if no matching partially assembled sequence is present, so
17263 * nothing was aborted in the lower layer driver
17266 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
17267 struct hbq_dmabuf *dmabuf)
17269 struct fc_frame_header *new_hdr;
17270 struct fc_frame_header *temp_hdr;
17271 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
17272 struct hbq_dmabuf *seq_dmabuf = NULL;
17274 /* Use the hdr_buf to find the sequence that matches this frame */
17275 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17276 INIT_LIST_HEAD(&dmabuf->hbuf.list);
17277 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17278 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17279 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17280 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17281 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17282 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17284 /* found a pending sequence that matches this frame */
17285 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17289 /* Free up all the frames from the partially assembled sequence */
17291 list_for_each_entry_safe(d_buf, n_buf,
17292 &seq_dmabuf->dbuf.list, list) {
17293 list_del_init(&d_buf->list);
17294 lpfc_in_buf_free(vport->phba, d_buf);
17302 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
17303 * @vport: pointer to a virtual port
17304 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17306 * This function tries to abort the assembled sequence at the upper level
17307 * protocol, described by the information from the basic abort @dmabuf. It
17308 * checks whether such a pending context exists at the upper level protocol.
17309 * If so, it shall clean up the pending context.
17312 * true -- if a matching pending context of the sequence was cleaned
17314 * false -- if no matching pending context of the sequence is present
17318 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17320 struct lpfc_hba *phba = vport->phba;
17323 /* Accepting abort at ulp with SLI4 only */
17324 if (phba->sli_rev < LPFC_SLI_REV4)
 return false;
17327 /* Give all interested upper level protocols a chance to handle the abort */
17328 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
17336 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
17337 * @phba: Pointer to HBA context object.
17338 * @cmd_iocbq: pointer to the command iocbq structure.
17339 * @rsp_iocbq: pointer to the response iocbq structure.
17341 * This function handles the sequence abort response iocb command complete
17342 * event. It properly releases the memory allocated to the sequence abort
 * accept iocb.
17346 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
17347 struct lpfc_iocbq *cmd_iocbq,
17348 struct lpfc_iocbq *rsp_iocbq)
17350 struct lpfc_nodelist *ndlp;
17353 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
17354 lpfc_nlp_put(ndlp);
17355 lpfc_nlp_not_used(ndlp);
17356 lpfc_sli_release_iocbq(phba, cmd_iocbq);
17359 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
17360 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
17361 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17362 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
17363 rsp_iocbq->iocb.ulpStatus,
17364 rsp_iocbq->iocb.un.ulpWord[4]);
17368 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
17369 * @phba: Pointer to HBA context object.
17370 * @xri: xri id in transaction.
17372 * This function validates that the xri maps to the known range of XRIs
17373 * allocated and used by the driver.
17376 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
17381 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
17382 if (xri == phba->sli4_hba.xri_ids[i])
17389 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
17390 * @vport: Pointer to the vport on which the sequence was received.
17391 * @fc_hdr: pointer to a FC frame header.
 * @aborted: true if the partially assembled sequence was aborted, false otherwise.
17393 * This function sends a basic response to a previous unsol sequence abort
17394 * event after aborting the sequence handling.
17397 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
17398 struct fc_frame_header *fc_hdr, bool aborted)
17400 struct lpfc_hba *phba = vport->phba;
17401 struct lpfc_iocbq *ctiocb = NULL;
17402 struct lpfc_nodelist *ndlp;
17403 uint16_t oxid, rxid, xri, lxri;
17404 uint32_t sid, fctl;
17408 if (!lpfc_is_link_up(phba))
 return;
17411 sid = sli4_sid_from_fc_hdr(fc_hdr);
17412 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
17413 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
17415 ndlp = lpfc_findnode_did(vport, sid);
17417 ndlp = lpfc_nlp_init(vport, sid);
17419 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17420 "1268 Failed to allocate ndlp for "
17421 "oxid:x%x SID:x%x\n", oxid, sid);
17424 /* Put ndlp onto pport node list */
17425 lpfc_enqueue_node(vport, ndlp);
17426 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
17427 /* re-setup ndlp without removing from node list */
17428 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
17430 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17431 "3275 Failed to active ndlp found "
17432 "for oxid:x%x SID:x%x\n", oxid, sid);
17437 /* Allocate buffer for rsp iocb */
17438 ctiocb = lpfc_sli_get_iocbq(phba);
17442 /* Extract the F_CTL field from FC_HDR */
17443 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
17445 icmd = &ctiocb->iocb;
17446 icmd->un.xseq64.bdl.bdeSize = 0;
17447 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
17448 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
17449 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
17450 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
17452 /* Fill in the rest of iocb fields */
17453 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
17454 icmd->ulpBdeCount = 0;
17456 icmd->ulpClass = CLASS3;
17457 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
17458 ctiocb->context1 = lpfc_nlp_get(ndlp);
17460 ctiocb->vport = phba->pport;
17461 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
17462 ctiocb->sli4_lxritag = NO_XRI;
17463 ctiocb->sli4_xritag = NO_XRI;
17465 if (fctl & FC_FC_EX_CTX)
17466 /* Exchange responder sent the abort so we
 * owe the oxid.
 */
 xri = oxid;
 else
 xri = rxid;
17472 lxri = lpfc_sli4_xri_inrange(phba, xri);
17473 if (lxri != NO_XRI)
17474 lpfc_set_rrq_active(phba, ndlp, lxri,
17475 (xri == oxid) ? rxid : oxid, 0);
17476 /* For BA_ABTS from exchange responder, if the logical xri with
17477 * the oxid maps to the FCP XRI range, the port no longer has
17478 * that exchange context, send a BLS_RJT. Override the IOCB for
 * a BA_RJT.
 */
17481 if ((fctl & FC_FC_EX_CTX) &&
17482 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
17483 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17484 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17485 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17486 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17489 /* If BA_ABTS failed to abort a partially assembled receive sequence,
17490 * the driver no longer has that exchange, send a BLS_RJT. Override
17491 * the IOCB for a BA_RJT.
 */
17493 if (aborted == false) {
17494 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17495 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17496 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17497 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17500 if (fctl & FC_FC_EX_CTX) {
17501 /* ABTS sent by responder to CT exchange, construction
17502 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
17503 * field and RX_ID from ABTS for RX_ID field.
17505 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
17507 /* ABTS sent by initiator to CT exchange, construction
17508 * of BA_ACC will need to allocate a new XRI as for the
 * failed exchange.
 */
17511 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
17513 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
17514 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
17516 /* Xmit CT abts response on exchange <xid> */
17517 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
17518 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
17519 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
17521 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
17522 if (rc == IOCB_ERROR) {
17523 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
17524 "2925 Failed to issue CT ABTS RSP x%x on "
17525 "xri x%x, Data x%x\n",
17526 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
17528 lpfc_nlp_put(ndlp);
17529 ctiocb->context1 = NULL;
17530 lpfc_sli_release_iocbq(phba, ctiocb);
17535 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
17536 * @vport: Pointer to the vport on which this sequence was received
17537 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17539 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
17540 * receive sequence is only partially assembled by the driver, it shall abort
17541 * the partially assembled frames for the sequence. Otherwise, if the
17542 * unsolicited receive sequence has been completely assembled and passed to
17543 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to
17544 * indicate that the unsolicited sequence has been aborted. After that, it
17545 * will issue a basic accept to accept the abort.
17548 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
17549 struct hbq_dmabuf *dmabuf)
17551 struct lpfc_hba *phba = vport->phba;
17552 struct fc_frame_header fc_hdr;
17556 /* Make a copy of fc_hdr before the dmabuf is released */
17557 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
17558 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
17560 if (fctl & FC_FC_EX_CTX) {
17561 /* ABTS by responder to exchange, no cleanup needed */
 aborted = true;
 } else {
17564 /* ABTS by initiator to exchange, need to do cleanup */
17565 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
17566 if (aborted == false)
17567 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
 }
17569 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17571 if (phba->nvmet_support) {
17572 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
17576 /* Respond with BA_ACC or BA_RJT accordingly */
17577 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
17581 * lpfc_seq_complete - Indicates if a sequence is complete
17582 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17584 * This function checks the sequence, starting with the frame described by
17585 * @dmabuf, to see if all the frames associated with this sequence are present.
17586 * The frames associated with this sequence are linked to the @dmabuf using the
17587 * dbuf list. This function looks for three major things: 1) that the first
17588 * frame has a sequence count of zero; 2) that there is a frame with the last
17589 * frame of the sequence bit set; 3) that there are no holes in the sequence
17590 * count. The function returns 1 when the sequence is complete, otherwise 0.
17593 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
17595 struct fc_frame_header *hdr;
17596 struct lpfc_dmabuf *d_buf;
17597 struct hbq_dmabuf *seq_dmabuf;
17601 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17602 /* make sure first frame of sequence has a sequence count of zero */
17603 if (hdr->fh_seq_cnt != seq_count)
 return 0;
17605 fctl = (hdr->fh_f_ctl[0] << 16 |
17606 hdr->fh_f_ctl[1] << 8 |
 hdr->fh_f_ctl[2]);
17608 /* If last frame of sequence we can return success. */
17609 if (fctl & FC_FC_END_SEQ)
 return 1;
17611 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
17612 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17613 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17614 /* If there is a hole in the sequence count then fail. */
17615 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
 return 0;
17617 fctl = (hdr->fh_f_ctl[0] << 16 |
17618 hdr->fh_f_ctl[1] << 8 |
 hdr->fh_f_ctl[2]);
17620 /* If last frame of sequence we can return success. */
17621 if (fctl & FC_FC_END_SEQ)
 return 1;
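/*
 * Illustrative sketch (not part of the driver): F_CTL is a 24-bit field
 * carried in three header bytes; the checks above assemble it the same
 * way before testing bits such as FC_FC_END_SEQ. The helper name is
 * hypothetical.
 */
static inline uint32_t example_fctl_from_hdr(const struct fc_frame_header *hdr)
{
	return (hdr->fh_f_ctl[0] << 16) |
	       (hdr->fh_f_ctl[1] << 8) |
	       hdr->fh_f_ctl[2];
}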
17628 * lpfc_prep_seq - Prep sequence for ULP processing
17629 * @vport: Pointer to the vport on which this sequence was received
17630 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
17632 * This function takes a sequence, described by a list of frames, and creates
17633 * a list of iocbq structures to describe the sequence. This iocbq list will be
17634 * issued to the generic unsolicited sequence handler. This routine
17635 * returns a pointer to the first iocbq in the list. If the function is unable
17636 * to allocate an iocbq then it throws out the received frames that could not
17637 * be described and returns a pointer to the first iocbq. If unable to
17638 * allocate any iocbqs (including the first) this function will return NULL.
17640 static struct lpfc_iocbq *
17641 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
17643 struct hbq_dmabuf *hbq_buf;
17644 struct lpfc_dmabuf *d_buf, *n_buf;
17645 struct lpfc_iocbq *first_iocbq, *iocbq;
17646 struct fc_frame_header *fc_hdr;
17648 uint32_t len, tot_len;
17649 struct ulp_bde64 *pbde;
17651 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17652 /* remove from receive buffer list */
17653 list_del_init(&seq_dmabuf->hbuf.list);
17654 lpfc_update_rcv_time_stamp(vport);
17655 /* get the Remote Port's SID */
17656 sid = sli4_sid_from_fc_hdr(fc_hdr);
17658 /* Get an iocbq struct to fill in. */
17659 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
17661 /* Initialize the first IOCB. */
17662 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
17663 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
17664 first_iocbq->vport = vport;
17666 /* Check FC Header to see what TYPE of frame we are rcv'ing */
17667 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
17668 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
17669 first_iocbq->iocb.un.rcvels.parmRo =
17670 sli4_did_from_fc_hdr(fc_hdr);
17671 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
17673 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
17674 first_iocbq->iocb.ulpContext = NO_XRI;
17675 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
17676 be16_to_cpu(fc_hdr->fh_ox_id);
17677 /* iocbq is prepped for internal consumption. Physical vpi. */
17678 first_iocbq->iocb.unsli3.rcvsli3.vpi =
17679 vport->phba->vpi_ids[vport->vpi];
17680 /* put the first buffer into the first IOCBq */
17681 tot_len = bf_get(lpfc_rcqe_length,
17682 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
17684 first_iocbq->context2 = &seq_dmabuf->dbuf;
17685 first_iocbq->context3 = NULL;
17686 first_iocbq->iocb.ulpBdeCount = 1;
17687 if (tot_len > LPFC_DATA_BUF_SIZE)
17688 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17689 LPFC_DATA_BUF_SIZE;
 else
17691 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
17693 first_iocbq->iocb.un.rcvels.remoteID = sid;
17695 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17697 iocbq = first_iocbq;
17699 * Each IOCBq can have two Buffers assigned, so go through the list
17700 * of buffers for this sequence and save two buffers in each IOCBq
17702 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
17704 lpfc_in_buf_free(vport->phba, d_buf);
17707 if (!iocbq->context3) {
17708 iocbq->context3 = d_buf;
17709 iocbq->iocb.ulpBdeCount++;
17710 /* We need to get the size out of the right CQE */
17711 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17712 len = bf_get(lpfc_rcqe_length,
17713 &hbq_buf->cq_event.cqe.rcqe_cmpl);
17714 pbde = (struct ulp_bde64 *)
17715 &iocbq->iocb.unsli3.sli3Words[4];
17716 if (len > LPFC_DATA_BUF_SIZE)
17717 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
 else
17719 pbde->tus.f.bdeSize = len;
17721 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
17724 iocbq = lpfc_sli_get_iocbq(vport->phba);
17727 first_iocbq->iocb.ulpStatus =
17728 IOSTAT_FCP_RSP_ERROR;
17729 first_iocbq->iocb.un.ulpWord[4] =
17730 IOERR_NO_RESOURCES;
17732 lpfc_in_buf_free(vport->phba, d_buf);
17735 /* We need to get the size out of the right CQE */
17736 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17737 len = bf_get(lpfc_rcqe_length,
17738 &hbq_buf->cq_event.cqe.rcqe_cmpl);
17739 iocbq->context2 = d_buf;
17740 iocbq->context3 = NULL;
17741 iocbq->iocb.ulpBdeCount = 1;
17742 if (len > LPFC_DATA_BUF_SIZE)
17743 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17744 LPFC_DATA_BUF_SIZE;
 else
17746 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
17749 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17751 iocbq->iocb.un.rcvels.remoteID = sid;
17752 list_add_tail(&iocbq->list, &first_iocbq->list);
17755 return first_iocbq;
17759 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
17760 struct hbq_dmabuf *seq_dmabuf)
17762 struct fc_frame_header *fc_hdr;
17763 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
17764 struct lpfc_hba *phba = vport->phba;
17766 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17767 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
17769 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17770 "2707 Ring %d handler: Failed to allocate "
17771 "iocb Rctl x%x Type x%x received\n",
17773 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17776 if (!lpfc_complete_unsol_iocb(phba,
17777 phba->sli4_hba.els_wq->pring,
17778 iocbq, fc_hdr->fh_r_ctl,
17780 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17781 "2540 Ring %d handler: unexpected Rctl "
17782 "x%x Type x%x received\n",
17784 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17786 /* Free iocb created in lpfc_prep_seq */
17787 list_for_each_entry_safe(curr_iocb, next_iocb,
17788 &iocbq->list, list) {
17789 list_del_init(&curr_iocb->list);
17790 lpfc_sli_release_iocbq(phba, curr_iocb);
17792 lpfc_sli_release_iocbq(phba, iocbq);
17796 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17797 struct lpfc_iocbq *rspiocb)
17799 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
17801 if (pcmd && pcmd->virt)
17802 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17804 lpfc_sli_release_iocbq(phba, cmdiocb);
17805 lpfc_drain_txq(phba);
17809 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
17810 struct hbq_dmabuf *dmabuf)
17812 struct fc_frame_header *fc_hdr;
17813 struct lpfc_hba *phba = vport->phba;
17814 struct lpfc_iocbq *iocbq = NULL;
17815 union lpfc_wqe *wqe;
17816 struct lpfc_dmabuf *pcmd = NULL;
17817 uint32_t frame_len;
17819 unsigned long iflags;
17821 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17822 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
17824 /* Send the received frame back */
17825 iocbq = lpfc_sli_get_iocbq(phba);
17827 /* Queue cq event and wakeup worker thread to process it */
17828 spin_lock_irqsave(&phba->hbalock, iflags);
17829 list_add_tail(&dmabuf->cq_event.list,
17830 &phba->sli4_hba.sp_queue_event);
17831 phba->hba_flag |= HBA_SP_QUEUE_EVT;
17832 spin_unlock_irqrestore(&phba->hbalock, iflags);
17833 lpfc_worker_wake_up(phba);
17837 /* Allocate buffer for command payload */
17838 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
17840 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
17842 if (!pcmd || !pcmd->virt)
 goto exit;
17845 INIT_LIST_HEAD(&pcmd->list);
17847 /* copyin the payload */
17848 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
17850 /* fill in BDE's for command */
17851 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
17852 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
17853 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
17854 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
17856 iocbq->context2 = pcmd;
17857 iocbq->vport = vport;
17858 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
17859 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
17862 * Setup rest of the iocb as though it were a WQE
17863 * Build the SEND_FRAME WQE
17865 wqe = (union lpfc_wqe *)&iocbq->iocb;
17867 wqe->send_frame.frame_len = frame_len;
17868 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
17869 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
17870 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
17871 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
17872 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
17873 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
17875 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
17876 iocbq->iocb.ulpLe = 1;
17877 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
17878 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
17879 if (rc == IOCB_ERROR)
 goto exit;
17882 lpfc_in_buf_free(phba, &dmabuf->dbuf);
 return;

exit:
17886 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17887 "2023 Unable to process MDS loopback frame\n");
17888 if (pcmd && pcmd->virt)
17889 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17892 lpfc_sli_release_iocbq(phba, iocbq);
17893 lpfc_in_buf_free(phba, &dmabuf->dbuf);
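/*
 * Illustrative sketch (not part of the driver): the 24-byte FC header is
 * loaded into the SEND_FRAME WQE as six CPU-endian words, mirroring the
 * fc_hdr_wd0..fc_hdr_wd5 assignments above. The helper name is
 * hypothetical.
 */
static inline void example_copy_fc_hdr_words(uint32_t *wd,
					     const struct fc_frame_header *fc_hdr)
{
	const __be32 *src = (const __be32 *)fc_hdr;
	int i;

	for (i = 0; i < 6; i++)
		wd[i] = be32_to_cpu(src[i]);
}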
17897 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
17898 * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to the hbq_dmabuf that describes the received frame.
17900 * This function is called with no lock held. This function processes all
17901 * the received buffers and gives them to the upper layers when a received
17902 * buffer indicates that it is the final frame in the sequence. The interrupt
17903 * service routine processes received buffers in interrupt context.
17904 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
17905 * appropriate receive function when the final frame in a sequence is received.
17908 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
17909 struct hbq_dmabuf *dmabuf)
17911 struct hbq_dmabuf *seq_dmabuf;
17912 struct fc_frame_header *fc_hdr;
17913 struct lpfc_vport *vport;
17917 /* Process each received buffer */
17918 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17920 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
17921 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
17922 vport = phba->pport;
17923 /* Handle MDS Loopback frames */
17924 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
17928 /* check to see if this a valid type of frame */
17929 if (lpfc_fc_frame_check(phba, fc_hdr)) {
17930 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17934 if ((bf_get(lpfc_cqe_code,
17935 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
17936 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
17937 &dmabuf->cq_event.cqe.rcqe_cmpl);
17939 fcfi = bf_get(lpfc_rcqe_fcf_id,
17940 &dmabuf->cq_event.cqe.rcqe_cmpl);
17942 /* d_id this frame is directed to */
17943 did = sli4_did_from_fc_hdr(fc_hdr);
17945 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
17947 /* throw out the frame */
17948 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17952 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
17953 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
17954 (did != Fabric_DID)) {
17956 * Throw out the frame if we are not pt2pt.
17957 * The pt2pt protocol allows for discovery frames
17958 * to be received without a registered VPI.
17960 if (!(vport->fc_flag & FC_PT2PT) ||
17961 (phba->link_state == LPFC_HBA_READY)) {
17962 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17967 /* Handle the basic abort sequence (BA_ABTS) event */
17968 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
17969 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
17973 /* Link this frame */
17974 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
17976 /* unable to add frame to vport - throw it out */
17977 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17980 /* If not last frame in sequence continue processing frames. */
17981 if (!lpfc_seq_complete(seq_dmabuf))
17984 /* Send the complete sequence to the upper layer protocol */
17985 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
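/*
 * Note: the receive path above dispatches in this order: MDS loopback
 * frames, basic frame validation, FCFI/DID-to-vport lookup, VPI
 * registration check, BA_ABTS handling, linking the frame into a pending
 * sequence, and finally handing a completed sequence to the ULP.
 */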
17989 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
17990 * @phba: pointer to lpfc hba data structure.
17992 * This routine is invoked to post rpi header templates to the
17993 * HBA consistent with the SLI-4 interface spec. This routine
17994 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
17995 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
17997 * This routine does not require any locks. Its usage is expected
17998 * to be driver load or reset recovery when the driver is active.
18003 * -EIO - The mailbox failed to complete successfully.
18004 * When this error occurs, the driver is not guaranteed
18005 * to have any rpi regions posted to the device and
18006 * must either attempt to repost the regions or take a
18010 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
18012 struct lpfc_rpi_hdr *rpi_page;
18016 /* SLI4 ports that support extents do not require RPI headers. */
18017 if (!phba->sli4_hba.rpi_hdrs_in_use)
18019 if (phba->sli4_hba.extents_in_use)
18022 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
18024 * Assign the rpi headers a physical rpi only if the driver
18025 * has not initialized those resources. A port reset only
18026 * needs the headers posted.
18028 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
18030 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18032 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
18033 if (rc != MBX_SUCCESS) {
18034 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18035 "2008 Error %d posting all rpi "
18043 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
18044 LPFC_RPI_RSRC_RDY);
18049 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
18050 * @phba: pointer to lpfc hba data structure.
18051 * @rpi_page: pointer to the rpi memory region.
18053 * This routine is invoked to post a single rpi header to the
18054 * HBA consistent with the SLI-4 interface spec. This memory region
18055 * maps up to 64 rpi context regions.
18059 * -ENOMEM - No available memory
18060 * -EIO - The mailbox failed to complete successfully.
18063 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
18065 LPFC_MBOXQ_t *mboxq;
18066 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
18068 uint32_t shdr_status, shdr_add_status;
18069 union lpfc_sli4_cfg_shdr *shdr;
18071 /* SLI4 ports that support extents do not require RPI headers. */
18072 if (!phba->sli4_hba.rpi_hdrs_in_use)
18074 if (phba->sli4_hba.extents_in_use)
18077 /* The port is notified of the header region via a mailbox command. */
18078 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18080 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18081 "2001 Unable to allocate memory for issuing "
18082 "SLI_CONFIG_SPECIAL mailbox command\n");
18086 /* Post all rpi memory regions to the port. */
18087 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
18088 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18089 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18090 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
18091 sizeof(struct lpfc_sli4_cfg_mhdr),
18092 LPFC_SLI4_MBX_EMBED);
18095 /* Post the physical rpi to the port for this rpi header. */
18096 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18097 rpi_page->start_rpi);
18098 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18099 hdr_tmpl, rpi_page->page_count);
18101 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18102 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
18103 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18104 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18105 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18106 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18107 if (rc != MBX_TIMEOUT)
18108 mempool_free(mboxq, phba->mbox_mem_pool);
18109 if (shdr_status || shdr_add_status || rc) {
18110 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18111 "2514 POST_RPI_HDR mailbox failed with "
18112 "status x%x add_status x%x, mbx status x%x\n",
18113 shdr_status, shdr_add_status, rc);
18117 * The next_rpi stores the next logical module-64 rpi value used
18118 * to post physical rpis in subsequent rpi postings.
18120 spin_lock_irq(&phba->hbalock);
18121 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18122 spin_unlock_irq(&phba->hbalock);
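/*
 * Illustrative sketch (not part of the driver): SLI4 config mailbox
 * completions report status in the returned cfg_shdr in addition to the
 * mailbox return code; both must be checked, as done above. The helper
 * name is hypothetical.
 */
static inline bool example_sli4_cfg_failed(union lpfc_sli4_cfg_shdr *shdr,
					   int rc)
{
	return rc != MBX_SUCCESS ||
	       bf_get(lpfc_mbox_hdr_status, &shdr->response) ||
	       bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
}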
18128 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
18129 * @phba: pointer to lpfc hba data structure.
18131 * This routine is invoked to allocate an rpi from the range of rpis
18132 * managed by the driver. If the pool of available rpis runs low, it
18133 * also posts an additional rpi header page to the port.
18137 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
18138 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
18141 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
18144 uint16_t max_rpi, rpi_limit;
18145 uint16_t rpi_remaining, lrpi = 0;
18146 struct lpfc_rpi_hdr *rpi_hdr;
18147 unsigned long iflag;
18150 * Fetch the next logical rpi. Because this index is logical,
18151 * the driver starts at 0 each time.
18153 spin_lock_irqsave(&phba->hbalock, iflag);
18154 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
18155 rpi_limit = phba->sli4_hba.next_rpi;
18157 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
18158 if (rpi >= rpi_limit)
18159 rpi = LPFC_RPI_ALLOC_ERROR;
 else {
18161 set_bit(rpi, phba->sli4_hba.rpi_bmask);
18162 phba->sli4_hba.max_cfg_param.rpi_used++;
18163 phba->sli4_hba.rpi_count++;
 }
18165 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18166 "0001 rpi:%x max:%x lim:%x\n",
18167 (int) rpi, max_rpi, rpi_limit);
18170 * Don't try to allocate more rpi header regions if the device limit
18171 * has been exhausted.
18173 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
18174 (phba->sli4_hba.rpi_count >= max_rpi)) {
18175 spin_unlock_irqrestore(&phba->hbalock, iflag);
18180 * RPI header postings are not required for SLI4 ports capable of
 * extents.
 */
18183 if (!phba->sli4_hba.rpi_hdrs_in_use) {
18184 spin_unlock_irqrestore(&phba->hbalock, iflag);
18189 * If the driver is running low on rpi resources, allocate another
18190 * page now. Note that the next_rpi value is used because
18191 * it represents how many are actually in use whereas max_rpi notes
18192 * how many the device supports at most.
18194 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
18195 spin_unlock_irqrestore(&phba->hbalock, iflag);
18196 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
18197 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
18199 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18200 "2002 Error Could not grow rpi "
18203 lrpi = rpi_hdr->start_rpi;
18204 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18205 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
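/*
 * Illustrative usage sketch (not part of the driver): a caller allocates
 * an rpi, treats LPFC_RPI_ALLOC_ERROR as failure, and returns the rpi to
 * the pool when finished, e.g.:
 *
 *	rpi = lpfc_sli4_alloc_rpi(phba);
 *	if (rpi == LPFC_RPI_ALLOC_ERROR)
 *		return -ENOMEM;
 *	...
 *	lpfc_sli4_free_rpi(phba, rpi);
 */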
18213 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
18214 * @phba: pointer to lpfc hba data structure.
 * @rpi: the rpi to release.
18216 * This routine is invoked to release an rpi to the pool of
18217 * available rpis maintained by the driver. The caller must hold hbalock.
18220 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18222 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
18223 phba->sli4_hba.rpi_count--;
18224 phba->sli4_hba.max_cfg_param.rpi_used--;
18229 * lpfc_sli4_free_rpi - Release an rpi for reuse.
18230 * @phba: pointer to lpfc hba data structure.
 * @rpi: the rpi to release.
18232 * This routine is invoked to release an rpi to the pool of
18233 * available rpis maintained by the driver.
18236 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18238 spin_lock_irq(&phba->hbalock);
18239 __lpfc_sli4_free_rpi(phba, rpi);
18240 spin_unlock_irq(&phba->hbalock);
18244 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
18245 * @phba: pointer to lpfc hba data structure.
18247 * This routine is invoked to remove the memory region that
18248 * provided rpi via a bitmask.
18251 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
18253 kfree(phba->sli4_hba.rpi_bmask);
18254 kfree(phba->sli4_hba.rpi_ids);
18255 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
18259 * lpfc_sli4_resume_rpi - Resume I/O on an rpi
 * @ndlp: pointer to the node whose rpi is being resumed.
 * @cmpl: optional mailbox completion handler.
 * @arg: caller context passed to the completion handler.
18262 * This routine is invoked to issue a RESUME_RPI mailbox command to the
18263 * port to resume traffic on the rpi associated with @ndlp.
18266 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
18267 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
18269 LPFC_MBOXQ_t *mboxq;
18270 struct lpfc_hba *phba = ndlp->phba;
18273 /* The port is notified to resume the rpi via a mailbox command. */
18274 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18278 /* Construct the RESUME_RPI mailbox command */
18279 lpfc_resume_rpi(mboxq, ndlp);
18281 mboxq->mbox_cmpl = cmpl;
18282 mboxq->ctx_buf = arg;
18283 mboxq->ctx_ndlp = ndlp;
18285 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18286 mboxq->vport = ndlp->vport;
18287 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18288 if (rc == MBX_NOT_FINISHED) {
18289 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18290 "2010 Resume RPI Mailbox failed "
18291 "status %d, mbxStatus x%x\n", rc,
18292 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18293 mempool_free(mboxq, phba->mbox_mem_pool);
18300 * lpfc_sli4_init_vpi - Initialize a vpi with the port
18301 * @vport: Pointer to the vport for which the vpi is being initialized
18303 * This routine is invoked to activate a vpi with the port.
18307 * -Evalue otherwise
18310 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
18312 LPFC_MBOXQ_t *mboxq;
18314 int retval = MBX_SUCCESS;
18316 struct lpfc_hba *phba = vport->phba;
18317 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18320 lpfc_init_vpi(phba, mboxq, vport->vpi);
18321 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
18322 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
18323 if (rc != MBX_SUCCESS) {
18324 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
18325 "2022 INIT VPI Mailbox failed "
18326 "status %d, mbxStatus x%x\n", rc,
18327 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18330 if (rc != MBX_TIMEOUT)
18331 mempool_free(mboxq, vport->phba->mbox_mem_pool);
18337 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
18338 * @phba: pointer to lpfc hba data structure.
18339 * @mboxq: Pointer to mailbox object.
18341 * This routine is invoked to manually add a single FCF record. The caller
18342 * must pass a completely initialized FCF_Record. This routine takes
18343 * care of the nonembedded mailbox operations.
18346 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
18349 union lpfc_sli4_cfg_shdr *shdr;
18350 uint32_t shdr_status, shdr_add_status;
18352 virt_addr = mboxq->sge_array->addr[0];
18353 /* The IOCTL status is embedded in the mailbox subheader. */
18354 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
18355 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18356 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18358 if ((shdr_status || shdr_add_status) &&
18359 (shdr_status != STATUS_FCF_IN_USE))
18360 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18361 "2558 ADD_FCF_RECORD mailbox failed with "
18362 "status x%x add_status x%x\n",
18363 shdr_status, shdr_add_status);
18365 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18369 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
18370 * @phba: pointer to lpfc hba data structure.
18371 * @fcf_record: pointer to the initialized fcf record to add.
18373 * This routine is invoked to manually add a single FCF record. The caller
18374 * must pass a completely initialized FCF_Record. This routine takes
18375 * care of the nonembedded mailbox operations.
18378 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
18381 LPFC_MBOXQ_t *mboxq;
18384 struct lpfc_mbx_sge sge;
18385 uint32_t alloc_len, req_len;
18388 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18390 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18391 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
18395 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
18398 /* Allocate DMA memory and set up the non-embedded mailbox command */
18399 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18400 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
18401 req_len, LPFC_SLI4_MBX_NEMBED);
18402 if (alloc_len < req_len) {
18403 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18404 "2523 Allocated DMA memory size (x%x) is "
18405 "less than the requested DMA memory "
18406 "size (x%x)\n", alloc_len, req_len);
18407 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18412 * Get the first SGE entry from the non-embedded DMA memory. This
18413 * routine only uses a single SGE.
18415 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
18416 virt_addr = mboxq->sge_array->addr[0];
18418 * Configure the FCF record for FCFI 0. This is the driver's
18419 * hardcoded default and gets used in nonFIP mode.
18421 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
18422 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
18423 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
18426 * Copy the fcf_index and the FCF Record Data. The data starts after
18427 * the FCoE header plus word10. The data copy needs to be endian
 * correct.
 */
18430 bytep += sizeof(uint32_t);
18431 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
18432 mboxq->vport = phba->pport;
18433 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
18434 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18435 if (rc == MBX_NOT_FINISHED) {
18436 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18437 "2515 ADD_FCF_RECORD mailbox failed with "
18438 "status 0x%x\n", rc);
18439 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18448 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
18449 * @phba: pointer to lpfc hba data structure.
18450 * @fcf_record: pointer to the fcf record to write the default data.
18451 * @fcf_index: FCF table entry index.
18453 * This routine is invoked to build the driver's default FCF record. The
18454 * values used are hardcoded. This routine handles memory initialization.
18458 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
18459 struct fcf_record *fcf_record,
18460 uint16_t fcf_index)
18462 memset(fcf_record, 0, sizeof(struct fcf_record));
18463 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
18464 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
18465 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
18466 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
18467 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
18468 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
18469 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
18470 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
18471 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
18472 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
18473 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
18474 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
18475 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
18476 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
18477 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
18478 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
18479 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
18480 /* Set the VLAN bit map */
18481 if (phba->valid_vlan) {
18482 fcf_record->vlan_bitmap[phba->vlan_id / 8]
18483 = 1 << (phba->vlan_id % 8);
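/*
 * Illustrative sketch (not part of the driver): the VLAN bitmap packs one
 * bit per VLAN id, eight ids per byte, which is what the index and shift
 * above compute. The helper name is hypothetical.
 */
static inline void example_set_vlan_bit(uint8_t *bitmap, uint16_t vlan_id)
{
	bitmap[vlan_id / 8] |= 1 << (vlan_id % 8);
}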
18488 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
18489 * @phba: pointer to lpfc hba data structure.
18490 * @fcf_index: FCF table entry offset.
18492 * This routine is invoked to scan the entire FCF table by reading FCF
18493 * record and processing it one at a time starting from the @fcf_index
18494 * for initial FCF discovery or fast FCF failover rediscovery.
18496 * Return 0 if the mailbox command is submitted successfully, nonzero otherwise.
18500 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18503 LPFC_MBOXQ_t *mboxq;
18505 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
18506 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
18507 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18509 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18510 "2000 Failed to allocate mbox for "
18513 goto fail_fcf_scan;
18515 /* Construct the read FCF record mailbox command */
18516 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18519 goto fail_fcf_scan;
18521 /* Issue the mailbox command asynchronously */
18522 mboxq->vport = phba->pport;
18523 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
18525 spin_lock_irq(&phba->hbalock);
18526 phba->hba_flag |= FCF_TS_INPROG;
18527 spin_unlock_irq(&phba->hbalock);
18529 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18530 if (rc == MBX_NOT_FINISHED)
18533 /* Reset eligible FCF count for new scan */
18534 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
18535 phba->fcf.eligible_fcf_cnt = 0;
18541 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18542 /* FCF scan failed, clear FCF_TS_INPROG flag */
18543 spin_lock_irq(&phba->hbalock);
18544 phba->hba_flag &= ~FCF_TS_INPROG;
18545 spin_unlock_irq(&phba->hbalock);
18551 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
18552 * @phba: pointer to lpfc hba data structure.
18553 * @fcf_index: FCF table entry offset.
18555 * This routine is invoked to read an FCF record indicated by @fcf_index
18556 * and to use it for FLOGI roundrobin FCF failover.
18558 * Return 0 if the mailbox command is submitted successfully, nonzero otherwise.
18562 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18565 LPFC_MBOXQ_t *mboxq;
18567 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18569 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18570 "2763 Failed to allocate mbox for "
18573 goto fail_fcf_read;
18575 /* Construct the read FCF record mailbox command */
18576 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18579 goto fail_fcf_read;
18581 /* Issue the mailbox command asynchronously */
18582 mboxq->vport = phba->pport;
18583 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
18584 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18585 if (rc == MBX_NOT_FINISHED)
18591 if (error && mboxq)
18592 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18597 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
18598 * @phba: pointer to lpfc hba data structure.
18599 * @fcf_index: FCF table entry offset.
18601 * This routine is invoked to read an FCF record indicated by @fcf_index to
18602 * determine whether it's eligible for FLOGI roundrobin failover list.
18604 * Return 0 if the mailbox command is submitted successfully, nonzero otherwise.
18608 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18611 LPFC_MBOXQ_t *mboxq;
18613 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18615 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18616 "2758 Failed to allocate mbox for "
18619 goto fail_fcf_read;
18621 /* Construct the read FCF record mailbox command */
18622 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18625 goto fail_fcf_read;
18627 /* Issue the mailbox command asynchronously */
18628 mboxq->vport = phba->pport;
18629 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
18630 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18631 if (rc == MBX_NOT_FINISHED)
18637 if (error && mboxq)
18638 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18643 * lpfc_check_next_fcf_pri_level - Move to the next FCF priority level
18644 * @phba: pointer to the lpfc_hba struct for this port.
18645 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
18646 * routine when the rr_bmask is empty. The FCF indices are put into the
18647 * rr_bmask based on their priority level, starting from the highest priority
18648 * to the lowest. The most likely FCF candidate will be in the highest
18649 * priority group. When this routine is called it searches the fcf_pri list
18650 * for the next lowest priority group and repopulates the rr_bmask with only
 * those entries.
18653 * Returns: 1=success, 0=failure
18656 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
18658 uint16_t next_fcf_pri;
18659 uint16_t last_index;
18660 struct lpfc_fcf_pri *fcf_pri;
18664 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
18665 LPFC_SLI4_FCF_TBL_INDX_MAX);
18666 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18667 "3060 Last IDX %d\n", last_index);
18669 /* Verify the priority list has 2 or more entries */
18670 spin_lock_irq(&phba->hbalock);
18671 if (list_empty(&phba->fcf.fcf_pri_list) ||
18672 list_is_singular(&phba->fcf.fcf_pri_list)) {
18673 spin_unlock_irq(&phba->hbalock);
18674 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18675 "3061 Last IDX %d\n", last_index);
18676 return 0; /* Empty rr list */
18678 spin_unlock_irq(&phba->hbalock);
18682 * Clear the rr_bmask and set all of the bits that are at this
 * priority level.
 */
18685 memset(phba->fcf.fcf_rr_bmask, 0,
18686 sizeof(*phba->fcf.fcf_rr_bmask));
18687 spin_lock_irq(&phba->hbalock);
18688 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18689 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
18692 * The first priority that has not failed FLOGI
18693 * will be the highest.
18696 next_fcf_pri = fcf_pri->fcf_rec.priority;
18697 spin_unlock_irq(&phba->hbalock);
18698 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18699 rc = lpfc_sli4_fcf_rr_index_set(phba,
18700 fcf_pri->fcf_rec.fcf_index);
18704 spin_lock_irq(&phba->hbalock);
18707 * if next_fcf_pri was not set above and the list is not empty then
18708 * we have failed flogis on all of them. So reset flogi failed
18709 * and start at the beginning.
18711 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
18712 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18713 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
18715 * The first priority that has not failed FLOGI
18716 * will be the highest.
18719 next_fcf_pri = fcf_pri->fcf_rec.priority;
18720 spin_unlock_irq(&phba->hbalock);
18721 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18722 rc = lpfc_sli4_fcf_rr_index_set(phba,
18723 fcf_pri->fcf_rec.fcf_index);
18727 spin_lock_irq(&phba->hbalock);
18731 spin_unlock_irq(&phba->hbalock);
18736 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
18737 * @phba: pointer to lpfc hba data structure.
18739 * This routine is to get the next eligible FCF record index in a round
18740 * robin fashion. If the next eligible FCF record index equals to the
18741 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
18742 * shall be returned, otherwise, the next eligible FCF record's index
18743 * shall be returned.
18746 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
18748 uint16_t next_fcf_index;
18751 /* Search start from next bit of currently registered FCF index */
18752 next_fcf_index = phba->fcf.current_rec.fcf_indx;
18755 /* Determine the next fcf index to check */
18756 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
18757 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18758 LPFC_SLI4_FCF_TBL_INDX_MAX,
 next_fcf_index);
18761 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
18762 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18764 * If we have wrapped then we need to clear the bits that
18765 * have been tested so that we can detect when we should
18766 * change the priority level.
18768 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18769 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
18773 /* Check roundrobin failover list empty condition */
18774 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
18775 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
18777 * If the next fcf index is not found, check if there are lower
18778 * priority level fcf's in the fcf_priority list.
18779 * Set up the rr_bmask with all of the available fcf bits
18780 * at that level and continue the selection process.
18782 if (lpfc_check_next_fcf_pri_level(phba))
18783 goto initial_priority;
18784 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
18785 "2844 No roundrobin failover FCF available\n");
18787 return LPFC_FCOE_FCF_NEXT_NONE;
18790 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
18791 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
18792 LPFC_FCF_FLOGI_FAILED) {
18793 if (list_is_singular(&phba->fcf.fcf_pri_list))
18794 return LPFC_FCOE_FCF_NEXT_NONE;
18796 goto next_priority;
18799 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18800 "2845 Get next roundrobin failover FCF (x%x)\n",
18803 return next_fcf_index;
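/*
 * Illustrative sketch (not part of the driver): round-robin selection over
 * a bitmask, as above -- search from the slot after the current index,
 * wrap once to the start, and let the caller treat an index >= max (or a
 * return to the starting index) as "no eligible entry". The helper name
 * is hypothetical and assumes max > 0.
 */
static inline uint16_t example_rr_next(const unsigned long *bmask,
				       uint16_t max, uint16_t cur)
{
	uint16_t next = find_next_bit(bmask, max, (cur + 1) % max);

	if (next >= max)	/* ran off the end: wrap to bit 0 */
		next = find_next_bit(bmask, max, 0);
	return next;
}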
18807 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
18808 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index of the fcf record to set in the bmask.
18810 * This routine sets the FCF record index in to the eligible bmask for
18811 * roundrobin failover search. It checks to make sure that the index
18812 * does not go beyond the range of the driver allocated bmask dimension
18813 * before setting the bit.
18815 * Returns 0 if the index bit is successfully set, otherwise it returns
18819 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
18821 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18822 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18823 "2610 FCF (x%x) reached driver's book "
18824 "keeping dimension:x%x\n",
18825 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18828 /* Set the eligible FCF record index bmask */
18829 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18831 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18832 "2790 Set FCF (x%x) to roundrobin FCF failover "
18833 "bmask\n", fcf_index);
18839 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
18840 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index of the fcf record to clear from the bmask.
18842 * This routine clears the FCF record index from the eligible bmask for
18843 * roundrobin failover search. It checks to make sure that the index
18844 * does not go beyond the range of the driver allocated bmask dimension
18845 * before clearing the bit.
18848 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
18850 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
18851 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18852 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18853 "2762 FCF (x%x) reached driver's book "
18854 "keeping dimension:x%x\n",
18855 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18858 /* Clear the eligible FCF record index bmask */
18859 spin_lock_irq(&phba->hbalock);
18860 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
18862 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
18863 list_del_init(&fcf_pri->list);
18867 spin_unlock_irq(&phba->hbalock);
18868 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18870 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18871 "2791 Clear FCF (x%x) from roundrobin failover "
18872 "bmask\n", fcf_index);
18876 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
18877 * @phba: pointer to lpfc hba data structure.
18879 * This routine is the completion routine for the rediscover FCF table mailbox
18880 * command. If the mailbox command returned failure, it will try to stop the
18881 * FCF rediscover wait timer.
18884 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
18886 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
18887 uint32_t shdr_status, shdr_add_status;
18889 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
18891 shdr_status = bf_get(lpfc_mbox_hdr_status,
18892 &redisc_fcf->header.cfg_shdr.response);
18893 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
18894 &redisc_fcf->header.cfg_shdr.response);
18895 if (shdr_status || shdr_add_status) {
18896 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18897 "2746 Requesting for FCF rediscovery failed "
18898 "status x%x add_status x%x\n",
18899 shdr_status, shdr_add_status);
18900 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
18901 spin_lock_irq(&phba->hbalock);
18902 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
18903 spin_unlock_irq(&phba->hbalock);
18905 * CVL event triggered FCF rediscover request failed,
18906 * last resort to re-try current registered FCF entry.
18908 lpfc_retry_pport_discovery(phba);
18910 spin_lock_irq(&phba->hbalock);
18911 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
18912 spin_unlock_irq(&phba->hbalock);
18914 * DEAD FCF event triggered FCF rediscover request
18915 * failed, last resort to fail over as a link down
18916 * to FCF registration.
18918 lpfc_sli4_fcf_dead_failthrough(phba);
18921 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18922 "2775 Start FCF rediscover quiescent timer\n");
18924 * Start FCF rediscovery wait timer for pending FCF
18925 * before rescanning the FCF record table.
18927 lpfc_fcf_redisc_wait_start_timer(phba);
18930 mempool_free(mbox, phba->mbox_mem_pool);
18934 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
18935 * @phba: pointer to lpfc hba data structure.
18937 * This routine is invoked to request rediscovery of the entire FCF table by the port.
18941 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
18943 LPFC_MBOXQ_t *mbox;
18944 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
18947 /* Cancel retry delay timers to all vports before FCF rediscover */
18948 lpfc_cancel_all_vport_retry_delay_timer(phba);
18950 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18952 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18953 "2745 Failed to allocate mbox for "
18954 "requesting FCF rediscover.\n");
18958 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
18959 sizeof(struct lpfc_sli4_cfg_mhdr));
18960 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18961 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
18962 length, LPFC_SLI4_MBX_EMBED);
18964 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
18965 /* Set count to 0 for invalidating the entire FCF database */
18966 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
18968 /* Issue the mailbox command asynchronously */
18969 mbox->vport = phba->pport;
18970 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
18971 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
18973 if (rc == MBX_NOT_FINISHED) {
18974 mempool_free(mbox, phba->mbox_mem_pool);
18980 /**
18981 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
18982 * @phba: pointer to lpfc hba data structure.
18984 * This function is the failover routine as a last resort to the FCF DEAD
18985 * event when the driver failed to perform fast FCF failover.
18986 */
18987 void
18988 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
18989 {
18990 uint32_t link_state;
18992 /*
18993 * Last resort as FCF DEAD event failover will treat this as
18994 * a link down, but save the link state because we don't want
18995 * it to be changed to Link Down unless it is already down.
18996 */
18997 link_state = phba->link_state;
18998 lpfc_linkdown(phba);
18999 phba->link_state = link_state;
19001 /* Unregister FCF if no devices connected to it */
19002 lpfc_unregister_unused_fcf(phba);
19003 }
19005 /**
19006 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
19007 * @phba: pointer to lpfc hba data structure.
19008 * @rgn23_data: pointer to configure region 23 data.
19010 * This function gets SLI3 port configuration region 23 data through the
19011 * memory dump mailbox command. On success it returns the size of the data
19012 * retrieved; otherwise it returns 0.
19013 */
19014 static uint32_t
19015 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19016 {
19017 LPFC_MBOXQ_t *pmb = NULL;
19018 MAILBOX_t *mb;
19019 uint32_t offset = 0;
19020 int rc;
19022 if (!rgn23_data)
19023 return 0;
19025 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19026 if (!pmb) {
19027 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19028 "2600 failed to allocate mailbox memory\n");
19029 return 0;
19030 }
19031 mb = &pmb->u.mb;
19033 do {
19034 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
19035 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
19037 if (rc != MBX_SUCCESS) {
19038 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19039 "2601 failed to read config "
19040 "region 23, rc 0x%x Status 0x%x\n",
19041 rc, mb->mbxStatus);
19042 mb->un.varDmp.word_cnt = 0;
19043 }
19044 /*
19045 * dump mem may return a zero when finished or we got a
19046 * mailbox error, either way we are done.
19047 */
19048 if (mb->un.varDmp.word_cnt == 0)
19049 break;
19050 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
19051 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
19053 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
19054 rgn23_data + offset,
19055 mb->un.varDmp.word_cnt);
19056 offset += mb->un.varDmp.word_cnt;
19057 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
19059 mempool_free(pmb, phba->mbox_mem_pool);
19060 return offset;
19061 }
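/*
 * Hedged usage sketch: how a caller might consume this helper; the buffer
 * handling mirrors lpfc_sli_read_link_ste() further below.
 */
#if 0
uint8_t *rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
uint32_t data_size;

if (!rgn23_data)
	return;
data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
if (data_size)
	/* parse the TLV records in rgn23_data[0..data_size) */;
kfree(rgn23_data);
#endif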
19063 /**
19064 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
19065 * @phba: pointer to lpfc hba data structure.
19066 * @rgn23_data: pointer to configure region 23 data.
19068 * This function gets SLI4 port configuration region 23 data through the
19069 * memory dump mailbox command. On success it returns the size of the data
19070 * retrieved; otherwise it returns 0.
19071 */
19072 static uint32_t
19073 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19074 {
19075 LPFC_MBOXQ_t *mboxq = NULL;
19076 struct lpfc_dmabuf *mp = NULL;
19077 struct lpfc_mqe *mqe;
19078 uint32_t data_length = 0;
19079 int rc;
19081 if (!rgn23_data)
19082 return 0;
19084 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19085 if (!mboxq) {
19086 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19087 "3105 failed to allocate mailbox memory\n");
19088 return 0;
19089 }
19091 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
19092 goto out;
19093 mqe = &mboxq->u.mqe;
19094 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
19095 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19096 if (rc)
19097 goto out;
19098 data_length = mqe->un.mb_words[5];
19099 if (data_length == 0)
19100 goto out;
19101 if (data_length > DMP_RGN23_SIZE) {
19102 data_length = 0;
19103 goto out;
19104 }
19105 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
19106 out:
19107 mempool_free(mboxq, phba->mbox_mem_pool);
19108 if (mp) {
19109 lpfc_mbuf_free(phba, mp->virt, mp->phys);
19110 kfree(mp);
19111 }
19112 return data_length;
19113 }
19115 /**
19116 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
19117 * @phba: pointer to lpfc hba data structure.
19119 * This function reads region 23 and parses the TLVs for port status to
19120 * decide whether the user has disabled the port. If a TLV indicates the
19121 * port is disabled, the hba_flag is set accordingly.
19122 */
19123 void
19124 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
19125 {
19126 uint8_t *rgn23_data = NULL;
19127 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
19128 uint32_t offset = 0;
19130 /* Get adapter Region 23 data */
19131 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
19132 if (!rgn23_data)
19133 goto out;
19135 if (phba->sli_rev < LPFC_SLI_REV4)
19136 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
19137 else {
19138 if_type = bf_get(lpfc_sli_intf_if_type,
19139 &phba->sli4_hba.sli_intf);
19140 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
19141 goto out;
19142 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
19143 }
19145 if (!data_size)
19146 goto out;
19148 /* Check the region signature first */
19149 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
19150 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19151 "2619 Config region 23 has bad signature\n");
19156 /* Check the data structure version */
19157 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
19158 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19159 "2620 Config region 23 has bad version\n");
19164 /* Parse TLV entries in the region */
19165 while (offset < data_size) {
19166 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
19167 break;
19168 /*
19169 * If the TLV is not driver specific TLV or driver id is
19170 * not linux driver id, skip the record.
19171 */
19172 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
19173 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
19174 (rgn23_data[offset + 3] != 0)) {
19175 offset += rgn23_data[offset + 1] * 4 + 4;
19176 continue;
19177 }
19179 /* Driver found a driver specific TLV in the config region */
19180 sub_tlv_len = rgn23_data[offset + 1] * 4;
19181 offset += 4;
19182 tlv_offset = 0;
19184 /*
19185 * Search for configured port state sub-TLV.
19186 */
19187 while ((offset < data_size) &&
19188 (tlv_offset < sub_tlv_len)) {
19189 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
19190 offset += 4;
19191 tlv_offset += 4;
19192 break;
19193 }
19194 if (rgn23_data[offset] != PORT_STE_TYPE) {
19195 offset += rgn23_data[offset + 1] * 4 + 4;
19196 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
19197 continue;
19198 }
19200 /* This HBA contains PORT_STE configured */
19201 if (!rgn23_data[offset + 2])
19202 phba->hba_flag |= LINK_DISABLED;
19203 goto out;
19204 }
19205 }
19207 out:
19208 kfree(rgn23_data);
19209 return;
19210 }
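/*
 * Illustrative summary (not driver code) of the region 23 TLV layout the
 * walk above assumes; offsets are in bytes:
 *
 *   rgn23_data[offset + 0]   record type (e.g. DRIVER_SPECIFIC_TYPE)
 *   rgn23_data[offset + 1]   payload length in words
 *   rgn23_data[offset + 2..] payload (driver id, sub-TLVs, ...)
 *
 * so skipping a record is: offset += rgn23_data[offset + 1] * 4 + 4;
 */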
19213 /**
19214 * lpfc_wr_object - write an object to the firmware
19215 * @phba: HBA structure that indicates port to create a queue on.
19216 * @dmabuf_list: list of dmabufs to write to the port.
19217 * @size: the total byte value of the objects to write to the port.
19218 * @offset: the current offset to be used to start the transfer.
19220 * This routine will create a wr_object mailbox command to send to the port.
19221 * The mailbox command will be constructed using the dma buffers described in
19222 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
19223 * BDEs as the embedded mailbox can support. The @offset variable will be
19224 * used to indicate the starting offset of the transfer and will also return
19225 * the offset after the write object mailbox has completed. @size is used to
19226 * determine the end of the object and whether the eof bit should be set.
19228 * Returns 0 if successful; @offset will contain the new offset to use
19229 * for the next write.
19230 * Returns a negative value for error cases.
19231 */
19232 static int
19233 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
19234 uint32_t size, uint32_t *offset)
19235 {
19236 struct lpfc_mbx_wr_object *wr_object;
19237 LPFC_MBOXQ_t *mbox;
19238 int rc = 0, i = 0;
19239 uint32_t shdr_status, shdr_add_status, shdr_change_status;
19240 uint32_t mbox_tmo;
19241 struct lpfc_dmabuf *dmabuf;
19242 uint32_t written = 0;
19243 bool check_change_status = false;
19245 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19246 if (!mbox)
19247 return -ENOMEM;
19249 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
19250 LPFC_MBOX_OPCODE_WRITE_OBJECT,
19251 sizeof(struct lpfc_mbx_wr_object) -
19252 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
19254 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
19255 wr_object->u.request.write_offset = *offset;
19256 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
19257 wr_object->u.request.object_name[0] =
19258 cpu_to_le32(wr_object->u.request.object_name[0]);
19259 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
19260 list_for_each_entry(dmabuf, dmabuf_list, list) {
19261 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
19262 break;
19263 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
19264 wr_object->u.request.bde[i].addrHigh =
19265 putPaddrHigh(dmabuf->phys);
19266 if (written + SLI4_PAGE_SIZE >= size) {
19267 wr_object->u.request.bde[i].tus.f.bdeSize =
19268 (size - written);
19269 written += (size - written);
19270 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
19271 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
19272 check_change_status = true;
19273 } else {
19274 wr_object->u.request.bde[i].tus.f.bdeSize =
19275 SLI4_PAGE_SIZE;
19276 written += SLI4_PAGE_SIZE;
19277 }
19278 i++;
19279 }
19280 wr_object->u.request.bde_count = i;
19281 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
19282 if (!phba->sli4_hba.intr_enable)
19283 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
19285 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
19286 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
19287 }
19288 /* The IOCTL status is embedded in the mailbox subheader. */
19289 shdr_status = bf_get(lpfc_mbox_hdr_status,
19290 &wr_object->header.cfg_shdr.response);
19291 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19292 &wr_object->header.cfg_shdr.response);
19293 if (check_change_status) {
19294 shdr_change_status = bf_get(lpfc_wr_object_change_status,
19295 &wr_object->u.response);
19296 switch (shdr_change_status) {
19297 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
19298 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19299 "3198 Firmware write complete: System "
19300 "reboot required to instantiate\n");
19302 case (LPFC_CHANGE_STATUS_FW_RESET):
19303 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19304 "3199 Firmware write complete: Firmware"
19305 " reset required to instantiate\n");
19307 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
19308 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19309 "3200 Firmware write complete: Port "
19310 "Migration or PCI Reset required to "
19313 case (LPFC_CHANGE_STATUS_PCI_RESET):
19314 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19315 "3201 Firmware write complete: PCI "
19316 "Reset required to instantiate\n");
19322 if (rc != MBX_TIMEOUT)
19323 mempool_free(mbox, phba->mbox_mem_pool);
19324 if (shdr_status || shdr_add_status || rc) {
19325 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19326 "3025 Write Object mailbox failed with "
19327 "status x%x add_status x%x, mbx status x%x\n",
19328 shdr_status, shdr_add_status, rc);
19329 rc = -ENXIO;
19330 *offset = shdr_add_status;
19331 } else
19332 *offset += wr_object->u.response.actual_write_length;
19333 return rc;
19334 }
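/*
 * Hedged usage sketch (not the driver's actual flash-update path): an
 * object is written in chunks, with lpfc_wr_object() advancing @offset on
 * every call; "dma_list" and "fw_size" are hypothetical caller state.
 */
#if 0
uint32_t offset = 0;
while (offset < fw_size) {
	/* refill dma_list with the next piece of the image here */
	if (lpfc_wr_object(phba, &dma_list, fw_size, &offset))
		return -EIO;	/* on failure *offset holds add_status */
}
#endif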
19336 /**
19337 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
19338 * @vport: pointer to vport data structure.
19340 * This function iterates through the mailboxq and cleans up all REG_LOGIN
19341 * and REG_VPI mailbox commands associated with the vport. This function
19342 * is called when the driver wants to restart discovery of the vport due to
19343 * a Clear Virtual Link event.
19344 */
19345 static void
19346 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
19347 {
19348 struct lpfc_hba *phba = vport->phba;
19349 LPFC_MBOXQ_t *mb, *nextmb;
19350 struct lpfc_dmabuf *mp;
19351 struct lpfc_nodelist *ndlp;
19352 struct lpfc_nodelist *act_mbx_ndlp = NULL;
19353 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
19354 LIST_HEAD(mbox_cmd_list);
19355 uint8_t restart_loop;
19357 /* Clean up internally queued mailbox commands with the vport */
19358 spin_lock_irq(&phba->hbalock);
19359 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
19360 if (mb->vport != vport)
19361 continue;
19363 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19364 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19365 continue;
19367 list_del(&mb->list);
19368 list_add_tail(&mb->list, &mbox_cmd_list);
19369 }
19370 /* Clean up active mailbox command with the vport */
19371 mb = phba->sli.mbox_active;
19372 if (mb && (mb->vport == vport)) {
19373 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
19374 (mb->u.mb.mbxCommand == MBX_REG_VPI))
19375 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19376 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19377 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19378 /* Put reference count for delayed processing */
19379 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
19380 /* Unregister the RPI when mailbox complete */
19381 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19382 }
19383 }
19384 /* Cleanup any mailbox completions which are not yet processed */
19385 do {
19386 restart_loop = 0;
19387 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
19388 /*
19389 * If this mailbox is already processed or it is
19390 * for another vport ignore it.
19391 */
19392 if ((mb->vport != vport) ||
19393 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
19394 continue;
19396 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19397 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19398 continue;
19400 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19401 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19402 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19403 /* Unregister the RPI when mailbox complete */
19404 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19405 restart_loop = 1;
19406 spin_unlock_irq(&phba->hbalock);
19407 spin_lock(shost->host_lock);
19408 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19409 spin_unlock(shost->host_lock);
19410 spin_lock_irq(&phba->hbalock);
19411 break;
19412 }
19413 }
19414 } while (restart_loop);
19416 spin_unlock_irq(&phba->hbalock);
19418 /* Release the cleaned-up mailbox commands */
19419 while (!list_empty(&mbox_cmd_list)) {
19420 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
19421 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19422 mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
19423 if (mp) {
19424 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
19425 kfree(mp);
19426 }
19427 mb->ctx_buf = NULL;
19428 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19429 mb->ctx_ndlp = NULL;
19430 if (ndlp) {
19431 spin_lock(shost->host_lock);
19432 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19433 spin_unlock(shost->host_lock);
19434 lpfc_nlp_put(ndlp);
19435 }
19436 }
19437 mempool_free(mb, phba->mbox_mem_pool);
19438 }
19440 /* Release the ndlp with the cleaned-up active mailbox command */
19441 if (act_mbx_ndlp) {
19442 spin_lock(shost->host_lock);
19443 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19444 spin_unlock(shost->host_lock);
19445 lpfc_nlp_put(act_mbx_ndlp);
19446 }
19447 }
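/*
 * Illustrative sketch (not driver code) of the rescan pattern used above:
 * whenever the walk must drop hbalock mid-iteration, the list may change,
 * so the walk restarts from the head until it completes without dropping
 * the lock. "head", "must_drop()" and "touch()" are hypothetical.
 */
#if 0
do {
	restart_loop = 0;
	spin_lock_irq(&lock);
	list_for_each_entry(item, &head, list) {
		if (must_drop(item)) {
			spin_unlock_irq(&lock);
			touch(item);
			spin_lock_irq(&lock);
			restart_loop = 1;
			break;
		}
	}
	spin_unlock_irq(&lock);
} while (restart_loop);
#endif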
19449 /**
19450 * lpfc_drain_txq - Drain the txq
19451 * @phba: Pointer to HBA context object.
19453 * This function attempts to submit IOCBs on the txq
19454 * to the adapter. For SLI4 adapters, the txq contains
19455 * ELS IOCBs that have been deferred because there
19456 * are no SGLs available. This congestion can occur with large
19457 * vport counts during node discovery.
19458 */
19459 uint32_t
19461 lpfc_drain_txq(struct lpfc_hba *phba)
19462 {
19463 LIST_HEAD(completions);
19464 struct lpfc_sli_ring *pring;
19465 struct lpfc_iocbq *piocbq = NULL;
19466 unsigned long iflags = 0;
19467 char *fail_msg = NULL;
19468 struct lpfc_sglq *sglq;
19469 union lpfc_wqe128 wqe;
19470 uint32_t txq_cnt = 0;
19471 struct lpfc_queue *wq;
19473 if (phba->link_flag & LS_MDS_LOOPBACK) {
19474 /* MDS WQEs are posted only to the first WQ */
19475 wq = phba->sli4_hba.hdwq[0].fcp_wq;
19476 if (unlikely(!wq))
19477 return 0;
19478 pring = wq->pring;
19479 } else {
19480 wq = phba->sli4_hba.els_wq;
19481 if (unlikely(!wq))
19482 return 0;
19483 pring = lpfc_phba_elsring(phba);
19484 }
19486 if (unlikely(!pring) || list_empty(&pring->txq))
19487 return 0;
19489 spin_lock_irqsave(&pring->ring_lock, iflags);
19490 list_for_each_entry(piocbq, &pring->txq, list) {
19491 txq_cnt++;
19492 }
19494 if (txq_cnt > pring->txq_max)
19495 pring->txq_max = txq_cnt;
19497 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19499 while (!list_empty(&pring->txq)) {
19500 spin_lock_irqsave(&pring->ring_lock, iflags);
19502 piocbq = lpfc_sli_ringtx_get(phba, pring);
19503 if (!piocbq) {
19504 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19505 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19506 "2823 txq empty and txq_cnt is %d\n ",
19510 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
19511 if (!sglq) {
19512 __lpfc_sli_ringtx_put(phba, pring, piocbq);
19513 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19514 break;
19515 }
19517 txq_cnt--;
19518 /* The xri and iocb resources are secured,
19519 * attempt to issue the request.
19520 */
19521 piocbq->sli4_lxritag = sglq->sli4_lxritag;
19522 piocbq->sli4_xritag = sglq->sli4_xritag;
19523 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
19524 fail_msg = "to convert bpl to sgl";
19525 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
19526 fail_msg = "to convert iocb to wqe";
19527 else if (lpfc_sli4_wq_put(wq, &wqe))
19528 fail_msg = " - Wq is full";
19530 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
19533 /* Failed means we can't issue and need to cancel */
19534 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19535 "2822 IOCB failed %s iotag 0x%x "
19536 "xri 0x%x\n",
19537 fail_msg,
19538 piocbq->iotag, piocbq->sli4_xritag);
19539 list_add_tail(&piocbq->list, &completions);
19540 }
19541 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19542 }
19544 /* Cancel all the IOCBs that cannot be issued */
19545 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
19546 IOERR_SLI_ABORTED);
19548 return txq_cnt;
19549 }
19551 /**
19552 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
19553 * @phba: Pointer to HBA context object.
19554 * @pwqeq: Pointer to command WQE.
19555 * @sglq: Pointer to the scatter gather queue object.
19557 * This routine converts the bpl or bde that is in the WQE
19558 * to a sgl list for the sli4 hardware. The physical address
19559 * of the bpl/bde is converted back to a virtual address.
19560 * If the WQE contains a BPL then the list of BDEs is
19561 * converted to sli4_sge entries. If the WQE contains a single
19562 * BDE then it is converted to a single sli4_sge.
19563 * The WQE is still in cpu endianness so the contents of
19564 * the bpl can be used without byte swapping.
19566 * Returns valid XRI = Success, NO_XRI = Failure.
19567 */
19568 static uint16_t
19569 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
19570 struct lpfc_sglq *sglq)
19571 {
19572 uint16_t xritag = NO_XRI;
19573 struct ulp_bde64 *bpl = NULL;
19574 struct ulp_bde64 bde;
19575 struct sli4_sge *sgl = NULL;
19576 struct lpfc_dmabuf *dmabuf;
19577 union lpfc_wqe128 *wqe;
19578 int numBdes = 0;
19579 int i = 0;
19580 uint32_t offset = 0; /* accumulated offset in the sg request list */
19581 int inbound = 0; /* number of sg reply entries inbound from firmware */
19582 uint32_t cmd;
19584 if (!pwqeq || !sglq)
19585 return xritag;
19587 sgl = (struct sli4_sge *)sglq->sgl;
19588 wqe = &pwqeq->wqe;
19589 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
19591 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
19592 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
19593 return sglq->sli4_xritag;
19594 numBdes = pwqeq->rsvd2;
19595 if (numBdes) {
19596 /* The addrHigh and addrLow fields within the WQE
19597 * have not been byteswapped yet so there is no
19598 * need to swap them back.
19599 */
19600 if (pwqeq->context3)
19601 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
19602 else
19603 return xritag;
19605 bpl = (struct ulp_bde64 *)dmabuf->virt;
19606 if (!bpl)
19607 return xritag;
19609 for (i = 0; i < numBdes; i++) {
19610 /* Should already be byte swapped. */
19611 sgl->addr_hi = bpl->addrHigh;
19612 sgl->addr_lo = bpl->addrLow;
19614 sgl->word2 = le32_to_cpu(sgl->word2);
19615 if ((i+1) == numBdes)
19616 bf_set(lpfc_sli4_sge_last, sgl, 1);
19617 else
19618 bf_set(lpfc_sli4_sge_last, sgl, 0);
19619 /* swap the size field back to the cpu so we
19620 * can assign it to the sgl.
19621 */
19622 bde.tus.w = le32_to_cpu(bpl->tus.w);
19623 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
19624 /* The offsets in the sgl need to be accumulated
19625 * separately for the request and reply lists.
19626 * The request is always first, the reply follows.
19627 */
19628 switch (cmd) {
19629 case CMD_GEN_REQUEST64_WQE:
19630 /* add up the reply sg entries */
19631 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
19632 inbound++;
19633 /* first inbound? reset the offset */
19634 if (inbound == 1)
19635 offset = 0;
19636 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19637 bf_set(lpfc_sli4_sge_type, sgl,
19638 LPFC_SGE_TYPE_DATA);
19639 offset += bde.tus.f.bdeSize;
19640 break;
19641 case CMD_FCP_TRSP64_WQE:
19642 bf_set(lpfc_sli4_sge_offset, sgl, 0);
19643 bf_set(lpfc_sli4_sge_type, sgl,
19644 LPFC_SGE_TYPE_DATA);
19645 break;
19646 case CMD_FCP_TSEND64_WQE:
19647 case CMD_FCP_TRECEIVE64_WQE:
19648 bf_set(lpfc_sli4_sge_type, sgl,
19649 bpl->tus.f.bdeFlags);
19650 if (i < 3)
19651 offset = 0;
19652 else
19653 offset += bde.tus.f.bdeSize;
19654 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19655 break;
19656 }
19657 sgl->word2 = cpu_to_le32(sgl->word2);
19658 bpl++;
19659 sgl++;
19660 }
19661 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
19662 /* The addrHigh and addrLow fields of the BDE have not
19663 * been byteswapped yet so they need to be swapped
19664 * before putting them in the sgl.
19666 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
19667 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
19668 sgl->word2 = le32_to_cpu(sgl->word2);
19669 bf_set(lpfc_sli4_sge_last, sgl, 1);
19670 sgl->word2 = cpu_to_le32(sgl->word2);
19671 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
19673 return sglq->sli4_xritag;
19674 }
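/*
 * Illustrative summary (not driver code) of the per-entry mapping done
 * above; each ulp_bde64 in the BPL becomes one sli4_sge:
 *
 *   bpl->addrHigh/addrLow -> sgl->addr_hi/addr_lo (already little endian)
 *   bpl->tus.f.bdeSize    -> sgl->sge_len (swapped to CPU, then back)
 *   last BPL entry        -> lpfc_sli4_sge_last = 1
 *
 * GEN_REQUEST64 additionally tracks a per-direction offset, resetting it
 * at the first inbound (reply) BDE.
 */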
19676 /**
19677 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
19678 * @phba: Pointer to HBA context object.
19679 * @qp: Pointer to the hardware queue set to issue on.
19680 * @pwqe: Pointer to command WQE.
19681 */
19682 int
19683 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
19684 struct lpfc_iocbq *pwqe)
19685 {
19686 union lpfc_wqe128 *wqe = &pwqe->wqe;
19687 struct lpfc_nvmet_rcv_ctx *ctxp;
19688 struct lpfc_queue *wq;
19689 struct lpfc_sglq *sglq;
19690 struct lpfc_sli_ring *pring;
19691 unsigned long iflags;
19692 uint32_t ret = 0;
19694 /* NVME_LS and NVME_LS ABTS requests. */
19695 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
19696 pring = phba->sli4_hba.nvmels_wq->pring;
19697 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19698 qp, wq_access);
19699 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
19700 if (!sglq) {
19701 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19702 return WQE_BUSY;
19703 }
19704 pwqe->sli4_lxritag = sglq->sli4_lxritag;
19705 pwqe->sli4_xritag = sglq->sli4_xritag;
19706 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
19707 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19708 return WQE_ERROR;
19709 }
19710 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19711 pwqe->sli4_xritag);
19712 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
19713 if (ret) {
19714 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19715 return ret;
19716 }
19718 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19719 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19720 return 0;
19721 }
19723 /* NVME_FCREQ and NVME_ABTS requests */
19724 if (pwqe->iocb_flag & LPFC_IO_NVME) {
19725 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
19726 wq = qp->nvme_wq;
19727 pring = wq->pring;
19729 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);
19731 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19732 qp, wq_access);
19733 ret = lpfc_sli4_wq_put(wq, wqe);
19734 if (ret) {
19735 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19736 return ret;
19737 }
19738 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19739 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19740 return 0;
19741 }
19743 /* NVMET requests */
19744 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
19745 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
19746 wq = qp->nvme_wq;
19747 pring = wq->pring;
19749 ctxp = pwqe->context2;
19750 sglq = ctxp->ctxbuf->sglq;
19751 if (pwqe->sli4_xritag == NO_XRI) {
19752 pwqe->sli4_lxritag = sglq->sli4_lxritag;
19753 pwqe->sli4_xritag = sglq->sli4_xritag;
19754 }
19755 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19756 pwqe->sli4_xritag);
19757 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);
19759 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19760 qp, wq_access);
19761 ret = lpfc_sli4_wq_put(wq, wqe);
19762 if (ret) {
19763 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19764 return ret;
19765 }
19766 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19767 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19768 return 0;
19769 }
19771 return WQE_ERROR;
19772 }
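/*
 * Hedged usage sketch: callers select the hardware queue, then issue; any
 * nonzero return (WQE_BUSY, WQE_ERROR, or a wq_put failure) means nothing
 * was posted and the WQE is still owned by the caller.
 */
#if 0
qp = &phba->sli4_hba.hdwq[pwqe->hba_wqidx];
if (lpfc_sli4_issue_wqe(phba, qp, pwqe))
	/* defer, retry, or fail the IO */;
#endif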
19773 #ifdef LPFC_MXP_STAT
19774 /**
19775 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
19776 * @phba: pointer to lpfc hba data structure.
19777 * @hwqid: index of the HWQ the snapshot belongs to.
19779 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
19780 * 15 seconds after a test case starts running.
19782 * The user should call lpfc_debugfs_multixripools_write before running a test
19783 * case to clear stat_snapshot_taken. Then the user starts a test case. While
19784 * the test case is running, stat_snapshot_taken is incremented by 1 each time
19785 * this routine is called from the heartbeat timer. When stat_snapshot_taken
19786 * equals LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
19787 */
19788 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
19789 {
19790 struct lpfc_sli4_hdw_queue *qp;
19791 struct lpfc_multixri_pool *multixri_pool;
19792 struct lpfc_pvt_pool *pvt_pool;
19793 struct lpfc_pbl_pool *pbl_pool;
19794 u32 txcmplq_cnt;
19796 qp = &phba->sli4_hba.hdwq[hwqid];
19797 multixri_pool = qp->p_multixri_pool;
19798 if (!multixri_pool)
19799 return;
19801 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
19802 pvt_pool = &qp->p_multixri_pool->pvt_pool;
19803 pbl_pool = &qp->p_multixri_pool->pbl_pool;
19804 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
19805 if (qp->nvme_wq)
19806 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
19808 multixri_pool->stat_pbl_count = pbl_pool->count;
19809 multixri_pool->stat_pvt_count = pvt_pool->count;
19810 multixri_pool->stat_busy_count = txcmplq_cnt;
19811 }
19813 multixri_pool->stat_snapshot_taken++;
19814 }
19815 #endif
19817 /**
19818 * lpfc_adjust_pvt_pool_count - Adjust private pool count
19819 * @phba: pointer to lpfc hba data structure.
19820 * @hwqid: index of the HWQ the pool belongs to.
19822 * This routine moves some XRIs from the private to the public pool when the
19823 * private pool has not been busy.
19824 */
19825 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
19826 {
19827 struct lpfc_multixri_pool *multixri_pool;
19828 u32 io_req_count;
19829 u32 prev_io_req_count;
19831 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
19832 if (!multixri_pool)
19833 return;
19834 io_req_count = multixri_pool->io_req_count;
19835 prev_io_req_count = multixri_pool->prev_io_req_count;
19837 if (prev_io_req_count != io_req_count) {
19838 /* Private pool is busy */
19839 multixri_pool->prev_io_req_count = io_req_count;
19840 } else {
19841 /* Private pool is not busy.
19842 * Move XRIs from private to public pool.
19843 */
19844 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
19845 }
19846 }
19848 /**
19849 * lpfc_adjust_high_watermark - Adjust high watermark
19850 * @phba: pointer to lpfc hba data structure.
19851 * @hwqid: index of the HWQ the watermark belongs to.
19853 * This routine sets the high watermark to the number of outstanding XRIs,
19854 * clamped so the new value stays between xri_limit/2 and xri_limit.
19855 */
19856 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
19857 {
19858 u32 new_watermark;
19859 u32 watermark_max;
19860 u32 watermark_min;
19861 u32 xri_limit;
19862 u32 txcmplq_cnt;
19863 u32 abts_io_bufs;
19864 struct lpfc_multixri_pool *multixri_pool;
19865 struct lpfc_sli4_hdw_queue *qp;
19867 qp = &phba->sli4_hba.hdwq[hwqid];
19868 multixri_pool = qp->p_multixri_pool;
19869 if (!multixri_pool)
19870 return;
19871 xri_limit = multixri_pool->xri_limit;
19873 watermark_max = xri_limit;
19874 watermark_min = xri_limit / 2;
19876 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
19877 abts_io_bufs = qp->abts_scsi_io_bufs;
19878 if (qp->nvme_wq) {
19879 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
19880 abts_io_bufs += qp->abts_nvme_io_bufs;
19881 }
19883 new_watermark = txcmplq_cnt + abts_io_bufs;
19884 new_watermark = min(watermark_max, new_watermark);
19885 new_watermark = max(watermark_min, new_watermark);
19886 multixri_pool->pvt_pool.high_watermark = new_watermark;
19888 #ifdef LPFC_MXP_STAT
19889 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
19890 new_watermark);
19891 #endif
19892 }
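/*
 * Worked example of the clamp above, assuming xri_limit = 512: the high
 * watermark is kept in [256, 512]. With txcmplq_cnt + abts_io_bufs = 100
 * the result is max(256, min(512, 100)) = 256; with 600 outstanding it is
 * max(256, min(512, 600)) = 512.
 */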
19894 /**
19895 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
19896 * @phba: pointer to lpfc hba data structure.
19897 * @hwqid: index of the HWQ the pools belong to.
19899 * This routine is called from the heartbeat timer when pvt_pool is idle.
19900 * All free XRIs are moved from private to public pool on hwqid with 2 steps.
19901 * The first step moves (all - low_watermark) amount of XRIs.
19902 * The second step moves the rest of the XRIs.
19903 */
19904 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
19905 {
19906 struct lpfc_pbl_pool *pbl_pool;
19907 struct lpfc_pvt_pool *pvt_pool;
19908 struct lpfc_sli4_hdw_queue *qp;
19909 struct lpfc_io_buf *lpfc_ncmd;
19910 struct lpfc_io_buf *lpfc_ncmd_next;
19911 unsigned long iflag;
19912 struct list_head tmp_list;
19913 u32 tmp_count;
19915 qp = &phba->sli4_hba.hdwq[hwqid];
19916 pbl_pool = &qp->p_multixri_pool->pbl_pool;
19917 pvt_pool = &qp->p_multixri_pool->pvt_pool;
19918 tmp_count = 0;
19920 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
19921 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
19923 if (pvt_pool->count > pvt_pool->low_watermark) {
19924 /* Step 1: move (all - low_watermark) from pvt_pool
19925 * to pbl_pool.
19926 */
19928 /* Move low watermark of bufs from pvt_pool to tmp_list */
19929 INIT_LIST_HEAD(&tmp_list);
19930 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
19931 &pvt_pool->list, list) {
19932 list_move_tail(&lpfc_ncmd->list, &tmp_list);
19933 tmp_count++;
19934 if (tmp_count >= pvt_pool->low_watermark)
19935 break;
19936 }
19938 /* Move all bufs from pvt_pool to pbl_pool */
19939 list_splice_init(&pvt_pool->list, &pbl_pool->list);
19941 /* Move all bufs from tmp_list to pvt_pool */
19942 list_splice(&tmp_list, &pvt_pool->list);
19944 pbl_pool->count += (pvt_pool->count - tmp_count);
19945 pvt_pool->count = tmp_count;
19946 } else {
19947 /* Step 2: move the rest from pvt_pool to pbl_pool */
19948 list_splice_init(&pvt_pool->list, &pbl_pool->list);
19949 pbl_pool->count += pvt_pool->count;
19950 pvt_pool->count = 0;
19951 }
19953 spin_unlock(&pvt_pool->lock);
19954 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
19955 }
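/*
 * Note (illustrative, not driver text): both movers take pbl_pool->lock
 * before pvt_pool->lock, here via lpfc_qp_spin_lock_irqsave() and in
 * _lpfc_move_xri_pbl_to_pvt() below via spin_trylock_irqsave(), so the
 * two directions of XRI movement cannot deadlock against each other.
 */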
19957 /**
19958 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
19959 * @phba: pointer to lpfc hba data structure
19960 * @pbl_pool: specified public free XRI pool
19961 * @pvt_pool: specified private free XRI pool
19962 * @count: number of XRIs to move
19964 * This routine tries to move some free common bufs from the specified pbl_pool
19965 * to the specified pvt_pool. It might move fewer than @count XRIs if there
19966 * are not enough in the public pool.
19969 * true - if XRIs are successfully moved from the specified pbl_pool to the
19970 * specified pvt_pool
19971 * false - if the specified pbl_pool is empty or locked by someone else
19972 */
19973 static bool
19974 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
19975 struct lpfc_pbl_pool *pbl_pool,
19976 struct lpfc_pvt_pool *pvt_pool, u32 count)
19977 {
19978 struct lpfc_io_buf *lpfc_ncmd;
19979 struct lpfc_io_buf *lpfc_ncmd_next;
19980 unsigned long iflag;
19981 int ret;
19983 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
19984 if (ret) {
19985 if (pbl_pool->count) {
19986 /* Move a batch of XRIs from public to private pool */
19987 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
19988 list_for_each_entry_safe(lpfc_ncmd,
19989 lpfc_ncmd_next,
19990 &pbl_pool->list,
19991 list) {
19992 list_move_tail(&lpfc_ncmd->list,
19993 &pvt_pool->list);
19994 pvt_pool->count++;
19995 pbl_pool->count--;
19996 count--;
19997 if (count == 0)
19998 break;
19999 }
20001 spin_unlock(&pvt_pool->lock);
20002 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20003 return true;
20004 }
20005 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20006 }
20008 return false;
20009 }
20011 /**
20012 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20013 * @phba: pointer to lpfc hba data structure.
20014 * @hwqid: index of the HWQ whose private pool is refilled.
20015 * @count: number of XRIs to move
20017 * This routine tries to find some free common bufs in one of public pools with
20018 * Round Robin method. The search always starts from local hwqid, then the next
20019 * HWQ which was found last time (rrb_next_hwqid). Once a public pool is found,
20020 * a batch of free common bufs are moved to private pool on hwqid.
20021 * It might move fewer than @count XRIs if the public pools are low.
20022 */
20023 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
20024 {
20025 struct lpfc_multixri_pool *multixri_pool;
20026 struct lpfc_multixri_pool *next_multixri_pool;
20027 struct lpfc_pvt_pool *pvt_pool;
20028 struct lpfc_pbl_pool *pbl_pool;
20029 struct lpfc_sli4_hdw_queue *qp;
20030 u32 next_hwqid;
20031 u32 hwq_count;
20032 int ret;
20034 qp = &phba->sli4_hba.hdwq[hwqid];
20035 multixri_pool = qp->p_multixri_pool;
20036 pvt_pool = &multixri_pool->pvt_pool;
20037 pbl_pool = &multixri_pool->pbl_pool;
20039 /* Check if local pbl_pool is available */
20040 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
20041 if (ret) {
20042 #ifdef LPFC_MXP_STAT
20043 multixri_pool->local_pbl_hit_count++;
20044 #endif
20045 return;
20046 }
20048 hwq_count = phba->cfg_hdw_queue;
20050 /* Get the next hwqid which was found last time */
20051 next_hwqid = multixri_pool->rrb_next_hwqid;
20053 do {
20054 /* Go to next hwq */
20055 next_hwqid = (next_hwqid + 1) % hwq_count;
20057 next_multixri_pool =
20058 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
20059 pbl_pool = &next_multixri_pool->pbl_pool;
20061 /* Check if the public free xri pool is available */
20062 ret = _lpfc_move_xri_pbl_to_pvt(
20063 phba, qp, pbl_pool, pvt_pool, count);
20065 /* Exit while-loop if success or all hwqid are checked */
20066 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
20068 /* Starting point for the next time */
20069 multixri_pool->rrb_next_hwqid = next_hwqid;
20071 if (!ret) {
20072 /* stats: all public pools are empty */
20073 multixri_pool->pbl_empty_count++;
20074 }
20076 #ifdef LPFC_MXP_STAT
20077 if (ret) {
20078 if (next_hwqid == hwqid)
20079 multixri_pool->local_pbl_hit_count++;
20080 else
20081 multixri_pool->other_pbl_hit_count++;
20082 }
20083 #endif
20084 }
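/*
 * Worked example of the round-robin walk above, assuming 4 HWQs and
 * rrb_next_hwqid = 2 on entry: the local pool is tried first, then HWQs
 * 3, 0, 1, 2 in turn; the loop stops at the first pool that yields XRIs
 * or after wrapping back to 2 with every public pool empty.
 */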
20086 /**
20087 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
20088 * @phba: pointer to lpfc hba data structure.
20089 * @hwqid: index of the HWQ whose private pool is topped up.
20091 * This routine gets a batch of XRIs from pbl_pool if pvt_pool's count is
20092 * below the low watermark.
20093 */
20094 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
20095 {
20096 struct lpfc_multixri_pool *multixri_pool;
20097 struct lpfc_pvt_pool *pvt_pool;
20099 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20100 pvt_pool = &multixri_pool->pvt_pool;
20102 if (pvt_pool->count < pvt_pool->low_watermark)
20103 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20104 }
20106 /**
20107 * lpfc_release_io_buf - Return one IO buf back to free pool
20108 * @phba: pointer to lpfc hba data structure.
20109 * @lpfc_ncmd: IO buf to be returned.
20110 * @qp: pointer to the HWQ the buffer belongs to.
20112 * This routine returns one IO buf back to free pool. If this is an urgent IO,
20113 * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1,
20114 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
20115 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
20116 * lpfc_io_buf_list_put.
20117 */
20118 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
20119 struct lpfc_sli4_hdw_queue *qp)
20120 {
20121 unsigned long iflag;
20122 struct lpfc_pbl_pool *pbl_pool;
20123 struct lpfc_pvt_pool *pvt_pool;
20124 struct lpfc_epd_pool *epd_pool;
20125 u32 txcmplq_cnt;
20126 u32 xri_owned;
20127 u32 xri_limit;
20128 u32 abts_io_bufs;
20130 /* MUST zero fields if buffer is reused by another protocol */
20131 lpfc_ncmd->nvmeCmd = NULL;
20132 lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
20133 lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
20135 if (phba->cfg_xri_rebalancing) {
20136 if (lpfc_ncmd->expedite) {
20137 /* Return to expedite pool */
20138 epd_pool = &phba->epd_pool;
20139 spin_lock_irqsave(&epd_pool->lock, iflag);
20140 list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
20141 epd_pool->count++;
20142 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20143 return;
20144 }
20146 /* Avoid invalid access if an IO sneaks in and is being rejected
20147 * just _after_ xri pools are destroyed in lpfc_offline.
20148 * Nothing much can be done at this point.
20149 */
20150 if (!qp->p_multixri_pool)
20151 return;
20153 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20154 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20156 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
20157 abts_io_bufs = qp->abts_scsi_io_bufs;
20158 if (qp->nvme_wq) {
20159 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
20160 abts_io_bufs += qp->abts_nvme_io_bufs;
20161 }
20163 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
20164 xri_limit = qp->p_multixri_pool->xri_limit;
20166 #ifdef LPFC_MXP_STAT
20167 if (xri_owned <= xri_limit)
20168 qp->p_multixri_pool->below_limit_count++;
20169 else
20170 qp->p_multixri_pool->above_limit_count++;
20171 #endif
20173 /* XRI goes to either public or private free xri pool
20174 * based on watermark and xri_limit.
20175 */
20176 if ((pvt_pool->count < pvt_pool->low_watermark) ||
20177 (xri_owned < xri_limit &&
20178 pvt_pool->count < pvt_pool->high_watermark)) {
20179 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
20180 qp, free_pvt_pool);
20181 list_add_tail(&lpfc_ncmd->list,
20182 &pvt_pool->list);
20183 pvt_pool->count++;
20184 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20185 } else {
20186 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
20187 qp, free_pub_pool);
20188 list_add_tail(&lpfc_ncmd->list,
20189 &pbl_pool->list);
20190 pbl_pool->count++;
20191 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20192 }
20193 } else {
20194 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
20195 qp, free_xri);
20196 list_add_tail(&lpfc_ncmd->list,
20197 &qp->lpfc_io_buf_list_put);
20198 qp->put_io_bufs++;
20199 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
20200 iflag);
20201 }
20202 }
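/*
 * Illustrative summary of the release policy above:
 *
 *   expedite IO                                -> epd_pool
 *   pvt count < low watermark                  -> pvt_pool
 *   owned < xri_limit && pvt count < high wm   -> pvt_pool
 *   otherwise                                  -> pbl_pool
 *   cfg_xri_rebalancing == 0                   -> lpfc_io_buf_list_put
 */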
20204 /**
20205 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
20206 * @phba: pointer to lpfc hba data structure.
20207 * @pvt_pool: pointer to private pool data structure.
20208 * @ndlp: pointer to lpfc nodelist data structure.
20210 * This routine tries to get one free IO buf from private pool.
20213 * pointer to one free IO buf - if private pool is not empty
20214 * NULL - if private pool is empty
20215 */
20216 static struct lpfc_io_buf *
20217 lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
20218 struct lpfc_sli4_hdw_queue *qp,
20219 struct lpfc_pvt_pool *pvt_pool,
20220 struct lpfc_nodelist *ndlp)
20221 {
20222 struct lpfc_io_buf *lpfc_ncmd;
20223 struct lpfc_io_buf *lpfc_ncmd_next;
20224 unsigned long iflag;
20226 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
20227 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20228 &pvt_pool->list, list) {
20229 if (lpfc_test_rrq_active(
20230 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
20231 continue;
20232 list_del(&lpfc_ncmd->list);
20233 pvt_pool->count--;
20234 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20235 return lpfc_ncmd;
20236 }
20237 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20239 return NULL;
20240 }
20242 /**
20243 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
20244 * @phba: pointer to lpfc hba data structure.
20246 * This routine tries to get one free IO buf from expedite pool.
20249 * pointer to one free IO buf - if expedite pool is not empty
20250 * NULL - if expedite pool is empty
20251 */
20252 static struct lpfc_io_buf *
20253 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
20254 {
20255 struct lpfc_io_buf *lpfc_ncmd;
20256 struct lpfc_io_buf *lpfc_ncmd_next;
20257 unsigned long iflag;
20258 struct lpfc_epd_pool *epd_pool;
20260 epd_pool = &phba->epd_pool;
20261 lpfc_ncmd = NULL;
20263 spin_lock_irqsave(&epd_pool->lock, iflag);
20264 if (epd_pool->count > 0) {
20265 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20266 &epd_pool->list, list) {
20267 list_del(&lpfc_ncmd->list);
20268 epd_pool->count--;
20269 break;
20270 }
20271 }
20272 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20274 return lpfc_ncmd;
20275 }
20277 /**
20278 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
20279 * @phba: pointer to lpfc hba data structure.
20280 * @ndlp: pointer to lpfc nodelist data structure.
20281 * @hwqid: index of the HWQ to allocate from.
20282 * @expedite: 1 means this request is urgent.
20284 * This routine will do the following actions and then return a pointer to
20287 * 1. If the private free xri pool is empty, move some XRIs from the public
20288 * pools to the private pool.
20289 * 2. Get one XRI from private free xri pool.
20290 * 3. If we fail to get one from pvt_pool and this is an expedite request,
20291 * get one free xri from expedite pool.
20293 * Note: ndlp is only used on SCSI side for RRQ testing.
20294 * The caller should pass NULL for ndlp on NVME side.
20297 * pointer to one free IO buf - if private pool is not empty
20298 * NULL - if no free IO buf is available
20299 */
20300 static struct lpfc_io_buf *
20301 lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
20302 struct lpfc_nodelist *ndlp,
20303 int hwqid, int expedite)
20304 {
20305 struct lpfc_sli4_hdw_queue *qp;
20306 struct lpfc_multixri_pool *multixri_pool;
20307 struct lpfc_pvt_pool *pvt_pool;
20308 struct lpfc_io_buf *lpfc_ncmd;
20310 qp = &phba->sli4_hba.hdwq[hwqid];
20311 lpfc_ncmd = NULL;
20312 multixri_pool = qp->p_multixri_pool;
20313 pvt_pool = &multixri_pool->pvt_pool;
20314 multixri_pool->io_req_count++;
20316 /* If pvt_pool is empty, move some XRIs from public to private pool */
20317 if (pvt_pool->count == 0)
20318 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20320 /* Get one XRI from private free xri pool */
20321 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
20323 if (lpfc_ncmd) {
20324 lpfc_ncmd->hdwq = qp;
20325 lpfc_ncmd->hdwq_no = hwqid;
20326 } else if (expedite) {
20327 /* If we fail to get one from pvt_pool and this is an expedite
20328 * request, get one free xri from expedite pool.
20330 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
20331 }
20333 return lpfc_ncmd;
20334 }
20336 static inline struct lpfc_io_buf *
20337 lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
20338 {
20339 struct lpfc_sli4_hdw_queue *qp;
20340 struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
20342 qp = &phba->sli4_hba.hdwq[idx];
20343 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
20344 &qp->lpfc_io_buf_list_get, list) {
20345 if (lpfc_test_rrq_active(phba, ndlp,
20346 lpfc_cmd->cur_iocbq.sli4_lxritag))
20347 continue;
20349 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
20350 continue;
20352 list_del_init(&lpfc_cmd->list);
20353 qp->get_io_bufs--;
20354 lpfc_cmd->hdwq = qp;
20355 lpfc_cmd->hdwq_no = idx;
20356 return lpfc_cmd;
20357 }
20358 return NULL;
20359 }
20361 /**
20362 * lpfc_get_io_buf - Get one IO buffer from free pool
20363 * @phba: The HBA for which this call is being executed.
20364 * @ndlp: pointer to lpfc nodelist data structure.
20365 * @hwqid: index of the HWQ to allocate from.
20366 * @expedite: 1 means this request is urgent.
20368 * This routine gets one IO buffer from the free pool. If cfg_xri_rebalancing
20369 * ==1, it removes an IO buffer from the multiXRI pools. If it is 0, it removes
20370 * an IO buffer from the head of the @hwqid io_buf_list and returns it.
20372 * Note: ndlp is only used on SCSI side for RRQ testing.
20373 * The caller should pass NULL for ndlp on NVME side.
20375 * Return codes:
20376 * NULL - Error
20377 * Pointer to lpfc_io_buf - Success
20378 */
20379 struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
20380 struct lpfc_nodelist *ndlp,
20381 u32 hwqid, int expedite)
20382 {
20383 struct lpfc_sli4_hdw_queue *qp;
20384 unsigned long iflag;
20385 struct lpfc_io_buf *lpfc_cmd;
20387 qp = &phba->sli4_hba.hdwq[hwqid];
20388 lpfc_cmd = NULL;
20390 if (phba->cfg_xri_rebalancing)
20391 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
20392 phba, ndlp, hwqid, expedite);
20393 else {
20394 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
20395 qp, alloc_xri_get);
20396 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
20397 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20398 if (!lpfc_cmd) {
20399 lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
20400 qp, alloc_xri_put);
20401 list_splice(&qp->lpfc_io_buf_list_put,
20402 &qp->lpfc_io_buf_list_get);
20403 qp->get_io_bufs += qp->put_io_bufs;
20404 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
20405 qp->put_io_bufs = 0;
20406 spin_unlock(&qp->io_buf_list_put_lock);
20407 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
20408 expedite)
20409 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20410 }
20411 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);