/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;

/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
				   struct lpfc_cqe *);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_eqe *eqe, uint32_t qidx);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring,
				   struct lpfc_iocbq *cmdiocb);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 */
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint8_t *tmp;
	int i;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = q->qe[q->host_index].wqe;

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->entry_repost))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)wqe;
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			writeq(*((uint64_t *)(tmp + i)), q->dpp_regaddr + i);
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
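
/*
 * Illustrative sketch (not part of the driver): how a caller might post a
 * WQE with the locking contract lpfc_sli4_wq_put() documents. The helper
 * name and the choice of queue are hypothetical; real callers build the WQE
 * from an iocbq and use the WQ that matches their I/O path (for example
 * phba->sli4_hba.els_wq).
 */
static int __maybe_unused
lpfc_example_post_wqe(struct lpfc_hba *phba, struct lpfc_queue *wq,
		      union lpfc_wqe *wqe)
{
	unsigned long iflags;
	int rc;

	/* lpfc_sli4_wq_put() expects the caller to hold the hbalock */
	spin_lock_irqsave(&phba->hbalock, iflags);
	rc = lpfc_sli4_wq_put(wq, wqe);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* a nonzero return means no WQE slot was available on the ring */
	return rc;
}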
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 */
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 */
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = q->qe[q->host_index].mqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}
/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 */
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 */
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_hba *phba;
	struct lpfc_eqe *eqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	phba = q->phba;
	eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;
	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !q->hba_index)
		q->qe_valid = (q->qe_valid) ? 0 : 1;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return eqe;
}
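
/*
 * Illustrative sketch (not part of the driver): the wrap/valid-bit protocol
 * used by lpfc_sli4_eq_get() and lpfc_sli4_cq_get() in miniature. The index
 * advances modulo entry_count; on hardware with auto-valid entries (the
 * eqav/cqav parameters), the polarity that marks an entry as "new" flips on
 * every wrap, so entries left over from the previous lap never match.
 */
static uint8_t __maybe_unused
lpfc_example_valid_after_wrap(uint32_t next_idx, uint8_t qe_valid)
{
	/* wrapping to slot 0 starts a new lap with the opposite valid bit */
	return (next_idx == 0) ? (qe_valid ? 0 : 1) : qe_valid;
}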
/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 */
inline void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 */
inline void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 */
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.eqav) {
			temp_eqe = q->qe[q->host_index].eqe;
			bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
	return released;
}
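
/*
 * Illustrative sketch (not part of the driver): the canonical consume/release
 * pattern for an EQ. Entries are drained with lpfc_sli4_eq_get() and handed
 * to a handler, then the whole batch is acknowledged with one doorbell write
 * in lpfc_sli4_eq_release(), rearming the queue so the next event interrupts.
 * The helper name and the qidx plumbing here are hypothetical.
 */
static void __maybe_unused
lpfc_example_service_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
			uint32_t qidx)
{
	struct lpfc_eqe *eqe;

	while ((eqe = lpfc_sli4_eq_get(eq)) != NULL)
		lpfc_sli4_hba_handle_eqe(phba, eqe, qidx);

	/* one doorbell write pops all EQEs consumed above and rearms the EQ */
	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
}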
/**
 * lpfc_sli4_if6_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 */
uint32_t
lpfc_sli4_if6_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.eqav) {
			temp_eqe = q->qe[q->host_index].eqe;
			bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
	return released;
}
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 */
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_hba *phba;
	struct lpfc_cqe *cqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	phba = q->phba;
	cqe = q->qe[q->hba_index].cqe;

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;
	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !q->hba_index)
		q->qe_valid = (q->qe_valid) ? 0 : 1;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return cqe;
}
/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 */
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.cqav) {
			temp_qe = q->qe[q->host_index].cqe;
			bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
	return released;
}
/**
 * lpfc_sli4_if6_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 */
uint32_t
lpfc_sli4_if6_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.cqav) {
			temp_qe = q->qe[q->host_index].cqe;
			bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
	return released;
}
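
/*
 * Illustrative sketch (not part of the driver): draining a CQ while handing
 * entries back to the HBA in batches. Releasing with LPFC_QUEUE_NOARM inside
 * the loop keeps the firmware's free-entry count ahead of the consumer
 * without rearming; the final LPFC_QUEUE_REARM release re-enables event
 * generation. The helper name and the batch size of 64 are arbitrary choices
 * for illustration.
 */
static void __maybe_unused
lpfc_example_drain_cq(struct lpfc_queue *cq)
{
	struct lpfc_cqe *cqe;
	uint32_t count = 0;

	while ((cqe = lpfc_sli4_cq_get(cq)) != NULL) {
		/* a real consumer dispatches on the CQE type here */
		if (!(++count % 64))
			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
	}
	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
}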
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the header queue.
 * @drqe: The Data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next available
 * entries on @hq and @dq. This function will then ring the Receive Queue
 * Doorbell to signal the HBA to start processing the Receive Queue Entries.
 * This function returns the index that the rqe was copied to if successful.
 * If no entries are available on the queues then this function will return
 * -EBUSY; on invalid arguments it will return -ENOMEM or -EINVAL.
 * The caller is expected to hold the hbalock when calling this routine.
 */
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = hq->qe[hq_put_index].rqe;
	temp_drqe = dq->qe[dq_put_index].rqe;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->entry_repost)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}
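
/*
 * Illustrative sketch (not part of the driver): header and data RQEs must be
 * posted in lock step, with the hbalock held, because lpfc_sli4_rq_put()
 * rejects mismatched put indexes. The buffers' DMA addresses are split into
 * the address words of each RQE; the helper name is hypothetical, while the
 * hbq_dmabuf fields follow the driver's receive-buffer layout.
 */
static int __maybe_unused
lpfc_example_post_rq_pair(struct lpfc_hba *phba, struct hbq_dmabuf *rqb)
{
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;

	lockdep_assert_held(&phba->hbalock);

	hrqe.address_lo = putPaddrLow(rqb->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(rqb->hbuf.phys);
	drqe.address_lo = putPaddrLow(rqb->dbuf.phys);
	drqe.address_hi = putPaddrHigh(rqb->dbuf.phys);

	/* >= 0 is the slot index used; < 0 is an errno */
	return lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
				&hrqe, &drqe);
}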
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 */
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}
/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 */
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 */
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}
/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object; else it returns NULL.
 */
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}
/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 */
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 */
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 */
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}
/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 */
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq)
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}
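
/*
 * Illustrative sketch (not part of the driver): the jiffies arithmetic used
 * above. An RRQ stays active for RATOV plus one second, and the timer is
 * re-armed for the soonest remaining expiry, so one timer covers the whole
 * active_rrq_list. The helper name is hypothetical.
 */
static unsigned long __maybe_unused
lpfc_example_rrq_deadline(struct lpfc_hba *phba)
{
	/* fc_ratov is in seconds; jiffies-based timers want ticks */
	return jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
}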
/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The targets DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 */
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}
/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If ndlp is NULL remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 */
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}
/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 */
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	lockdep_assert_held(&phba->hbalock);
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}
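
/*
 * Illustrative sketch (not part of the driver): the RRQ state for an exchange
 * is a single bit per XRI in the node's bitmap, so test/set/clear reduce to
 * the plain bitmap operators. test_and_set_bit() is what lets
 * lpfc_set_rrq_active() detect "already active" atomically. The helper name
 * is hypothetical.
 */
static bool __maybe_unused
lpfc_example_xri_quarantined(struct lpfc_nodelist *ndlp, uint16_t xritag)
{
	if (!ndlp || !ndlp->active_rrqs_xri_bitmap)
		return false;
	return test_bit(xritag, ndlp->active_rrqs_xri_bitmap);
}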
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns   0 rrq activated for this xri
 *         < 0 No memory or invalid ndlp.
 */
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
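
/*
 * Illustrative sketch (not part of the driver): the RRQ lifecycle as a caller
 * sees it. An aborted exchange sets the bit (and queues an RRQ ELS if
 * configured); the XRI must not be reused while the bit is set; expiry or
 * RRQ completion clears it via lpfc_clr_rrq_active(). The helper name is
 * hypothetical.
 */
static int __maybe_unused
lpfc_example_quarantine_xri(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
			    uint16_t xritag, uint16_t rxid)
{
	/* send_rrq=1 asks the driver to notify the target with an RRQ ELS */
	return lpfc_set_rrq_active(phba, ndlp, xritag, rxid, 1);
}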
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the hbalock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it returns a pointer to the newly
 * allocated sglq object; else it returns NULL.
 */
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	lockdep_assert_held(&phba->hbalock);

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}
/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it returns a pointer to the newly
 * allocated sglq object; else it returns NULL.
 */
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}
/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object; else it returns NULL.
 */
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}
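
/*
 * Illustrative sketch (not part of the driver): the locked/unlocked pairing
 * convention used throughout this file. Leaf helpers prefixed with "__"
 * assume the hbalock is already held (and assert it via lockdep); the
 * unprefixed wrappers take and drop the lock themselves, so a caller picks
 * whichever matches its context. The helper name is hypothetical.
 */
static struct lpfc_iocbq * __maybe_unused
lpfc_example_alloc_iocbq(struct lpfc_hba *phba, bool have_hbalock)
{
	struct lpfc_iocbq *iocbq;
	unsigned long iflags;

	if (have_hbalock)
		return __lpfc_sli_get_iocbq(phba);	/* lockdep-checked */

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}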
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
 */
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	lockdep_assert_held(&phba->hbalock);

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		pring = phba->sli4_hba.els_wq->pring;
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 */
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 */
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 */
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 */
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}
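
/*
 * Illustrative sketch (not part of the driver): how a flush path typically
 * feeds lpfc_sli_cancel_iocbs(). Pending requests are spliced off a ring's
 * txq under the hbalock, then completed outside the lock with an error
 * status such as IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN. The helper name is
 * hypothetical.
 */
static void __maybe_unused
lpfc_example_flush_txq(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	LIST_HEAD(completions);
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	list_splice_init(&pring->txq, &completions);
	pring->txq_cnt = 0;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* completion callbacks run here, so the hbalock must not be held */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
}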
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 */
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
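
/*
 * Illustrative sketch (not part of the driver): how a ring event handler
 * might dispatch on the classification returned above. Each completion type
 * maps to a different processing path; unknown types are dropped. The helper
 * name is hypothetical and the bodies are elided.
 */
static void __maybe_unused
lpfc_example_dispatch_iocb(struct lpfc_hba *phba, IOCB_t *entry)
{
	switch (lpfc_sli_iocb_cmd_type(entry->ulpCommand)) {
	case LPFC_SOL_IOCB:	/* completion for a command we issued */
	case LPFC_ABORT_IOCB:	/* completion for an abort we issued */
		/* look up the originating iocbq by iotag and complete it */
		break;
	case LPFC_UNSOL_IOCB:	/* unsolicited receive from the wire */
		/* hand the sequence to the upper-layer protocol */
		break;
	case LPFC_UNKNOWN_IOCB:
	default:
		/* unsupported entry: log it and move on */
		break;
	}
}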
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 */
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}
/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 */
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	lockdep_assert_held(&phba->hbalock);

	BUG_ON(!piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}
/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get the next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns the first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 */
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}
/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * the iocb slot returned by the function is not guaranteed to be available.
 * The function returns a pointer to the next available iocb slot if there
 * is an available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 */
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		     pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}
/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 */
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kzalloc(new_len * sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}
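
/*
 * Illustrative sketch (not part of the driver): the shape of the lookup-table
 * growth in lpfc_sli_next_iotag(), simplified. Allocation happens with the
 * hbalock dropped, and the length is re-checked after the lock is retaken
 * because a competing thread may have grown the table in the meantime. The
 * helper and its exact locking contract here are hypothetical.
 */
static int __maybe_unused
lpfc_example_grow_iotag_table(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_iocbq **new_arr, **old_arr;
	size_t new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;

	/* never allocate while holding a spinlock */
	new_arr = kcalloc(new_len, sizeof(*new_arr), GFP_KERNEL);
	if (!new_arr)
		return -ENOMEM;

	spin_lock_irq(&phba->hbalock);
	if (new_len <= psli->iocbq_lookup_len) {
		/* lost the race: another thread already grew the table */
		spin_unlock_irq(&phba->hbalock);
		kfree(new_arr);
		return 0;
	}
	old_arr = psli->iocbq_lookup;
	if (old_arr)
		memcpy(new_arr, old_arr,
		       (psli->last_iotag + 1) * sizeof(*new_arr));
	psli->iocbq_lookup = new_arr;
	psli->iocbq_lookup_len = new_len;
	spin_unlock_irq(&phba->hbalock);
	kfree(old_arr);
	return 0;
}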
/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
 */
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	lockdep_assert_held(&phba->hbalock);
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;


	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform firmware
 * that there is pending work to be done for this ring and requests an
 * interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 */
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}

/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 */
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}
/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when driver
 * detects space available in the ring.
 */
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */

	if (lpfc_is_link_up(phba) &&
	    (!list_empty(&pring->txq)) &&
	    (pring->ringno != LPFC_FCP_RING ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}
1887 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1888 * @phba: Pointer to HBA context object.
1889 * @hbqno: HBQ number.
1891 * This function is called with hbalock held to get the next
1892 * available slot for the given HBQ. If a free slot is
1893 * available for the HBQ, it will return a pointer to the next available
1894 * HBQ entry; otherwise it will return NULL.
1896 static struct lpfc_hbq_entry *
1897 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1899 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1901 lockdep_assert_held(&phba->hbalock);
1903 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1904 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1905 hbqp->next_hbqPutIdx = 0;
1907 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1908 uint32_t raw_index = phba->hbq_get[hbqno];
1909 uint32_t getidx = le32_to_cpu(raw_index);
1911 hbqp->local_hbqGetIdx = getidx;
1913 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1914 lpfc_printf_log(phba, KERN_ERR,
1915 LOG_SLI | LOG_VPORT,
1916 "1802 HBQ %d: local_hbqGetIdx "
1917 "%u is >= hbqp->entry_count %u\n",
1918 hbqno, hbqp->local_hbqGetIdx,
1919 hbqp->entry_count);
1921 phba->link_state = LPFC_HBA_ERROR;
1922 return NULL;
1925 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1926 return NULL;
1929 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1930 hbqp->hbqPutIdx;
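/*
 * Worked example (editorial, hypothetical values): with entry_count = 16,
 * hbqPutIdx = 15 and local_hbqGetIdx = 3, the incremented put index wraps
 * to 0 and the slot is usable; the HBQ only reports "full" when the
 * incremented put index catches up with the get index:
 *
 *	next = hbqp->hbqPutIdx + 1;
 *	if (next >= hbqp->entry_count)
 *		next = 0;				// wrap
 *	full = (next == hbqp->local_hbqGetIdx);		// one slot kept free
 */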
1934 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
1935 * @phba: Pointer to HBA context object.
1937 * This function is called with no lock held to free all the
1938 * hbq buffers while uninitializing the SLI interface. It also
1939 * frees the HBQ buffers returned by the firmware but not yet
1940 * processed by the upper layers.
1943 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1945 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1946 struct hbq_dmabuf *hbq_buf;
1947 unsigned long flags;
1948 int i, hbq_count;
1950 hbq_count = lpfc_sli_hbq_count();
1951 /* Return all memory used by all HBQs */
1952 spin_lock_irqsave(&phba->hbalock, flags);
1953 for (i = 0; i < hbq_count; ++i) {
1954 list_for_each_entry_safe(dmabuf, next_dmabuf,
1955 &phba->hbqs[i].hbq_buffer_list, list) {
1956 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1957 list_del(&hbq_buf->dbuf.list);
1958 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1960 phba->hbqs[i].buffer_count = 0;
1963 /* Mark the HBQs not in use */
1964 phba->hbq_in_use = 0;
1965 spin_unlock_irqrestore(&phba->hbalock, flags);
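/*
 * Usage sketch (editorial): this is a teardown-path helper, typically
 * invoked while bringing the SLI interface down, after which no further
 * buffers will be posted because hbq_in_use is cleared:
 *
 *	lpfc_sli_hbqbuf_free_all(phba);	// takes and releases hbalock itself
 */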
1969 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
1970 * @phba: Pointer to HBA context object.
1971 * @hbqno: HBQ number.
1972 * @hbq_buf: Pointer to HBQ buffer.
1974 * This function is called with the hbalock held to post a
1975 * hbq buffer to the firmware. If the function finds an empty
1976 * slot in the HBQ, it will post the buffer. The function will return
1977 * a pointer to the hbq entry if it successfully posts the buffer;
1978 * otherwise it will return NULL.
1981 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
1982 struct hbq_dmabuf *hbq_buf)
1984 lockdep_assert_held(&phba->hbalock);
1985 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1989 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1990 * @phba: Pointer to HBA context object.
1991 * @hbqno: HBQ number.
1992 * @hbq_buf: Pointer to HBQ buffer.
1994 * This function is called with the hbalock held to post a hbq buffer to the
1995 * firmware. If the function finds an empty slot in the HBQ, it will post the
1996 * buffer and place it on the hbq_buffer_list. The function will return zero if
1997 * it successfully posts the buffer; otherwise it will return an error.
2000 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2001 struct hbq_dmabuf *hbq_buf)
2003 struct lpfc_hbq_entry *hbqe;
2004 dma_addr_t physaddr = hbq_buf->dbuf.phys;
2006 lockdep_assert_held(&phba->hbalock);
2007 /* Get next HBQ entry slot to use */
2008 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2009 if (hbqe) {
2010 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2012 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2013 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
2014 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2015 hbqe->bde.tus.f.bdeFlags = 0;
2016 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2017 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2019 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2020 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2022 readl(phba->hbq_put + hbqno); /* flush */
2023 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2024 return 0;
2025 } else
2026 return -ENOMEM;
2030 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2031 * @phba: Pointer to HBA context object.
2032 * @hbqno: HBQ number.
2033 * @hbq_buf: Pointer to HBQ buffer.
2035 * This function is called with the hbalock held to post an RQE to the SLI4
2036 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2037 * the hbq_buffer_list and return zero, otherwise it will return an error.
2040 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2041 struct hbq_dmabuf *hbq_buf)
2044 struct lpfc_rqe hrqe;
2045 struct lpfc_rqe drqe;
2046 struct lpfc_queue *hrq;
2047 struct lpfc_queue *drq;
2049 if (hbqno != LPFC_ELS_HBQ)
2050 return 1; /* only the ELS HBQ maps to the SLI4 RQ pair (assumption) */
2051 hrq = phba->sli4_hba.hdr_rq;
2052 drq = phba->sli4_hba.dat_rq;
2054 lockdep_assert_held(&phba->hbalock);
2055 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2056 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2057 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2058 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2059 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2060 if (rc < 0)
2061 return rc;
2062 hbq_buf->tag = (rc | (hbqno << 16));
2063 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2064 return 0;
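/*
 * Editorial sketch of the SLI4 receive model assumed above: each received
 * frame consumes one entry from the header RQ and one from the data RQ,
 * so the two RQEs are always posted as a pair, and the put index returned
 * by lpfc_sli4_rq_put() is folded into the buffer tag:
 *
 *	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);	// paired post
 *	tag = rc | (hbqno << 16);	// low 16 bits: index, high: queue
 */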
2067 /* HBQ for ELS and CT traffic. */
2068 static struct lpfc_hbq_init lpfc_els_hbq = {
2073 .ring_mask = (1 << LPFC_ELS_RING),
2080 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2081 &lpfc_els_hbq,
2082 };
2085 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2086 * @phba: Pointer to HBA context object.
2087 * @hbqno: HBQ number.
2088 * @count: Number of HBQ buffers to be posted.
2090 * This function is called with no lock held to post more hbq buffers to the
2091 * given HBQ. The function returns the number of HBQ buffers successfully
2092 * posted.
2095 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2097 uint32_t i, posted = 0;
2098 unsigned long flags;
2099 struct hbq_dmabuf *hbq_buffer;
2100 LIST_HEAD(hbq_buf_list);
2101 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2102 return 0;
2104 if ((phba->hbqs[hbqno].buffer_count + count) >
2105 lpfc_hbq_defs[hbqno]->entry_count)
2106 count = lpfc_hbq_defs[hbqno]->entry_count -
2107 phba->hbqs[hbqno].buffer_count;
2110 /* Allocate HBQ entries */
2111 for (i = 0; i < count; i++) {
2112 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2113 if (!hbq_buffer)
2114 break;
2115 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2117 /* Check whether HBQ is still in use */
2118 spin_lock_irqsave(&phba->hbalock, flags);
2119 if (!phba->hbq_in_use)
2120 goto err;
2121 while (!list_empty(&hbq_buf_list)) {
2122 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2123 dbuf.list);
2124 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2125 (hbqno << 16));
2126 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2127 phba->hbqs[hbqno].buffer_count++;
2128 posted++;
2129 } else
2130 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2132 spin_unlock_irqrestore(&phba->hbalock, flags);
2133 return posted;
2134 err:
2135 spin_unlock_irqrestore(&phba->hbalock, flags);
2136 while (!list_empty(&hbq_buf_list)) {
2137 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2138 dbuf.list);
2139 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2141 return 0;
2145 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2146 * @phba: Pointer to HBA context object.
2147 * @qno: HBQ queue number.
2149 * This function posts more buffers to the HBQ. This function
2150 * is called with no lock held. The function returns the number of HBQ entries
2151 * successfully allocated.
2154 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2156 if (phba->sli_rev == LPFC_SLI_REV4)
2157 return 0;
2158 else
2159 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2160 lpfc_hbq_defs[qno]->add_count);
2164 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2165 * @phba: Pointer to HBA context object.
2166 * @qno: HBQ queue number.
2168 * This function is called from SLI initialization code path with
2169 * no lock held to post initial HBQ buffers to firmware. The
2170 * function returns the number of HBQ entries successfully allocated.
2173 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2175 if (phba->sli_rev == LPFC_SLI_REV4)
2176 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2177 lpfc_hbq_defs[qno]->entry_count);
2178 else
2179 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2180 lpfc_hbq_defs[qno]->init_count);
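/*
 * Editorial note: the two entry points above differ only in how many
 * buffers they try to post -- the init-time call primes the queue at
 * bring-up (entry_count on SLI4, init_count on SLI3) while the add call
 * tops it up later; both funnel into lpfc_sli_hbqbuf_fill_hbqs():
 *
 *	lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);	// initial prime
 *	lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);	// later top-up
 */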
2184 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2185 * @rb_list: Pointer to the hbq buffer list to take the buffer from.
2188 * This function removes the first hbq buffer on an hbq list and returns a
2189 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2191 static struct hbq_dmabuf *
2192 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2194 struct lpfc_dmabuf *d_buf;
2196 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2197 if (!d_buf)
2198 return NULL;
2199 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2203 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2204 * @phba: Pointer to HBA context object.
2205 * @hrq: Pointer to the header receive queue to take the buffer from.
2207 * This function removes the first RQ buffer on an RQ buffer list and returns a
2208 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2210 static struct rqb_dmabuf *
2211 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2213 struct lpfc_dmabuf *h_buf;
2214 struct lpfc_rqb *rqbp;
2216 rqbp = hrq->rqbp;
2217 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2218 struct lpfc_dmabuf, list);
2219 if (!h_buf)
2220 return NULL;
2221 rqbp->buffer_count--;
2222 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2226 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2227 * @phba: Pointer to HBA context object.
2228 * @tag: Tag of the hbq buffer.
2230 * This function searches for the hbq buffer associated with the given tag in
2231 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2232 * otherwise it returns NULL.
2234 static struct hbq_dmabuf *
2235 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2237 struct lpfc_dmabuf *d_buf;
2238 struct hbq_dmabuf *hbq_buf;
2239 uint32_t hbqno;
2241 hbqno = tag >> 16;
2242 if (hbqno >= LPFC_MAX_HBQS)
2243 return NULL;
2245 spin_lock_irq(&phba->hbalock);
2246 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2247 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2248 if (hbq_buf->tag == tag) {
2249 spin_unlock_irq(&phba->hbalock);
2250 return hbq_buf;
2253 spin_unlock_irq(&phba->hbalock);
2254 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
2255 "1803 Bad hbq tag. Data: x%x x%x\n",
2256 tag, phba->hbqs[tag >> 16].buffer_count);
2257 return NULL;
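/*
 * Editorial sketch of the tag layout the lookup above relies on: the HBQ
 * number lives in the upper 16 bits and the per-queue buffer index in the
 * lower 16, matching how the tag is built at posting time:
 *
 *	tag   = buffer_count | (hbqno << 16);	// encode at post time
 *	hbqno = tag >> 16;			// decode the queue number
 */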
2261 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2262 * @phba: Pointer to HBA context object.
2263 * @hbq_buffer: Pointer to HBQ buffer.
2265 * This function is called with the hbalock held. This function gives back
2266 * the hbq buffer to firmware. If the HBQ does not have space to
2267 * post the buffer, it will free the buffer.
2270 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2275 hbqno = hbq_buffer->tag >> 16;
2276 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2277 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2282 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2283 * @mbxCommand: mailbox command code.
2285 * This function is called by the mailbox event handler function to verify
2286 * that the completed mailbox command is a legitimate mailbox command. If the
2287 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2288 * and the mailbox event handler will take the HBA offline.
2291 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2293 uint8_t ret;
2295 switch (mbxCommand) {
2299 case MBX_WRITE_VPARMS:
2300 case MBX_RUN_BIU_DIAG:
2303 case MBX_CONFIG_LINK:
2304 case MBX_CONFIG_RING:
2305 case MBX_RESET_RING:
2306 case MBX_READ_CONFIG:
2307 case MBX_READ_RCONFIG:
2308 case MBX_READ_SPARM:
2309 case MBX_READ_STATUS:
2313 case MBX_READ_LNK_STAT:
2315 case MBX_UNREG_LOGIN:
2317 case MBX_DUMP_MEMORY:
2318 case MBX_DUMP_CONTEXT:
2321 case MBX_UPDATE_CFG:
2323 case MBX_DEL_LD_ENTRY:
2324 case MBX_RUN_PROGRAM:
2326 case MBX_SET_VARIABLE:
2327 case MBX_UNREG_D_ID:
2328 case MBX_KILL_BOARD:
2329 case MBX_CONFIG_FARP:
2332 case MBX_RUN_BIU_DIAG64:
2333 case MBX_CONFIG_PORT:
2334 case MBX_READ_SPARM64:
2335 case MBX_READ_RPI64:
2336 case MBX_REG_LOGIN64:
2337 case MBX_READ_TOPOLOGY:
2340 case MBX_LOAD_EXP_ROM:
2341 case MBX_ASYNCEVT_ENABLE:
2345 case MBX_PORT_CAPABILITIES:
2346 case MBX_PORT_IOV_CONTROL:
2347 case MBX_SLI4_CONFIG:
2348 case MBX_SLI4_REQ_FTRS:
2350 case MBX_UNREG_FCFI:
2355 case MBX_RESUME_RPI:
2356 case MBX_READ_EVENT_LOG_STATUS:
2357 case MBX_READ_EVENT_LOG:
2358 case MBX_SECURITY_MGMT:
2360 case MBX_ACCESS_VDATA:
2361 ret = mbxCommand;
2362 break;
2363 default:
2364 ret = MBX_SHUTDOWN;
2365 break;
2367 return ret;
2371 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2372 * @phba: Pointer to HBA context object.
2373 * @pmboxq: Pointer to mailbox command.
2375 * This is the completion handler function for mailbox commands issued from
2376 * the lpfc_sli_issue_mbox_wait function. This function is called by the
2377 * mailbox event handler function with no lock held. This function
2378 * will wake up the thread waiting on the wait queue pointed to by context1
2379 * of the mailbox.
2382 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2384 wait_queue_head_t *pdone_q;
2385 unsigned long drvr_flag;
2388 * If pdone_q is empty, the driver thread gave up waiting and
2389 * continued running.
2391 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2392 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2393 pdone_q = (wait_queue_head_t *) pmboxq->context1;
2394 if (pdone_q)
2395 wake_up_interruptible(pdone_q);
2396 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
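/*
 * Editorial sketch of the waiter side this handler pairs with (the shape
 * follows lpfc_sli_issue_mbox_wait; the timeout value is hypothetical):
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
 *	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 *	pmboxq->context1 = &done_q;
 *	lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *	wait_event_interruptible_timeout(done_q,
 *			pmboxq->mbox_flag & LPFC_MBX_WAKE,
 *			msecs_to_jiffies(timeout * 1000));
 */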
2402 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2403 * @phba: Pointer to HBA context object.
2404 * @pmb: Pointer to mailbox object.
2406 * This function is the default mailbox completion handler. It
2407 * frees the memory resources associated with the completed mailbox
2408 * command. If the completed command is a REG_LOGIN mailbox command,
2409 * this function will issue an UNREG_LOGIN to re-claim the RPI.
2412 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2414 struct lpfc_vport *vport = pmb->vport;
2415 struct lpfc_dmabuf *mp;
2416 struct lpfc_nodelist *ndlp;
2417 struct Scsi_Host *shost;
2418 uint16_t rpi, vpi;
2419 int rc;
2421 mp = (struct lpfc_dmabuf *) (pmb->context1);
2423 if (mp) {
2424 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2425 kfree(mp);
2429 * If a REG_LOGIN succeeded after the node is destroyed or the node
2430 * is in re-discovery, the driver needs to clean up the RPI.
2432 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2433 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2434 !pmb->u.mb.mbxStatus) {
2435 rpi = pmb->u.mb.un.varWords[0];
2436 vpi = pmb->u.mb.un.varRegLogin.vpi;
2437 lpfc_unreg_login(phba, vpi, rpi, pmb);
2438 pmb->vport = vport;
2439 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2440 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2441 if (rc != MBX_NOT_FINISHED)
2442 return;
2445 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2446 !(phba->pport->load_flag & FC_UNLOADING) &&
2447 !pmb->u.mb.mbxStatus) {
2448 shost = lpfc_shost_from_vport(vport);
2449 spin_lock_irq(shost->host_lock);
2450 vport->vpi_state |= LPFC_VPI_REGISTERED;
2451 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2452 spin_unlock_irq(shost->host_lock);
2455 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2456 ndlp = (struct lpfc_nodelist *)pmb->context2;
2457 lpfc_nlp_put(ndlp);
2458 pmb->context2 = NULL;
2461 /* Check security permission status on INIT_LINK mailbox command */
2462 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2463 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2464 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2465 "2860 SLI authentication is required "
2466 "for INIT_LINK but has not been done yet\n");
2468 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2469 lpfc_sli4_mbox_cmd_free(phba, pmb);
2470 else
2471 mempool_free(pmb, phba->mbox_mem_pool);
2474 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2475 * @phba: Pointer to HBA context object.
2476 * @pmb: Pointer to mailbox object.
2478 * This function is the unreg rpi mailbox completion handler. It
2479 * frees the memory resources associated with the completed mailbox
2480 * command. An additional reference is put on the ndlp to prevent
2481 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2482 * the unreg mailbox command completes; this routine puts that
2483 * reference back.
2487 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2489 struct lpfc_vport *vport = pmb->vport;
2490 struct lpfc_nodelist *ndlp;
2492 ndlp = pmb->context1;
2493 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2494 if (phba->sli_rev == LPFC_SLI_REV4 &&
2495 (bf_get(lpfc_sli_intf_if_type,
2496 &phba->sli4_hba.sli_intf) >=
2497 LPFC_SLI_INTF_IF_TYPE_2)) {
2499 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
2500 "0010 UNREG_LOGIN vpi:%x "
2501 "rpi:%x DID:%x map:%x %p\n",
2502 vport->vpi, ndlp->nlp_rpi,
2504 ndlp->nlp_usg_map, ndlp);
2505 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2506 lpfc_nlp_put(ndlp);
2511 mempool_free(pmb, phba->mbox_mem_pool);
2515 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2516 * @phba: Pointer to HBA context object.
2518 * This function is called with no lock held. This function processes all
2519 * the completed mailbox commands and gives them to the upper layers. The interrupt
2520 * service routine processes mailbox completion interrupt and adds completed
2521 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2522 * The worker thread calls lpfc_sli_handle_mb_event, which will return the
2523 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
2524 * function returns the mailbox commands to the upper layer by calling the
2525 * completion handler function of each mailbox.
2528 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2530 MAILBOX_t *pmbox;
2531 LPFC_MBOXQ_t *pmb;
2532 int rc;
2533 LIST_HEAD(cmplq);
2535 phba->sli.slistat.mbox_event++;
2537 /* Get all completed mailbox buffers into the cmplq */
2538 spin_lock_irq(&phba->hbalock);
2539 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2540 spin_unlock_irq(&phba->hbalock);
2542 /* Get a Mailbox buffer to setup mailbox commands for callback */
2543 do {
2544 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2545 if (pmb == NULL)
2546 break;
2548 pmbox = &pmb->u.mb;
2550 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2551 if (pmb->vport) {
2552 lpfc_debugfs_disc_trc(pmb->vport,
2553 LPFC_DISC_TRC_MBOX_VPORT,
2554 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2555 (uint32_t)pmbox->mbxCommand,
2556 pmbox->un.varWords[0],
2557 pmbox->un.varWords[1]);
2558 } else {
2560 lpfc_debugfs_disc_trc(phba->pport,
2561 LPFC_DISC_TRC_MBOX,
2562 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2563 (uint32_t)pmbox->mbxCommand,
2564 pmbox->un.varWords[0],
2565 pmbox->un.varWords[1]);
2570 * It is a fatal error if an unknown mbox command completes.
2572 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2574 /* Unknown mailbox command compl */
2575 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2576 "(%d):0323 Unknown Mailbox command "
2577 "x%x (x%x/x%x) Cmpl\n",
2578 pmb->vport ? pmb->vport->vpi : 0,
2580 lpfc_sli_config_mbox_subsys_get(phba,
2582 lpfc_sli_config_mbox_opcode_get(phba,
2584 phba->link_state = LPFC_HBA_ERROR;
2585 phba->work_hs = HS_FFER3;
2586 lpfc_handle_eratt(phba);
2590 if (pmbox->mbxStatus) {
2591 phba->sli.slistat.mbox_stat_err++;
2592 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2593 /* Mbox cmd cmpl error - RETRYing */
2594 lpfc_printf_log(phba, KERN_INFO,
2596 "(%d):0305 Mbox cmd cmpl "
2597 "error - RETRYing Data: x%x "
2598 "(x%x/x%x) x%x x%x x%x\n",
2599 pmb->vport ? pmb->vport->vpi : 0,
2601 lpfc_sli_config_mbox_subsys_get(phba,
2603 lpfc_sli_config_mbox_opcode_get(phba,
2606 pmbox->un.varWords[0],
2607 pmb->vport->port_state);
2608 pmbox->mbxStatus = 0;
2609 pmbox->mbxOwner = OWN_HOST;
2610 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2611 if (rc != MBX_NOT_FINISHED)
2616 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2617 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2618 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2619 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2621 pmb->vport ? pmb->vport->vpi : 0,
2623 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2624 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2626 *((uint32_t *) pmbox),
2627 pmbox->un.varWords[0],
2628 pmbox->un.varWords[1],
2629 pmbox->un.varWords[2],
2630 pmbox->un.varWords[3],
2631 pmbox->un.varWords[4],
2632 pmbox->un.varWords[5],
2633 pmbox->un.varWords[6],
2634 pmbox->un.varWords[7],
2635 pmbox->un.varWords[8],
2636 pmbox->un.varWords[9],
2637 pmbox->un.varWords[10]);
2640 pmb->mbox_cmpl(phba, pmb);
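/*
 * Editorial note: every mailbox queued through lpfc_sli_issue_mbox is
 * expected to carry a completion handler; commands needing no special
 * post-processing use lpfc_sli_def_mbox_cmpl above, which just releases
 * the mailbox resources:
 *
 *	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;	// default teardown
 *	lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 */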
2646 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2647 * @phba: Pointer to HBA context object.
2648 * @pring: Pointer to driver SLI ring object.
2649 * @tag: Buffer tag from the response iocb.
2651 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
2652 * is set in the tag, the buffer was posted for a particular exchange and
2653 * the function will return the buffer without replacing it.
2654 * If the buffer is for unsolicited ELS or CT traffic, this function
2655 * returns the buffer and also posts another buffer to the firmware.
2657 static struct lpfc_dmabuf *
2658 lpfc_sli_get_buff(struct lpfc_hba *phba,
2659 struct lpfc_sli_ring *pring,
2662 struct hbq_dmabuf *hbq_entry;
2664 if (tag & QUE_BUFTAG_BIT)
2665 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2666 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2669 return &hbq_entry->dbuf;
2673 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2674 * @phba: Pointer to HBA context object.
2675 * @pring: Pointer to driver SLI ring object.
2676 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2677 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2678 * @fch_type: the type for the first frame of the sequence.
2680 * This function is called with no lock held. This function uses the r_ctl and
2681 * type of the received sequence to find the correct callback function to call
2682 * to process the sequence.
2685 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2686 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2693 lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
2699 /* unSolicited Responses */
2700 if (pring->prt[0].profile) {
2701 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2702 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2706 /* We must search, based on rctl / type
2707 for the right routine */
2708 for (i = 0; i < pring->num_mask; i++) {
2709 if ((pring->prt[i].rctl == fch_r_ctl) &&
2710 (pring->prt[i].type == fch_type)) {
2711 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2712 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2713 (phba, pring, saveq);
2721 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
2722 * @phba: Pointer to HBA context object.
2723 * @pring: Pointer to driver SLI ring object.
2724 * @saveq: Pointer to the unsolicited iocb.
2726 * This function is called with no lock held by the ring event handler
2727 * when there is an unsolicited iocb posted to the response ring by the
2728 * firmware. This function gets the buffer associated with the iocbs
2729 * and calls the event handler for the ring. This function handles both
2730 * qring buffers and hbq buffers.
2731 * When the function returns 1, the caller can free the iocb object;
2732 * otherwise upper layer functions will free the iocb objects.
2735 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2736 struct lpfc_iocbq *saveq)
2738 IOCB_t *irsp;
2739 WORD5 *w5p;
2740 uint32_t Rctl, Type;
2741 struct lpfc_iocbq *iocbq;
2742 struct lpfc_dmabuf *dmzbuf;
2744 irsp = &(saveq->iocb);
2746 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2747 if (pring->lpfc_sli_rcv_async_status)
2748 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2750 lpfc_printf_log(phba,
2753 "0316 Ring %d handler: unexpected "
2754 "ASYNC_STATUS iocb received evt_code "
2757 irsp->un.asyncstat.evt_code);
2761 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2762 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2763 if (irsp->ulpBdeCount > 0) {
2764 dmzbuf = lpfc_sli_get_buff(phba, pring,
2765 irsp->un.ulpWord[3]);
2766 lpfc_in_buf_free(phba, dmzbuf);
2769 if (irsp->ulpBdeCount > 1) {
2770 dmzbuf = lpfc_sli_get_buff(phba, pring,
2771 irsp->unsli3.sli3Words[3]);
2772 lpfc_in_buf_free(phba, dmzbuf);
2775 if (irsp->ulpBdeCount > 2) {
2776 dmzbuf = lpfc_sli_get_buff(phba, pring,
2777 irsp->unsli3.sli3Words[7]);
2778 lpfc_in_buf_free(phba, dmzbuf);
2784 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2785 if (irsp->ulpBdeCount != 0) {
2786 saveq->context2 = lpfc_sli_get_buff(phba, pring,
2787 irsp->un.ulpWord[3]);
2788 if (!saveq->context2)
2789 lpfc_printf_log(phba,
2792 "0341 Ring %d Cannot find buffer for "
2793 "an unsolicited iocb. tag 0x%x\n",
2795 irsp->un.ulpWord[3]);
2797 if (irsp->ulpBdeCount == 2) {
2798 saveq->context3 = lpfc_sli_get_buff(phba, pring,
2799 irsp->unsli3.sli3Words[7]);
2800 if (!saveq->context3)
2801 lpfc_printf_log(phba,
2804 "0342 Ring %d Cannot find buffer for an"
2805 " unsolicited iocb. tag 0x%x\n",
2807 irsp->unsli3.sli3Words[7]);
2809 list_for_each_entry(iocbq, &saveq->list, list) {
2810 irsp = &(iocbq->iocb);
2811 if (irsp->ulpBdeCount != 0) {
2812 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2813 irsp->un.ulpWord[3]);
2814 if (!iocbq->context2)
2815 lpfc_printf_log(phba,
2818 "0343 Ring %d Cannot find "
2819 "buffer for an unsolicited iocb"
2820 ". tag 0x%x\n", pring->ringno,
2821 irsp->un.ulpWord[3]);
2823 if (irsp->ulpBdeCount == 2) {
2824 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2825 irsp->unsli3.sli3Words[7]);
2826 if (!iocbq->context3)
2827 lpfc_printf_log(phba,
2830 "0344 Ring %d Cannot find "
2831 "buffer for an unsolicited "
2834 irsp->unsli3.sli3Words[7]);
2838 if (irsp->ulpBdeCount != 0 &&
2839 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2840 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2843 /* search continue save q for same XRI */
2844 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2845 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2846 saveq->iocb.unsli3.rcvsli3.ox_id) {
2847 list_add_tail(&saveq->list, &iocbq->list);
2853 list_add_tail(&saveq->clist,
2854 &pring->iocb_continue_saveq);
2855 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2856 list_del_init(&iocbq->clist);
2857 saveq = iocbq;
2858 irsp = &(saveq->iocb);
2862 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2863 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2864 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2865 Rctl = FC_RCTL_ELS_REQ;
2866 Type = FC_TYPE_ELS;
2867 } else {
2868 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2869 Rctl = w5p->hcsw.Rctl;
2870 Type = w5p->hcsw.Type;
2872 /* Firmware Workaround */
2873 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2874 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2875 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2876 Rctl = FC_RCTL_ELS_REQ;
2877 Type = FC_TYPE_ELS;
2878 w5p->hcsw.Rctl = Rctl;
2879 w5p->hcsw.Type = Type;
2883 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
2884 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2885 "0313 Ring %d handler: unexpected Rctl x%x "
2886 "Type x%x received\n",
2887 pring->ringno, Rctl, Type);
2889 return 1;
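/*
 * Editorial sketch: the dispatch above keys off the FC header routing
 * fields; a ring mask entry registered for ELS traffic might look like
 * this (the field names are real, the registration site is assumed):
 *
 *	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
 *	pring->prt[0].type = FC_TYPE_ELS;
 *	pring->prt[0].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event;
 */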
2893 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
2894 * @phba: Pointer to HBA context object.
2895 * @pring: Pointer to driver SLI ring object.
2896 * @prspiocb: Pointer to response iocb object.
2898 * This function looks up the iocb_lookup table to get the command iocb
2899 * corresponding to the given response iocb using the iotag of the
2900 * response iocb. This function is called with the hbalock held
2901 * for sli3 devices or the ring_lock for sli4 devices.
2902 * This function returns the command iocb object if it finds the command
2903 * iocb else returns NULL.
2905 static struct lpfc_iocbq *
2906 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2907 struct lpfc_sli_ring *pring,
2908 struct lpfc_iocbq *prspiocb)
2910 struct lpfc_iocbq *cmd_iocb = NULL;
2912 lockdep_assert_held(&phba->hbalock);
2914 iotag = prspiocb->iocb.ulpIoTag;
2916 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2917 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2918 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2919 /* remove from txcmpl queue list */
2920 list_del_init(&cmd_iocb->list);
2921 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2922 return cmd_iocb;
2926 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2927 "0317 iotag x%x is out of "
2928 "range: max iotag x%x wd0 x%x\n",
2929 iotag, phba->sli.last_iotag,
2930 *(((uint32_t *) &prspiocb->iocb) + 7));
2935 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2936 * @phba: Pointer to HBA context object.
2937 * @pring: Pointer to driver SLI ring object.
2940 * This function looks up the iocb_lookup table to get the command iocb
2941 * corresponding to the given iotag. This function is called with the
2942 * hbalock held.
2943 * This function returns the command iocb object if it finds the command
2944 * iocb else returns NULL.
2946 static struct lpfc_iocbq *
2947 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2948 struct lpfc_sli_ring *pring, uint16_t iotag)
2950 struct lpfc_iocbq *cmd_iocb = NULL;
2952 lockdep_assert_held(&phba->hbalock);
2953 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2954 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2955 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2956 /* remove from txcmpl queue list */
2957 list_del_init(&cmd_iocb->list);
2958 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2959 return cmd_iocb;
2963 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2964 "0372 iotag x%x lookup error: max iotag (x%x) "
2965 "iocb_flag x%x\n",
2966 iotag, phba->sli.last_iotag,
2967 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
2972 * lpfc_sli_process_sol_iocb - process solicited iocb completion
2973 * @phba: Pointer to HBA context object.
2974 * @pring: Pointer to driver SLI ring object.
2975 * @saveq: Pointer to the response iocb to be processed.
2977 * This function is called by the ring event handler for non-fcp
2978 * rings when there is a new response iocb in the response ring.
2979 * The caller is not required to hold any locks. This function
2980 * gets the command iocb associated with the response iocb and
2981 * calls the completion handler for the command iocb. If there
2982 * is no completion handler, the function will free the resources
2983 * associated with command iocb. If the response iocb is for
2984 * an already aborted command iocb, the status of the completion
2985 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
2986 * This function always returns 1.
2989 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2990 struct lpfc_iocbq *saveq)
2992 struct lpfc_iocbq *cmdiocbp;
2994 unsigned long iflag;
2996 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
2997 if (phba->sli_rev == LPFC_SLI_REV4)
2998 spin_lock_irqsave(&pring->ring_lock, iflag);
3000 spin_lock_irqsave(&phba->hbalock, iflag);
3001 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3002 if (phba->sli_rev == LPFC_SLI_REV4)
3003 spin_unlock_irqrestore(&pring->ring_lock, iflag);
3005 spin_unlock_irqrestore(&phba->hbalock, iflag);
3008 if (cmdiocbp->iocb_cmpl) {
3010 * If an ELS command failed send an event to mgmt
3013 if (saveq->iocb.ulpStatus &&
3014 (pring->ringno == LPFC_ELS_RING) &&
3015 (cmdiocbp->iocb.ulpCommand ==
3016 CMD_ELS_REQUEST64_CR))
3017 lpfc_send_els_failure_event(phba,
3018 cmdiocbp, saveq);
3021 * Post all ELS completions to the worker thread.
3022 * All other are passed to the completion callback.
3024 if (pring->ringno == LPFC_ELS_RING) {
3025 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3026 (cmdiocbp->iocb_flag &
3027 LPFC_DRIVER_ABORTED)) {
3028 spin_lock_irqsave(&phba->hbalock,
3030 cmdiocbp->iocb_flag &=
3031 ~LPFC_DRIVER_ABORTED;
3032 spin_unlock_irqrestore(&phba->hbalock,
3034 saveq->iocb.ulpStatus =
3035 IOSTAT_LOCAL_REJECT;
3036 saveq->iocb.un.ulpWord[4] =
3037 IOERR_SLI_ABORTED;
3039 /* Firmware could still be in progress
3040 * of DMAing payload, so don't free data
3041 * buffer till after a hbeat.
3043 spin_lock_irqsave(&phba->hbalock,
3045 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
3046 spin_unlock_irqrestore(&phba->hbalock,
3049 if (phba->sli_rev == LPFC_SLI_REV4) {
3050 if (saveq->iocb_flag &
3051 LPFC_EXCHANGE_BUSY) {
3052 /* Set cmdiocb flag for the
3053 * exchange busy so sgl (xri)
3054 * will not be released until
3055 * the abort xri is received
3056 * from hba.
3058 spin_lock_irqsave(
3059 &phba->hbalock, iflag);
3060 cmdiocbp->iocb_flag |=
3061 LPFC_EXCHANGE_BUSY;
3062 spin_unlock_irqrestore(
3063 &phba->hbalock, iflag);
3065 if (cmdiocbp->iocb_flag &
3066 LPFC_DRIVER_ABORTED) {
3068 * Clear LPFC_DRIVER_ABORTED
3069 * bit in case it was driver
3073 &phba->hbalock, iflag);
3074 cmdiocbp->iocb_flag &=
3075 ~LPFC_DRIVER_ABORTED;
3076 spin_unlock_irqrestore(
3077 &phba->hbalock, iflag);
3078 cmdiocbp->iocb.ulpStatus =
3079 IOSTAT_LOCAL_REJECT;
3080 cmdiocbp->iocb.un.ulpWord[4] =
3081 IOERR_ABORT_REQUESTED;
3083 * For SLI4, irsiocb contains
3084 * NO_XRI in sli_xritag, it
3085 * shall not affect releasing
3086 * sgl (xri) process.
3088 saveq->iocb.ulpStatus =
3089 IOSTAT_LOCAL_REJECT;
3090 saveq->iocb.un.ulpWord[4] =
3091 IOERR_SLI_ABORTED;
3092 spin_lock_irqsave(
3093 &phba->hbalock, iflag);
3094 saveq->iocb_flag |=
3095 LPFC_DELAY_MEM_FREE;
3096 spin_unlock_irqrestore(
3097 &phba->hbalock, iflag);
3101 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
3102 } else
3103 lpfc_sli_release_iocbq(phba, cmdiocbp);
3106 * Unknown initiating command based on the response iotag.
3107 * This could be the case on the ELS ring because of
3110 if (pring->ringno != LPFC_ELS_RING) {
3112 * Ring <ringno> handler: unexpected completion IoTag
3115 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3116 "0322 Ring %d handler: "
3117 "unexpected completion IoTag x%x "
3118 "Data: x%x x%x x%x x%x\n",
3120 saveq->iocb.ulpIoTag,
3121 saveq->iocb.ulpStatus,
3122 saveq->iocb.un.ulpWord[4],
3123 saveq->iocb.ulpCommand,
3124 saveq->iocb.ulpContext);
3132 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3133 * @phba: Pointer to HBA context object.
3134 * @pring: Pointer to driver SLI ring object.
3136 * This function is called from the iocb ring event handlers when the
3137 * put pointer is ahead of the get pointer for a ring. This function signals
3138 * an error attention condition to the worker thread and the worker
3139 * thread will transition the HBA to offline state.
3142 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3144 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3146 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3147 * rsp ring <portRspMax>
3149 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3150 "0312 Ring %d handler: portRspPut %d "
3151 "is bigger than rsp ring %d\n",
3152 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3153 pring->sli.sli3.numRiocb);
3155 phba->link_state = LPFC_HBA_ERROR;
3158 * All error attention handlers are posted to
3159 * the worker thread
3161 phba->work_ha |= HA_ERATT;
3162 phba->work_hs = HS_FFER3;
3164 lpfc_worker_wake_up(phba);
3170 * lpfc_poll_eratt - Error attention polling timer timeout handler
3171 * @t: Pointer to the timer_list embedded in the HBA context object.
3173 * This function is invoked by the Error Attention polling timer when the
3174 * timer times out. It will check the SLI Error Attention register for
3175 * possible attention events. If so, it will post an Error Attention event
3176 * and wake up worker thread to process it. Otherwise, it will set up the
3177 * Error Attention polling timer for the next poll.
3179 void lpfc_poll_eratt(struct timer_list *t)
3181 struct lpfc_hba *phba;
3182 uint32_t eratt = 0;
3183 uint64_t sli_intr, cnt;
3185 phba = from_timer(phba, t, eratt_poll);
3187 /* Here we will also keep track of interrupts per sec of the hba */
3188 sli_intr = phba->sli.slistat.sli_intr;
3190 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3191 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3194 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3196 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3197 do_div(cnt, phba->eratt_poll_interval);
3198 phba->sli.slistat.sli_ips = cnt;
3200 phba->sli.slistat.sli_prev_intr = sli_intr;
3202 /* Check chip HA register for error event */
3203 eratt = lpfc_sli_check_eratt(phba);
3205 if (eratt)
3206 /* Tell the worker thread there is work to do */
3207 lpfc_worker_wake_up(phba);
3208 else
3209 /* Restart the timer for next eratt poll */
3210 mod_timer(&phba->eratt_poll,
3211 jiffies +
3212 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
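/*
 * Worked example (editorial, hypothetical numbers): with an eratt poll
 * interval of 30 seconds, sli_prev_intr = 1000 and sli_intr = 61000, the
 * non-wrap branch computes cnt = 60000 and do_div() leaves
 * 60000 / 30 = 2000 in sli_ips, i.e. interrupts per second; the other
 * branch only handles wrap of the 64-bit interrupt counter.
 */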
3218 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3219 * @phba: Pointer to HBA context object.
3220 * @pring: Pointer to driver SLI ring object.
3221 * @mask: Host attention register mask for this ring.
3223 * This function is called from the interrupt context when there is a ring
3224 * event for the fcp ring. The caller does not hold any lock.
3225 * The function processes each response iocb in the response ring until it
3226 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
3227 * LE bit set. The function will call the completion handler of the command iocb
3228 * if the response iocb indicates a completion for a command iocb or it is
3229 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3230 * function if this is an unsolicited iocb.
3231 * This routine presumes LPFC_FCP_RING handling and doesn't bother
3232 * to check it explicitly.
3235 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3236 struct lpfc_sli_ring *pring, uint32_t mask)
3238 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3239 IOCB_t *irsp = NULL;
3240 IOCB_t *entry = NULL;
3241 struct lpfc_iocbq *cmdiocbq = NULL;
3242 struct lpfc_iocbq rspiocbq;
3243 uint32_t status;
3244 uint32_t portRspPut, portRspMax;
3246 lpfc_iocb_type type;
3247 unsigned long iflag;
3248 uint32_t rsp_cmpl = 0;
3250 spin_lock_irqsave(&phba->hbalock, iflag);
3251 pring->stats.iocb_event++;
3254 * The next available response entry should never exceed the maximum
3255 * entries. If it does, treat it as an adapter hardware error.
3257 portRspMax = pring->sli.sli3.numRiocb;
3258 portRspPut = le32_to_cpu(pgp->rspPutInx);
3259 if (unlikely(portRspPut >= portRspMax)) {
3260 lpfc_sli_rsp_pointers_error(phba, pring);
3261 spin_unlock_irqrestore(&phba->hbalock, iflag);
3262 return 1;
3264 if (phba->fcp_ring_in_use) {
3265 spin_unlock_irqrestore(&phba->hbalock, iflag);
3266 return 1;
3267 } else
3268 phba->fcp_ring_in_use = 1;
3271 while (pring->sli.sli3.rspidx != portRspPut) {
3273 * Fetch an entry off the ring and copy it into a local data
3274 * structure. The copy involves a byte-swap since the
3275 * network byte order and pci byte orders are different.
3277 entry = lpfc_resp_iocb(phba, pring);
3278 phba->last_completion_time = jiffies;
3280 if (++pring->sli.sli3.rspidx >= portRspMax)
3281 pring->sli.sli3.rspidx = 0;
3283 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3284 (uint32_t *) &rspiocbq.iocb,
3285 phba->iocb_rsp_size);
3286 INIT_LIST_HEAD(&(rspiocbq.list));
3287 irsp = &rspiocbq.iocb;
3289 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3290 pring->stats.iocb_rsp++;
3291 rsp_cmpl++;
3293 if (unlikely(irsp->ulpStatus)) {
3295 * If resource errors reported from HBA, reduce
3296 * queuedepths of the SCSI device.
3298 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3299 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3300 IOERR_NO_RESOURCES)) {
3301 spin_unlock_irqrestore(&phba->hbalock, iflag);
3302 phba->lpfc_rampdown_queue_depth(phba);
3303 spin_lock_irqsave(&phba->hbalock, iflag);
3306 /* Rsp ring <ringno> error: IOCB */
3307 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3308 "0336 Rsp Ring %d error: IOCB Data: "
3309 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3311 irsp->un.ulpWord[0],
3312 irsp->un.ulpWord[1],
3313 irsp->un.ulpWord[2],
3314 irsp->un.ulpWord[3],
3315 irsp->un.ulpWord[4],
3316 irsp->un.ulpWord[5],
3317 *(uint32_t *)&irsp->un1,
3318 *((uint32_t *)&irsp->un1 + 1));
3321 switch (type) {
3322 case LPFC_ABORT_IOCB:
3323 case LPFC_SOL_IOCB:
3325 * Idle exchange closed via ABTS from port. No iocb
3326 * resources need to be recovered.
3328 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3329 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3330 "0333 IOCB cmd 0x%x"
3331 " processed. Skipping"
3332 " completion\n",
3333 irsp->ulpCommand);
3334 break;
3337 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3338 &rspiocbq);
3339 if (unlikely(!cmdiocbq))
3340 break;
3341 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3342 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3343 if (cmdiocbq->iocb_cmpl) {
3344 spin_unlock_irqrestore(&phba->hbalock, iflag);
3345 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3346 &rspiocbq);
3347 spin_lock_irqsave(&phba->hbalock, iflag);
3349 break;
3350 case LPFC_UNSOL_IOCB:
3351 spin_unlock_irqrestore(&phba->hbalock, iflag);
3352 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3353 spin_lock_irqsave(&phba->hbalock, iflag);
3354 break;
3355 default:
3356 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3357 char adaptermsg[LPFC_MAX_ADPTMSG];
3358 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3359 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3360 MAX_MSG_DATA);
3361 dev_warn(&((phba->pcidev)->dev),
3362 "lpfc%d: %s\n",
3363 phba->brd_no, adaptermsg);
3365 /* Unknown IOCB command */
3366 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3367 "0334 Unknown IOCB command "
3368 "Data: x%x, x%x x%x x%x x%x\n",
3369 type, irsp->ulpCommand,
3370 irsp->ulpStatus,
3371 irsp->ulpIoTag,
3372 irsp->ulpContext);
3374 break;
3378 * The response IOCB has been processed. Update the ring
3379 * pointer in SLIM. If the port response put pointer has not
3380 * been updated, sync the pgp->rspPutInx and fetch the new port
3381 * response put pointer.
3383 writel(pring->sli.sli3.rspidx,
3384 &phba->host_gp[pring->ringno].rspGetInx);
3386 if (pring->sli.sli3.rspidx == portRspPut)
3387 portRspPut = le32_to_cpu(pgp->rspPutInx);
3390 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3391 pring->stats.iocb_rsp_full++;
3392 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3393 writel(status, phba->CAregaddr);
3394 readl(phba->CAregaddr);
3396 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3397 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3398 pring->stats.iocb_cmd_empty++;
3400 /* Force update of the local copy of cmdGetInx */
3401 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3402 lpfc_sli_resume_iocb(phba, pring);
3404 if ((pring->lpfc_sli_cmd_available))
3405 (pring->lpfc_sli_cmd_available) (phba, pring);
3409 phba->fcp_ring_in_use = 0;
3410 spin_unlock_irqrestore(&phba->hbalock, iflag);
3415 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3416 * @phba: Pointer to HBA context object.
3417 * @pring: Pointer to driver SLI ring object.
3418 * @rspiocbp: Pointer to driver response IOCB object.
3420 * This function is called from the worker thread when there is a slow-path
3421 * response IOCB to process. This function chains all the response iocbs until
3422 * seeing the iocb with the LE bit set. The function will call
3423 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3424 * completion of a command iocb. The function will call the
3425 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3426 * The function frees the resources or calls the completion handler if this
3427 * iocb is an abort completion. The function returns NULL when the response
3428 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3429 * this function shall chain the iocb on to the iocb_continueq and return the
3430 * response iocb passed in.
3432 static struct lpfc_iocbq *
3433 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3434 struct lpfc_iocbq *rspiocbp)
3436 struct lpfc_iocbq *saveq;
3437 struct lpfc_iocbq *cmdiocbp;
3438 struct lpfc_iocbq *next_iocb;
3439 IOCB_t *irsp = NULL;
3440 uint32_t free_saveq;
3441 uint8_t iocb_cmd_type;
3442 lpfc_iocb_type type;
3443 unsigned long iflag;
3444 int rc;
3446 spin_lock_irqsave(&phba->hbalock, iflag);
3447 /* First add the response iocb to the continueq list */
3448 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3449 pring->iocb_continueq_cnt++;
3451 /* Now, determine whether the list is completed for processing */
3452 irsp = &rspiocbp->iocb;
3453 if (irsp->ulpLe) {
3455 * By default, the driver expects to free all resources
3456 * associated with this iocb completion.
3458 free_saveq = 1;
3459 saveq = list_get_first(&pring->iocb_continueq,
3460 struct lpfc_iocbq, list);
3461 irsp = &(saveq->iocb);
3462 list_del_init(&pring->iocb_continueq);
3463 pring->iocb_continueq_cnt = 0;
3465 pring->stats.iocb_rsp++;
3468 * If resource errors reported from HBA, reduce
3469 * queuedepths of the SCSI device.
3471 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3472 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3473 IOERR_NO_RESOURCES)) {
3474 spin_unlock_irqrestore(&phba->hbalock, iflag);
3475 phba->lpfc_rampdown_queue_depth(phba);
3476 spin_lock_irqsave(&phba->hbalock, iflag);
3479 if (irsp->ulpStatus) {
3480 /* Rsp ring <ringno> error: IOCB */
3481 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3482 "0328 Rsp Ring %d error: "
3487 "x%x x%x x%x x%x\n",
3489 irsp->un.ulpWord[0],
3490 irsp->un.ulpWord[1],
3491 irsp->un.ulpWord[2],
3492 irsp->un.ulpWord[3],
3493 irsp->un.ulpWord[4],
3494 irsp->un.ulpWord[5],
3495 *(((uint32_t *) irsp) + 6),
3496 *(((uint32_t *) irsp) + 7),
3497 *(((uint32_t *) irsp) + 8),
3498 *(((uint32_t *) irsp) + 9),
3499 *(((uint32_t *) irsp) + 10),
3500 *(((uint32_t *) irsp) + 11),
3501 *(((uint32_t *) irsp) + 12),
3502 *(((uint32_t *) irsp) + 13),
3503 *(((uint32_t *) irsp) + 14),
3504 *(((uint32_t *) irsp) + 15));
3508 * Fetch the IOCB command type and call the correct completion
3509 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3510 * get freed back to the lpfc_iocb_list by the discovery
3513 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3514 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3515 switch (type) {
3516 case LPFC_SOL_IOCB:
3517 spin_unlock_irqrestore(&phba->hbalock, iflag);
3518 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3519 spin_lock_irqsave(&phba->hbalock, iflag);
3520 break;
3522 case LPFC_UNSOL_IOCB:
3523 spin_unlock_irqrestore(&phba->hbalock, iflag);
3524 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3525 spin_lock_irqsave(&phba->hbalock, iflag);
3526 if (!rc)
3527 free_saveq = 0;
3528 break;
3530 case LPFC_ABORT_IOCB:
3531 cmdiocbp = NULL;
3532 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3533 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3534 saveq);
3535 if (cmdiocbp) {
3536 /* Call the specified completion routine */
3537 if (cmdiocbp->iocb_cmpl) {
3538 spin_unlock_irqrestore(&phba->hbalock,
3539 iflag);
3540 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3541 saveq);
3542 spin_lock_irqsave(&phba->hbalock,
3543 iflag);
3544 } else
3545 __lpfc_sli_release_iocbq(phba,
3546 cmdiocbp);
3548 break;
3550 case LPFC_UNKNOWN_IOCB:
3551 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3552 char adaptermsg[LPFC_MAX_ADPTMSG];
3553 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3554 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3556 dev_warn(&((phba->pcidev)->dev),
3558 phba->brd_no, adaptermsg);
3560 /* Unknown IOCB command */
3561 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3562 "0335 Unknown IOCB "
3563 "command Data: x%x "
3564 "x%x x%x x%x\n",
3565 irsp->ulpCommand,
3566 irsp->ulpStatus,
3567 irsp->ulpIoTag,
3568 irsp->ulpContext);
3570 break;
3573 if (free_saveq) {
3574 list_for_each_entry_safe(rspiocbp, next_iocb,
3575 &saveq->list, list) {
3576 list_del_init(&rspiocbp->list);
3577 __lpfc_sli_release_iocbq(phba, rspiocbp);
3579 __lpfc_sli_release_iocbq(phba, saveq);
3581 rspiocbp = NULL;
3583 spin_unlock_irqrestore(&phba->hbalock, iflag);
3584 return rspiocbp;
3588 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3589 * @phba: Pointer to HBA context object.
3590 * @pring: Pointer to driver SLI ring object.
3591 * @mask: Host attention register mask for this ring.
3593 * This routine wraps the actual slow-ring event processing routine, invoking
3594 * it through the API jump table function pointer in the lpfc_hba struct.
3597 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3598 struct lpfc_sli_ring *pring, uint32_t mask)
3600 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
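/*
 * Editorial sketch of the jump-table pattern wrapped above: the function
 * pointer is bound once, when the driver sets up its per-revision API
 * tables (the exact setup helper is not shown here), e.g.:
 *
 *	phba->lpfc_sli_handle_slow_ring_event =
 *			lpfc_sli_handle_slow_ring_event_s3;	// or _s4
 */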
3604 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3605 * @phba: Pointer to HBA context object.
3606 * @pring: Pointer to driver SLI ring object.
3607 * @mask: Host attention register mask for this ring.
3609 * This function is called from the worker thread when there is a ring event
3610 * for non-fcp rings. The caller does not hold any lock. The function will
3611 * remove each response iocb in the response ring and call the handle
3612 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3615 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3616 struct lpfc_sli_ring *pring, uint32_t mask)
3618 struct lpfc_pgp *pgp;
3619 IOCB_t *entry;
3620 IOCB_t *irsp = NULL;
3621 struct lpfc_iocbq *rspiocbp = NULL;
3622 uint32_t portRspPut, portRspMax;
3623 unsigned long iflag;
3624 uint32_t status;
3626 pgp = &phba->port_gp[pring->ringno];
3627 spin_lock_irqsave(&phba->hbalock, iflag);
3628 pring->stats.iocb_event++;
3631 * The next available response entry should never exceed the maximum
3632 * entries. If it does, treat it as an adapter hardware error.
3634 portRspMax = pring->sli.sli3.numRiocb;
3635 portRspPut = le32_to_cpu(pgp->rspPutInx);
3636 if (portRspPut >= portRspMax) {
3638 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3639 * rsp ring <portRspMax>
3641 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3642 "0303 Ring %d handler: portRspPut %d "
3643 "is bigger than rsp ring %d\n",
3644 pring->ringno, portRspPut, portRspMax);
3646 phba->link_state = LPFC_HBA_ERROR;
3647 spin_unlock_irqrestore(&phba->hbalock, iflag);
3649 phba->work_hs = HS_FFER3;
3650 lpfc_handle_eratt(phba);
3651 return;
3656 while (pring->sli.sli3.rspidx != portRspPut) {
3658 * Build a completion list and call the appropriate handler.
3659 * The process is to get the next available response iocb, get
3660 * a free iocb from the list, copy the response data into the
3661 * free iocb, insert to the continuation list, and update the
3662 * next response index to slim. This process makes response
3663 * iocb's in the ring available to DMA as fast as possible but
3664 * pays a penalty for a copy operation. Since the iocb is
3665 * only 32 bytes, this penalty is considered small relative to
3666 * the PCI reads for register values and a slim write. When
3667 * the ulpLe field is set, the entire Command has been
3668 * received.
3670 entry = lpfc_resp_iocb(phba, pring);
3672 phba->last_completion_time = jiffies;
3673 rspiocbp = __lpfc_sli_get_iocbq(phba);
3674 if (rspiocbp == NULL) {
3675 printk(KERN_ERR "%s: out of buffers! Failing "
3676 "completion.\n", __func__);
3677 break;
3680 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3681 phba->iocb_rsp_size);
3682 irsp = &rspiocbp->iocb;
3684 if (++pring->sli.sli3.rspidx >= portRspMax)
3685 pring->sli.sli3.rspidx = 0;
3687 if (pring->ringno == LPFC_ELS_RING) {
3688 lpfc_debugfs_slow_ring_trc(phba,
3689 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3690 *(((uint32_t *) irsp) + 4),
3691 *(((uint32_t *) irsp) + 6),
3692 *(((uint32_t *) irsp) + 7));
3695 writel(pring->sli.sli3.rspidx,
3696 &phba->host_gp[pring->ringno].rspGetInx);
3698 spin_unlock_irqrestore(&phba->hbalock, iflag);
3699 /* Handle the response IOCB */
3700 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3701 spin_lock_irqsave(&phba->hbalock, iflag);
3704 * If the port response put pointer has not been updated, sync
3705 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3706 * response put pointer.
3708 if (pring->sli.sli3.rspidx == portRspPut) {
3709 portRspPut = le32_to_cpu(pgp->rspPutInx);
3711 } /* while (pring->sli.sli3.rspidx != portRspPut) */
3713 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3714 /* At least one response entry has been freed */
3715 pring->stats.iocb_rsp_full++;
3716 /* SET RxRE_RSP in Chip Att register */
3717 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3718 writel(status, phba->CAregaddr);
3719 readl(phba->CAregaddr); /* flush */
3721 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3722 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3723 pring->stats.iocb_cmd_empty++;
3725 /* Force update of the local copy of cmdGetInx */
3726 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3727 lpfc_sli_resume_iocb(phba, pring);
3729 if ((pring->lpfc_sli_cmd_available))
3730 (pring->lpfc_sli_cmd_available) (phba, pring);
3734 spin_unlock_irqrestore(&phba->hbalock, iflag);
3739 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3740 * @phba: Pointer to HBA context object.
3741 * @pring: Pointer to driver SLI ring object.
3742 * @mask: Host attention register mask for this ring.
3744 * This function is called from the worker thread when there is a pending
3745 * ELS response iocb on the driver internal slow-path response iocb worker
3746 * queue. The caller does not hold any lock. The function will remove each
3747 * response iocb from the response worker queue and calls the handle
3748 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3751 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3752 struct lpfc_sli_ring *pring, uint32_t mask)
3754 struct lpfc_iocbq *irspiocbq;
3755 struct hbq_dmabuf *dmabuf;
3756 struct lpfc_cq_event *cq_event;
3757 unsigned long iflag;
3759 spin_lock_irqsave(&phba->hbalock, iflag);
3760 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3761 spin_unlock_irqrestore(&phba->hbalock, iflag);
3762 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3763 /* Get the response iocb from the head of work queue */
3764 spin_lock_irqsave(&phba->hbalock, iflag);
3765 list_remove_head(&phba->sli4_hba.sp_queue_event,
3766 cq_event, struct lpfc_cq_event, list);
3767 spin_unlock_irqrestore(&phba->hbalock, iflag);
3769 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3770 case CQE_CODE_COMPL_WQE:
3771 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3773 /* Translate ELS WCQE to response IOCBQ */
3774 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3775 irspiocbq);
3776 if (irspiocbq)
3777 lpfc_sli_sp_handle_rspiocb(phba, pring,
3778 irspiocbq);
3779 break;
3780 case CQE_CODE_RECEIVE:
3781 case CQE_CODE_RECEIVE_V1:
3782 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3784 lpfc_sli4_handle_received_buffer(phba, dmabuf);
3785 break;
3786 default:
3787 break;
3793 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
3794 * @phba: Pointer to HBA context object.
3795 * @pring: Pointer to driver SLI ring object.
3797 * This function aborts all iocbs in the given ring and frees all the iocb
3798 * objects in txq. This function issues an abort iocb for all the iocb commands
3799 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3800 * the return of this function. The caller is not required to hold any locks.
3803 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3805 LIST_HEAD(completions);
3806 struct lpfc_iocbq *iocb, *next_iocb;
3808 if (pring->ringno == LPFC_ELS_RING) {
3809 lpfc_fabric_abort_hba(phba);
3812 /* Error everything on txq and txcmplq
3815 if (phba->sli_rev >= LPFC_SLI_REV4) {
3816 spin_lock_irq(&pring->ring_lock);
3817 list_splice_init(&pring->txq, &completions);
3818 pring->txq_cnt = 0;
3819 spin_unlock_irq(&pring->ring_lock);
3821 spin_lock_irq(&phba->hbalock);
3822 /* Next issue ABTS for everything on the txcmplq */
3823 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3824 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3825 spin_unlock_irq(&phba->hbalock);
3827 spin_lock_irq(&phba->hbalock);
3828 list_splice_init(&pring->txq, &completions);
3829 pring->txq_cnt = 0;
3831 /* Next issue ABTS for everything on the txcmplq */
3832 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3833 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3834 spin_unlock_irq(&phba->hbalock);
3837 /* Cancel all the IOCBs from the completions list */
3838 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3839 IOERR_SLI_ABORTED);
3843 * lpfc_sli_abort_wqe_ring - Abort all iocbs in the ring
3844 * @phba: Pointer to HBA context object.
3845 * @pring: Pointer to driver SLI ring object.
3847 * This function aborts all iocbs in the given ring and frees all the iocb
3848 * objects in txq. This function issues an abort iocb for all the iocb commands
3849 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3850 * the return of this function. The caller is not required to hold any locks.
3853 lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3855 LIST_HEAD(completions);
3856 struct lpfc_iocbq *iocb, *next_iocb;
3858 if (pring->ringno == LPFC_ELS_RING)
3859 lpfc_fabric_abort_hba(phba);
3861 spin_lock_irq(&phba->hbalock);
3862 /* Next issue ABTS for everything on the txcmplq */
3863 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3864 lpfc_sli4_abort_nvme_io(phba, pring, iocb);
3865 spin_unlock_irq(&phba->hbalock);
3870 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3871 * @phba: Pointer to HBA context object.
3874 * This function aborts all iocbs in FCP rings and frees all the iocb
3875 * objects in txq. This function issues an abort iocb for all the iocb commands
3876 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3877 * the return of this function. The caller is not required to hold any locks.
3880 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3882 struct lpfc_sli *psli = &phba->sli;
3883 struct lpfc_sli_ring *pring;
3884 uint32_t i;
3886 /* Look on all the FCP Rings for the iotag */
3887 if (phba->sli_rev >= LPFC_SLI_REV4) {
3888 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3889 pring = phba->sli4_hba.fcp_wq[i]->pring;
3890 lpfc_sli_abort_iocb_ring(phba, pring);
3893 pring = &psli->sli3_ring[LPFC_FCP_RING];
3894 lpfc_sli_abort_iocb_ring(phba, pring);
3899 * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
3900 * @phba: Pointer to HBA context object.
3902 * This function aborts all wqes in NVME rings. This function issues an
3903 * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in
3904 * the txcmplq are not guaranteed to complete before the return of this
3905 * function. The caller is not required to hold any locks.
3908 lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
3910 struct lpfc_sli_ring *pring;
3913 if (phba->sli_rev < LPFC_SLI_REV4)
3916 /* Abort all IO on each NVME ring. */
3917 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
3918 pring = phba->sli4_hba.nvme_wq[i]->pring;
3919 lpfc_sli_abort_wqe_ring(phba, pring);
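/*
 * Sketch (hypothetical helper, for illustration): the two ring walks
 * above reduce to this shape -- resolve each SLI4 I/O channel's work
 * queue to its pring and apply a per-ring operation.
 */
static void lpfc_example_for_each_fcp_pring(struct lpfc_hba *phba,
		void (*op)(struct lpfc_hba *, struct lpfc_sli_ring *))
{
	struct lpfc_sli_ring *pring;
	int i;

	if (phba->sli_rev < LPFC_SLI_REV4)
		return;

	for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
		pring = phba->sli4_hba.fcp_wq[i]->pring;
		op(phba, pring);	/* e.g. lpfc_sli_abort_iocb_ring */
	}
}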
3925 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
3926 * @phba: Pointer to HBA context object.
3928 * This function flushes all iocbs in the fcp ring and frees all the iocb
3929 * objects in txq and txcmplq. This function does not issue abort iocbs
3930 * for the iocb commands in txcmplq; they are simply returned with
3931 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's
3932 * PCI slot has been permanently disabled.
3935 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3939 struct lpfc_sli *psli = &phba->sli;
3940 struct lpfc_sli_ring *pring;
3942 struct lpfc_iocbq *piocb, *next_iocb;
3944 spin_lock_irq(&phba->hbalock);
3945 /* Indicate the I/O queues are flushed */
3946 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
3947 spin_unlock_irq(&phba->hbalock);
3949 /* Flush all IO on each FCP ring. */
3950 if (phba->sli_rev >= LPFC_SLI_REV4) {
3951 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3952 pring = phba->sli4_hba.fcp_wq[i]->pring;
3954 spin_lock_irq(&pring->ring_lock);
3955 /* Retrieve everything on txq */
3956 list_splice_init(&pring->txq, &txq);
3957 list_for_each_entry_safe(piocb, next_iocb,
3958 &pring->txcmplq, list)
3959 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3960 /* Retrieve everything on the txcmplq */
3961 list_splice_init(&pring->txcmplq, &txcmplq);
3963 pring->txcmplq_cnt = 0;
3964 spin_unlock_irq(&pring->ring_lock);
3967 lpfc_sli_cancel_iocbs(phba, &txq,
3968 IOSTAT_LOCAL_REJECT,
3970 /* Flush the txcmplq */
3971 lpfc_sli_cancel_iocbs(phba, &txcmplq,
3972 IOSTAT_LOCAL_REJECT,
3976 pring = &psli->sli3_ring[LPFC_FCP_RING];
3978 spin_lock_irq(&phba->hbalock);
3979 /* Retrieve everything on txq */
3980 list_splice_init(&pring->txq, &txq);
3981 list_for_each_entry_safe(piocb, next_iocb,
3982 &pring->txcmplq, list)
3983 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3984 /* Retrieve everything on the txcmplq */
3985 list_splice_init(&pring->txcmplq, &txcmplq);
3987 pring->txcmplq_cnt = 0;
3988 spin_unlock_irq(&phba->hbalock);
3991 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3993 /* Flush the txcmplq */
3994 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4000 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
4001 * @phba: Pointer to HBA context object.
4003 * This function flushes all wqes in the nvme rings and frees all resources
4004 * in the txcmplq. This function does not issue abort wqes for the IO
4005 * commands in txcmplq; they are simply returned with
4006 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's
4007 * PCI slot has been permanently disabled.
4010 lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
4013 struct lpfc_sli_ring *pring;
4015 struct lpfc_iocbq *piocb, *next_iocb;
4017 if (phba->sli_rev < LPFC_SLI_REV4)
4020 /* Hint to other driver operations that a flush is in progress. */
4021 spin_lock_irq(&phba->hbalock);
4022 phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
4023 spin_unlock_irq(&phba->hbalock);
4025 /* Cycle through all NVME rings and complete each IO with
4026 * a local driver reason code. This is a flush, so no abort
4027 * exchanges are sent to the firmware.
4029 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
4030 pring = phba->sli4_hba.nvme_wq[i]->pring;
4032 spin_lock_irq(&pring->ring_lock);
4033 list_for_each_entry_safe(piocb, next_iocb,
4034 &pring->txcmplq, list)
4035 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4036 /* Retrieve everything on the txcmplq */
4037 list_splice_init(&pring->txcmplq, &txcmplq);
4038 pring->txcmplq_cnt = 0;
4039 spin_unlock_irq(&pring->ring_lock);
4041 /* Flush the txcmplq */
4042 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4043 IOSTAT_LOCAL_REJECT,
4049 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4050 * @phba: Pointer to HBA context object.
4051 * @mask: Bit mask to be checked.
4053 * This function reads the host status register and compares
4054 * with the provided bit mask to check if HBA completed
4055 * the restart. This function will wait in a loop for the
4056 * HBA to complete restart. If the HBA does not restart within
4057 * 15 iterations, the function will reset the HBA again. The
4058 * function returns 1 if the HBA fails to restart, otherwise it returns
4062 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4068 /* Read the HBA Host Status Register */
4069 if (lpfc_readl(phba->HSregaddr, &status))
4073 * Check status register every 100ms for 5 retries, then every
4074 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
4075 * check every 2.5 sec for 4 more retries.
4076 * Break out of the loop if errors occurred during init.
4078 while (((status & mask) != mask) &&
4079 !(status & HS_FFERM) &&
4091 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4092 lpfc_sli_brdrestart(phba);
4094 /* Read the HBA Host Status Register */
4095 if (lpfc_readl(phba->HSregaddr, &status)) {
4101 /* Check to see if any errors occurred during init */
4102 if ((status & HS_FFERM) || (i >= 20)) {
4103 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4104 "2751 Adapter failed to restart, "
4105 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4107 readl(phba->MBslimaddr + 0xa8),
4108 readl(phba->MBslimaddr + 0xac));
4109 phba->link_state = LPFC_HBA_ERROR;
4117 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4118 * @phba: Pointer to HBA context object.
4119 * @mask: Bit mask to be checked.
4121 * This function checks the host status register to determine if the HBA
4122 * is ready. This function will wait in a loop for the HBA to become
4123 * ready. If the HBA is not ready, the function will reset the HBA PCI
4124 * function again. The function returns 1 if the HBA fails to become
4125 * ready, otherwise it returns zero.
4128 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4133 /* Read the HBA Host Status Register */
4134 status = lpfc_sli4_post_status_check(phba);
4137 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4138 lpfc_sli_brdrestart(phba);
4139 status = lpfc_sli4_post_status_check(phba);
4142 /* Check to see if any errors occurred during init */
4144 phba->link_state = LPFC_HBA_ERROR;
4147 phba->sli4_hba.intr_enable = 0;
4153 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4154 * @phba: Pointer to HBA context object.
4155 * @mask: Bit mask to be checked.
4157 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4158 * through the API jump table function pointer in the lpfc_hba struct.
4161 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4163 return phba->lpfc_sli_brdready(phba, mask);
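/*
 * Sketch of how the SLI-rev specific entry points behind this wrapper are
 * typically bound during driver init (modeled on the driver's API
 * jump-table setup; the helper name here is illustrative, not the actual
 * setup routine). lpfc_sli_brdrestart is bound the same way.
 */
static void lpfc_example_bind_brd_api(struct lpfc_hba *phba)
{
	switch (phba->sli_rev) {
	case LPFC_SLI_REV2:
	case LPFC_SLI_REV3:
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
		break;
	case LPFC_SLI_REV4:
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
		break;
	}
}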
4166 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4169 * lpfc_reset_barrier - Make HBA ready for HBA reset
4170 * @phba: Pointer to HBA context object.
4172 * This function is called, with hbalock held, before resetting an HBA.
4173 * It requests the HBA to quiesce DMAs before the reset.
4175 void lpfc_reset_barrier(struct lpfc_hba *phba)
4177 uint32_t __iomem *resp_buf;
4178 uint32_t __iomem *mbox_buf;
4179 volatile uint32_t mbox;
4180 uint32_t hc_copy, ha_copy, resp_data;
4184 lockdep_assert_held(&phba->hbalock);
4186 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4187 if (hdrtype != 0x80 ||
4188 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4189 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4193 * Tell the other part of the chip to suspend temporarily all
4196 resp_buf = phba->MBslimaddr;
4198 /* Disable the error attention */
4199 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4201 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4202 readl(phba->HCregaddr); /* flush */
4203 phba->link_flag |= LS_IGNORE_ERATT;
4205 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4207 if (ha_copy & HA_ERATT) {
4208 /* Clear Chip error bit */
4209 writel(HA_ERATT, phba->HAregaddr);
4210 phba->pport->stopped = 1;
4214 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4215 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4217 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4218 mbox_buf = phba->MBslimaddr;
4219 writel(mbox, mbox_buf);
4221 for (i = 0; i < 50; i++) {
4222 if (lpfc_readl((resp_buf + 1), &resp_data))
4224 if (resp_data != ~(BARRIER_TEST_PATTERN))
4230 if (lpfc_readl((resp_buf + 1), &resp_data))
4232 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4233 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4234 phba->pport->stopped)
4240 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4242 for (i = 0; i < 500; i++) {
4243 if (lpfc_readl(resp_buf, &resp_data))
4245 if (resp_data != mbox)
4254 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4256 if (!(ha_copy & HA_ERATT))
4262 if (readl(phba->HAregaddr) & HA_ERATT) {
4263 writel(HA_ERATT, phba->HAregaddr);
4264 phba->pport->stopped = 1;
4268 phba->link_flag &= ~LS_IGNORE_ERATT;
4269 writel(hc_copy, phba->HCregaddr);
4270 readl(phba->HCregaddr); /* flush */
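/*
 * Note on the writel()/readl() pairs above -- a minimal sketch of the
 * posted-write flush idiom. MMIO writes may be posted by the PCI bridge;
 * reading back any register on the same path forces the write to reach
 * the HBA before the driver proceeds. The helper name is illustrative.
 */
static inline void lpfc_example_writel_flush(struct lpfc_hba *phba,
					     uint32_t val)
{
	writel(val, phba->HCregaddr);
	readl(phba->HCregaddr);	/* flush the posted write */
}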
4274 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4275 * @phba: Pointer to HBA context object.
4277 * This function issues a kill_board mailbox command and waits for
4278 * the error attention interrupt. This function is called to stop
4279 * firmware processing. The caller is not required to hold any
4280 * locks. This function calls the lpfc_hba_down_post function to free
4281 * any pending commands after the kill. The function will return 1 if it
4282 * fails to kill the board, else it will return 0.
4285 lpfc_sli_brdkill(struct lpfc_hba *phba)
4287 struct lpfc_sli *psli;
4297 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4298 "0329 Kill HBA Data: x%x x%x\n",
4299 phba->pport->port_state, psli->sli_flag);
4301 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4305 /* Disable the error attention */
4306 spin_lock_irq(&phba->hbalock);
4307 if (lpfc_readl(phba->HCregaddr, &status)) {
4308 spin_unlock_irq(&phba->hbalock);
4309 mempool_free(pmb, phba->mbox_mem_pool);
4312 status &= ~HC_ERINT_ENA;
4313 writel(status, phba->HCregaddr);
4314 readl(phba->HCregaddr); /* flush */
4315 phba->link_flag |= LS_IGNORE_ERATT;
4316 spin_unlock_irq(&phba->hbalock);
4318 lpfc_kill_board(phba, pmb);
4319 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4320 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4322 if (retval != MBX_SUCCESS) {
4323 if (retval != MBX_BUSY)
4324 mempool_free(pmb, phba->mbox_mem_pool);
4325 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4326 "2752 KILL_BOARD command failed retval %d\n",
4328 spin_lock_irq(&phba->hbalock);
4329 phba->link_flag &= ~LS_IGNORE_ERATT;
4330 spin_unlock_irq(&phba->hbalock);
4334 spin_lock_irq(&phba->hbalock);
4335 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4336 spin_unlock_irq(&phba->hbalock);
4338 mempool_free(pmb, phba->mbox_mem_pool);
4340 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4341 * attention every 100ms for 3 seconds. If we don't get ERATT after
4342 * 3 seconds we still set HBA_ERROR state because the status of the
4343 * board is now undefined.
4345 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4347 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4349 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4353 del_timer_sync(&psli->mbox_tmo);
4354 if (ha_copy & HA_ERATT) {
4355 writel(HA_ERATT, phba->HAregaddr);
4356 phba->pport->stopped = 1;
4358 spin_lock_irq(&phba->hbalock);
4359 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4360 psli->mbox_active = NULL;
4361 phba->link_flag &= ~LS_IGNORE_ERATT;
4362 spin_unlock_irq(&phba->hbalock);
4364 lpfc_hba_down_post(phba);
4365 phba->link_state = LPFC_HBA_ERROR;
4367 return ha_copy & HA_ERATT ? 0 : 1;
4371 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4372 * @phba: Pointer to HBA context object.
4374 * This function resets the HBA by writing HC_INITFF to the control
4375 * register. After the HBA resets, this function resets all the iocb ring
4376 * indices. This function disables PCI layer parity checking during
4378 * This function returns 0 always.
4379 * The caller is not required to hold any locks.
4382 lpfc_sli_brdreset(struct lpfc_hba *phba)
4384 struct lpfc_sli *psli;
4385 struct lpfc_sli_ring *pring;
4392 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4393 "0325 Reset HBA Data: x%x x%x\n",
4394 (phba->pport) ? phba->pport->port_state : 0,
4397 /* perform board reset */
4398 phba->fc_eventTag = 0;
4399 phba->link_events = 0;
4401 phba->pport->fc_myDID = 0;
4402 phba->pport->fc_prevDID = 0;
4405 /* Turn off parity checking and serr during the physical reset */
4406 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4407 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4409 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4411 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4413 /* Now toggle INITFF bit in the Host Control Register */
4414 writel(HC_INITFF, phba->HCregaddr);
4416 readl(phba->HCregaddr); /* flush */
4417 writel(0, phba->HCregaddr);
4418 readl(phba->HCregaddr); /* flush */
4420 /* Restore PCI cmd register */
4421 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4423 /* Initialize relevant SLI info */
4424 for (i = 0; i < psli->num_rings; i++) {
4425 pring = &psli->sli3_ring[i];
4427 pring->sli.sli3.rspidx = 0;
4428 pring->sli.sli3.next_cmdidx = 0;
4429 pring->sli.sli3.local_getidx = 0;
4430 pring->sli.sli3.cmdidx = 0;
4431 pring->missbufcnt = 0;
4434 phba->link_state = LPFC_WARM_START;
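/*
 * Sketch of the PCI command register save/mask/restore idiom used around
 * the physical reset above, shown standalone for clarity (illustrative
 * helper, assuming the caller supplies the reset step).
 */
static void lpfc_example_pci_quiesce_reset(struct pci_dev *pdev,
					   void (*do_reset)(void))
{
	uint16_t cfg_value;

	/* Turn off parity checking and SERR while the chip is reset */
	pci_read_config_word(pdev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(pdev, PCI_COMMAND,
			      cfg_value & ~(PCI_COMMAND_PARITY |
					    PCI_COMMAND_SERR));
	do_reset();
	/* Restore the original PCI command word */
	pci_write_config_word(pdev, PCI_COMMAND, cfg_value);
}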
4439 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4440 * @phba: Pointer to HBA context object.
4442 * This function resets a SLI4 HBA. This function disables PCI layer parity
4443 * checking while it resets the device. The caller is not required to hold
4446 * This function returns 0 always.
4449 lpfc_sli4_brdreset(struct lpfc_hba *phba)
4451 struct lpfc_sli *psli = &phba->sli;
4456 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4457 "0295 Reset HBA Data: x%x x%x x%x\n",
4458 phba->pport->port_state, psli->sli_flag,
4461 /* perform board reset */
4462 phba->fc_eventTag = 0;
4463 phba->link_events = 0;
4464 phba->pport->fc_myDID = 0;
4465 phba->pport->fc_prevDID = 0;
4467 spin_lock_irq(&phba->hbalock);
4468 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4469 phba->fcf.fcf_flag = 0;
4470 spin_unlock_irq(&phba->hbalock);
4472 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4473 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4474 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4478 /* Now physically reset the device */
4479 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4480 "0389 Performing PCI function reset!\n");
4482 /* Turn off parity checking and serr during the physical reset */
4483 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4484 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4485 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4487 /* Perform FCoE PCI function reset before freeing queue memory */
4488 rc = lpfc_pci_function_reset(phba);
4490 /* Restore PCI cmd register */
4491 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4497 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4498 * @phba: Pointer to HBA context object.
4500 * This function is called in the SLI initialization code path to
4501 * restart the HBA. The caller is not required to hold any lock.
4502 * This function writes MBX_RESTART mailbox command to the SLIM and
4503 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4504 * function to free any pending commands. The function enables
4505 * POST only during the first initialization. The function returns zero.
4506 * The function does not guarantee completion of MBX_RESTART mailbox
4507 * command before the return of this function.
4510 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4513 struct lpfc_sli *psli;
4514 volatile uint32_t word0;
4515 void __iomem *to_slim;
4516 uint32_t hba_aer_enabled;
4518 spin_lock_irq(&phba->hbalock);
4520 /* Take PCIe device Advanced Error Reporting (AER) state */
4521 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4526 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4527 "0337 Restart HBA Data: x%x x%x\n",
4528 (phba->pport) ? phba->pport->port_state : 0,
4532 mb = (MAILBOX_t *) &word0;
4533 mb->mbxCommand = MBX_RESTART;
4536 lpfc_reset_barrier(phba);
4538 to_slim = phba->MBslimaddr;
4539 writel(*(uint32_t *) mb, to_slim);
4540 readl(to_slim); /* flush */
4542 /* Only skip post after fc_ffinit is completed */
4543 if (phba->pport && phba->pport->port_state)
4544 word0 = 1; /* This is really setting up word1 */
4546 word0 = 0; /* This is really setting up word1 */
4547 to_slim = phba->MBslimaddr + sizeof (uint32_t);
4548 writel(*(uint32_t *) mb, to_slim);
4549 readl(to_slim); /* flush */
4551 lpfc_sli_brdreset(phba);
4553 phba->pport->stopped = 0;
4554 phba->link_state = LPFC_INIT_START;
4556 spin_unlock_irq(&phba->hbalock);
4558 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4559 psli->stats_start = get_seconds();
4561 /* Give the INITFF and Post time to settle. */
4564 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4565 if (hba_aer_enabled)
4566 pci_disable_pcie_error_reporting(phba->pcidev);
4568 lpfc_hba_down_post(phba);
4574 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4575 * @phba: Pointer to HBA context object.
4577 * This function is called in the SLI initialization code path to restart
4578 * a SLI4 HBA. The caller is not required to hold any lock.
4579 * At the end of the function, it calls lpfc_hba_down_post function to
4580 * free any pending commands.
4583 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4585 struct lpfc_sli *psli = &phba->sli;
4586 uint32_t hba_aer_enabled;
4590 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4591 "0296 Restart HBA Data: x%x x%x\n",
4592 phba->pport->port_state, psli->sli_flag);
4594 /* Take PCIe device Advanced Error Reporting (AER) state */
4595 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4597 rc = lpfc_sli4_brdreset(phba);
4599 spin_lock_irq(&phba->hbalock);
4600 phba->pport->stopped = 0;
4601 phba->link_state = LPFC_INIT_START;
4603 spin_unlock_irq(&phba->hbalock);
4605 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4606 psli->stats_start = get_seconds();
4608 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4609 if (hba_aer_enabled)
4610 pci_disable_pcie_error_reporting(phba->pcidev);
4612 lpfc_hba_down_post(phba);
4613 lpfc_sli4_queue_destroy(phba);
4619 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4620 * @phba: Pointer to HBA context object.
4622 * This routine wraps the actual SLI3 or SLI4 hba restart routine through
4623 * the API jump table function pointer in the lpfc_hba struct.
4626 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4628 return phba->lpfc_sli_brdrestart(phba);
4632 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4633 * @phba: Pointer to HBA context object.
4635 * This function is called after a HBA restart to wait for successful
4636 * restart of the HBA. Successful restart of the HBA is indicated by
4637 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4638 * iterations, the function will restart the HBA again. The function returns
4639 * zero if HBA successfully restarted else returns negative error code.
4642 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4644 uint32_t status, i = 0;
4646 /* Read the HBA Host Status Register */
4647 if (lpfc_readl(phba->HSregaddr, &status))
4650 /* Check status register to see what current state is */
4652 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4654 /* Check every 10ms for 10 retries, then every 100ms for 90
4655 * retries, then every 1 sec for 50 retries, for a total of
4656 * ~60 seconds before resetting the board again and checking
4657 * every 1 sec for 50 more retries. Up to 60 seconds may be
4658 * needed before the board is ready for Falcon FIPS
4659 * zeroization to complete; any board reset in between restarts
4660 * zeroization and further delays board readiness.
4663 /* Adapter failed to init, timeout, status reg
4665 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4666 "0436 Adapter failed to init, "
4667 "timeout, status reg x%x, "
4668 "FW Data: A8 x%x AC x%x\n", status,
4669 readl(phba->MBslimaddr + 0xa8),
4670 readl(phba->MBslimaddr + 0xac));
4671 phba->link_state = LPFC_HBA_ERROR;
4675 /* Check to see if any errors occurred during init */
4676 if (status & HS_FFERM) {
4677 /* ERROR: During chipset initialization */
4678 /* Adapter failed to init, chipset, status reg
4680 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4681 "0437 Adapter failed to init, "
4682 "chipset, status reg x%x, "
4683 "FW Data: A8 x%x AC x%x\n", status,
4684 readl(phba->MBslimaddr + 0xa8),
4685 readl(phba->MBslimaddr + 0xac));
4686 phba->link_state = LPFC_HBA_ERROR;
4699 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4700 lpfc_sli_brdrestart(phba);
4702 /* Read the HBA Host Status Register */
4703 if (lpfc_readl(phba->HSregaddr, &status))
4707 /* Check to see if any errors occurred during init */
4708 if (status & HS_FFERM) {
4709 /* ERROR: During chipset initialization */
4710 /* Adapter failed to init, chipset, status reg <status> */
4711 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4712 "0438 Adapter failed to init, chipset, "
4714 "FW Data: A8 x%x AC x%x\n", status,
4715 readl(phba->MBslimaddr + 0xa8),
4716 readl(phba->MBslimaddr + 0xac));
4717 phba->link_state = LPFC_HBA_ERROR;
4721 /* Clear all interrupt enable conditions */
4722 writel(0, phba->HCregaddr);
4723 readl(phba->HCregaddr); /* flush */
4725 /* setup host attn register */
4726 writel(0xffffffff, phba->HAregaddr);
4727 readl(phba->HAregaddr); /* flush */
4732 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4734 * This function calculates and returns the number of HBQs required to be
4738 lpfc_sli_hbq_count(void)
4740 return ARRAY_SIZE(lpfc_hbq_defs);
4744 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4746 * This function adds the number of hbq entries in every HBQ to get
4747 * the total number of hbq entries required for the HBA and returns
4751 lpfc_sli_hbq_entry_count(void)
4753 int hbq_count = lpfc_sli_hbq_count();
4757 for (i = 0; i < hbq_count; ++i)
4758 count += lpfc_hbq_defs[i]->entry_count;
4763 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4765 * This function calculates amount of memory required for all hbq entries
4766 * to be configured and returns the total memory required.
4769 lpfc_sli_hbq_size(void)
4771 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
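/*
 * Worked example (hypothetical entry counts, for illustration only): with
 * two HBQs of 256 and 128 entries, lpfc_sli_hbq_entry_count() returns
 * 384; assuming a 16-byte struct lpfc_hbq_entry, lpfc_sli_hbq_size()
 * would then return 384 * 16 = 6144 bytes.
 */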
4775 * lpfc_sli_hbq_setup - configure and initialize HBQs
4776 * @phba: Pointer to HBA context object.
4778 * This function is called during the SLI initialization to configure
4779 * all the HBQs and post buffers to the HBQ. The caller is not
4780 * required to hold any locks. This function will return zero if successful
4781 * else it will return a negative error code.
4784 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4786 int hbq_count = lpfc_sli_hbq_count();
4790 uint32_t hbq_entry_index;
4792 /* Get a Mailbox buffer to setup mailbox
4793 * commands for HBA initialization
4795 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4802 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4803 phba->link_state = LPFC_INIT_MBX_CMDS;
4804 phba->hbq_in_use = 1;
4806 hbq_entry_index = 0;
4807 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4808 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4809 phba->hbqs[hbqno].hbqPutIdx = 0;
4810 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4811 phba->hbqs[hbqno].entry_count =
4812 lpfc_hbq_defs[hbqno]->entry_count;
4813 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4814 hbq_entry_index, pmb);
4815 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4817 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4818 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4819 * mbxStatus <status>, ring <num> */
4821 lpfc_printf_log(phba, KERN_ERR,
4822 LOG_SLI | LOG_VPORT,
4823 "1805 Adapter failed to init. "
4824 "Data: x%x x%x x%x\n",
4826 pmbox->mbxStatus, hbqno);
4828 phba->link_state = LPFC_HBA_ERROR;
4829 mempool_free(pmb, phba->mbox_mem_pool);
4833 phba->hbq_count = hbq_count;
4835 mempool_free(pmb, phba->mbox_mem_pool);
4837 /* Initially populate or replenish the HBQs */
4838 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4839 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4844 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4845 * @phba: Pointer to HBA context object.
4847 * This function is called during the SLI initialization to configure
4848 * the ELS HBQ and post receive buffers to it. The caller is not
4849 * required to hold any locks. This function will return zero if successful
4850 * else it will return a negative error code.
4853 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4855 phba->hbq_in_use = 1;
4856 phba->hbqs[LPFC_ELS_HBQ].entry_count =
4857 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
4858 phba->hbq_count = 1;
4859 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
4860 /* Initially populate or replenish the HBQs */
4865 * lpfc_sli_config_port - Issue config port mailbox command
4866 * @phba: Pointer to HBA context object.
4867 * @sli_mode: sli mode - 2/3
4869 * This function is called by the sli initialization code path
4870 * to issue config_port mailbox command. This function restarts the
4871 * HBA firmware and issues a config_port mailbox command to configure
4872 * the SLI interface in the sli mode specified by sli_mode
4873 * variable. The caller is not required to hold any locks.
4874 * The function returns 0 if successful, else returns negative error
4878 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4881 uint32_t resetcount = 0, rc = 0, done = 0;
4883 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4885 phba->link_state = LPFC_HBA_ERROR;
4889 phba->sli_rev = sli_mode;
4890 while (resetcount < 2 && !done) {
4891 spin_lock_irq(&phba->hbalock);
4892 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4893 spin_unlock_irq(&phba->hbalock);
4894 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4895 lpfc_sli_brdrestart(phba);
4896 rc = lpfc_sli_chipset_init(phba);
4900 spin_lock_irq(&phba->hbalock);
4901 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4902 spin_unlock_irq(&phba->hbalock);
4905 /* Call pre CONFIG_PORT mailbox command initialization. A
4906 * value of 0 means the call was successful. Any
4907 * nonzero value is a failure, but if ERESTART is returned,
4908 * the driver may reset the HBA and try again.
4910 rc = lpfc_config_port_prep(phba);
4911 if (rc == -ERESTART) {
4912 phba->link_state = LPFC_LINK_UNKNOWN;
4917 phba->link_state = LPFC_INIT_MBX_CMDS;
4918 lpfc_config_port(phba, pmb);
4919 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4920 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4921 LPFC_SLI3_HBQ_ENABLED |
4922 LPFC_SLI3_CRP_ENABLED |
4923 LPFC_SLI3_BG_ENABLED |
4924 LPFC_SLI3_DSS_ENABLED);
4925 if (rc != MBX_SUCCESS) {
4926 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4927 "0442 Adapter failed to init, mbxCmd x%x "
4928 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
4929 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
4930 spin_lock_irq(&phba->hbalock);
4931 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
4932 spin_unlock_irq(&phba->hbalock);
4935 /* Allow asynchronous mailbox command to go through */
4936 spin_lock_irq(&phba->hbalock);
4937 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4938 spin_unlock_irq(&phba->hbalock);
4941 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
4942 (pmb->u.mb.un.varCfgPort.gasabt == 0))
4943 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4944 "3110 Port did not grant ASABT\n");
4949 goto do_prep_failed;
4951 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
4952 if (!pmb->u.mb.un.varCfgPort.cMA) {
4954 goto do_prep_failed;
4956 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
4957 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
4958 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
4959 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
4960 phba->max_vpi : phba->max_vports;
4964 phba->fips_level = 0;
4965 phba->fips_spec_rev = 0;
4966 if (pmb->u.mb.un.varCfgPort.gdss) {
4967 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
4968 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
4969 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
4970 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4971 "2850 Security Crypto Active. FIPS x%d "
4973 phba->fips_level, phba->fips_spec_rev);
4975 if (pmb->u.mb.un.varCfgPort.sec_err) {
4976 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4977 "2856 Config Port Security Crypto "
4979 pmb->u.mb.un.varCfgPort.sec_err);
4981 if (pmb->u.mb.un.varCfgPort.gerbm)
4982 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
4983 if (pmb->u.mb.un.varCfgPort.gcrp)
4984 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
4986 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
4987 phba->port_gp = phba->mbox->us.s3_pgp.port;
4989 if (phba->cfg_enable_bg) {
4990 if (pmb->u.mb.un.varCfgPort.gbg)
4991 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
4993 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4994 "0443 Adapter did not grant "
4998 phba->hbq_get = NULL;
4999 phba->port_gp = phba->mbox->us.s2.port;
5003 mempool_free(pmb, phba->mbox_mem_pool);
5009 * lpfc_sli_hba_setup - SLI initialization function
5010 * @phba: Pointer to HBA context object.
5012 * This function is the main SLI initialization function. This function
5013 * is called by the HBA initialization code, HBA reset code and HBA
5014 * error attention handler code. Caller is not required to hold any
5015 * locks. This function issues config_port mailbox command to configure
5016 * the SLI, setup iocb rings and HBQ rings. In the end the function
5017 * calls the config_port_post function to issue init_link mailbox
5018 * command and to start the discovery. The function will return zero
5019 * if successful, else it will return negative error code.
5022 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5028 switch (phba->cfg_sli_mode) {
5030 if (phba->cfg_enable_npiv) {
5031 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5032 "1824 NPIV enabled: Override sli_mode "
5033 "parameter (%d) to auto (0).\n",
5034 phba->cfg_sli_mode);
5043 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5044 "1819 Unrecognized sli_mode parameter: %d.\n",
5045 phba->cfg_sli_mode);
5049 phba->fcp_embed_io = 0; /* SLI4 FC support only */
5051 rc = lpfc_sli_config_port(phba, mode);
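/* If the requested mode could not be configured, fall back: retry
 * with SLI-2 when a higher mode was requested, or with SLI-3 when
 * SLI-2 itself was the mode that failed.
 */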
5053 if (rc && phba->cfg_sli_mode == 3)
5054 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5055 "1820 Unable to select SLI-3. "
5056 "Not supported by adapter.\n");
5057 if (rc && mode != 2)
5058 rc = lpfc_sli_config_port(phba, 2);
5059 else if (rc && mode == 2)
5060 rc = lpfc_sli_config_port(phba, 3);
5062 goto lpfc_sli_hba_setup_error;
5064 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5065 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5066 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5068 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5069 "2709 This device supports "
5070 "Advanced Error Reporting (AER)\n");
5071 spin_lock_irq(&phba->hbalock);
5072 phba->hba_flag |= HBA_AER_ENABLED;
5073 spin_unlock_irq(&phba->hbalock);
5075 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5076 "2708 This device does not support "
5077 "Advanced Error Reporting (AER): %d\n",
5079 phba->cfg_aer_support = 0;
5083 if (phba->sli_rev == 3) {
5084 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5085 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5087 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5088 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5089 phba->sli3_options = 0;
5092 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5093 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5094 phba->sli_rev, phba->max_vpi);
5095 rc = lpfc_sli_ring_map(phba);
5098 goto lpfc_sli_hba_setup_error;
5100 /* Initialize VPIs. */
5101 if (phba->sli_rev == LPFC_SLI_REV3) {
5103 * The VPI bitmask and physical ID array are allocated
5104 * and initialized once only - at driver load. A port
5105 * reset doesn't need to reinitialize this memory.
5107 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5108 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5109 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
5111 if (!phba->vpi_bmask) {
5113 goto lpfc_sli_hba_setup_error;
5116 phba->vpi_ids = kzalloc(
5117 (phba->max_vpi+1) * sizeof(uint16_t),
5119 if (!phba->vpi_ids) {
5120 kfree(phba->vpi_bmask);
5122 goto lpfc_sli_hba_setup_error;
5124 for (i = 0; i < phba->max_vpi; i++)
5125 phba->vpi_ids[i] = i;
5130 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5131 rc = lpfc_sli_hbq_setup(phba);
5133 goto lpfc_sli_hba_setup_error;
5135 spin_lock_irq(&phba->hbalock);
5136 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5137 spin_unlock_irq(&phba->hbalock);
5139 rc = lpfc_config_port_post(phba);
5141 goto lpfc_sli_hba_setup_error;
5145 lpfc_sli_hba_setup_error:
5146 phba->link_state = LPFC_HBA_ERROR;
5147 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5148 "0445 Firmware initialization failed\n");
5153 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5154 * @phba: Pointer to HBA context object.
5156 * This function issues a dump mailbox command to read config region
5157 * 23, parses the records in the region, and populates driver
5161 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5163 LPFC_MBOXQ_t *mboxq;
5164 struct lpfc_dmabuf *mp;
5165 struct lpfc_mqe *mqe;
5166 uint32_t data_length;
5169 /* Program the default value of vlan_id and fc_map */
5170 phba->valid_vlan = 0;
5171 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5172 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5173 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5175 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5179 mqe = &mboxq->u.mqe;
5180 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5182 goto out_free_mboxq;
5185 mp = (struct lpfc_dmabuf *) mboxq->context1;
5186 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5188 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5189 "(%d):2571 Mailbox cmd x%x Status x%x "
5190 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5191 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5192 "CQ: x%x x%x x%x x%x\n",
5193 mboxq->vport ? mboxq->vport->vpi : 0,
5194 bf_get(lpfc_mqe_command, mqe),
5195 bf_get(lpfc_mqe_status, mqe),
5196 mqe->un.mb_words[0], mqe->un.mb_words[1],
5197 mqe->un.mb_words[2], mqe->un.mb_words[3],
5198 mqe->un.mb_words[4], mqe->un.mb_words[5],
5199 mqe->un.mb_words[6], mqe->un.mb_words[7],
5200 mqe->un.mb_words[8], mqe->un.mb_words[9],
5201 mqe->un.mb_words[10], mqe->un.mb_words[11],
5202 mqe->un.mb_words[12], mqe->un.mb_words[13],
5203 mqe->un.mb_words[14], mqe->un.mb_words[15],
5204 mqe->un.mb_words[16], mqe->un.mb_words[50],
5206 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5207 mboxq->mcqe.trailer);
5210 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5213 goto out_free_mboxq;
5215 data_length = mqe->un.mb_words[5];
5216 if (data_length > DMP_RGN23_SIZE) {
5217 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5220 goto out_free_mboxq;
5223 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5224 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5229 mempool_free(mboxq, phba->mbox_mem_pool);
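/*
 * Sketch of the synchronous (polled) mailbox pattern the routine above
 * follows: allocate from the mailbox mempool, build the command, issue it
 * with MBX_POLL, and free it unless the command timed out (on MBX_TIMEOUT
 * the mailbox is still owned by the driver state machine). Illustrative
 * helper only.
 */
static int lpfc_example_poll_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	lpfc_read_rev(phba, mboxq);	/* any polled command works here */
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
	return rc == MBX_SUCCESS ? 0 : -EIO;
}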
5234 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5235 * @phba: pointer to lpfc hba data structure.
5236 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5237 * @vpd: pointer to the memory to hold resulting port vpd data.
5238 * @vpd_size: On input, the number of bytes allocated to @vpd.
5239 * On output, the number of data bytes in @vpd.
5241 * This routine executes a READ_REV SLI4 mailbox command. In
5242 * addition, this routine gets the port vpd data.
5246 * -ENOMEM - could not allocate memory.
5249 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5250 uint8_t *vpd, uint32_t *vpd_size)
5254 struct lpfc_dmabuf *dmabuf;
5255 struct lpfc_mqe *mqe;
5257 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5262 * Get a DMA buffer for the vpd data resulting from the READ_REV
5265 dma_size = *vpd_size;
5266 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
5267 &dmabuf->phys, GFP_KERNEL);
5268 if (!dmabuf->virt) {
5274 * The SLI4 implementation of READ_REV conflicts at word1,
5275 * bits 31:16 and SLI4 adds vpd functionality not present
5276 * in SLI3. This code corrects the conflicts.
5278 lpfc_read_rev(phba, mboxq);
5279 mqe = &mboxq->u.mqe;
5280 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5281 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5282 mqe->un.read_rev.word1 &= 0x0000FFFF;
5283 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5284 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5286 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5288 dma_free_coherent(&phba->pcidev->dev, dma_size,
5289 dmabuf->virt, dmabuf->phys);
5295 * The available vpd length cannot be bigger than the
5296 * DMA buffer passed to the port. Catch the less than
5297 * case and update the caller's size.
5299 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5300 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5302 memcpy(vpd, dmabuf->virt, *vpd_size);
5304 dma_free_coherent(&phba->pcidev->dev, dma_size,
5305 dmabuf->virt, dmabuf->phys);
5311 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5312 * @phba: pointer to lpfc hba data structure.
5314 * This routine retrieves the SLI4 device physical port name this PCI function
5319 * otherwise - failed to retrieve physical port name
5322 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5324 LPFC_MBOXQ_t *mboxq;
5325 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5326 struct lpfc_controller_attribute *cntl_attr;
5327 struct lpfc_mbx_get_port_name *get_port_name;
5328 void *virtaddr = NULL;
5329 uint32_t alloclen, reqlen;
5330 uint32_t shdr_status, shdr_add_status;
5331 union lpfc_sli4_cfg_shdr *shdr;
5332 char cport_name = 0;
5335 /* We assume nothing at this point */
5336 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5337 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5339 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5342 /* obtain link type and link number via READ_CONFIG */
5343 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5344 lpfc_sli4_read_config(phba);
5345 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5346 goto retrieve_ppname;
5348 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5349 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5350 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5351 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5352 LPFC_SLI4_MBX_NEMBED);
5353 if (alloclen < reqlen) {
5354 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5355 "3084 Allocated DMA memory size (%d) is "
5356 "less than the requested DMA memory size "
5357 "(%d)\n", alloclen, reqlen);
5359 goto out_free_mboxq;
5361 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5362 virtaddr = mboxq->sge_array->addr[0];
5363 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5364 shdr = &mbx_cntl_attr->cfg_shdr;
5365 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5366 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5367 if (shdr_status || shdr_add_status || rc) {
5368 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5369 "3085 Mailbox x%x (x%x/x%x) failed, "
5370 "rc:x%x, status:x%x, add_status:x%x\n",
5371 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5372 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5373 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5374 rc, shdr_status, shdr_add_status);
5376 goto out_free_mboxq;
5378 cntl_attr = &mbx_cntl_attr->cntl_attr;
5379 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5380 phba->sli4_hba.lnk_info.lnk_tp =
5381 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5382 phba->sli4_hba.lnk_info.lnk_no =
5383 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5384 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5385 "3086 lnk_type:%d, lnk_numb:%d\n",
5386 phba->sli4_hba.lnk_info.lnk_tp,
5387 phba->sli4_hba.lnk_info.lnk_no);
5390 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5391 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5392 sizeof(struct lpfc_mbx_get_port_name) -
5393 sizeof(struct lpfc_sli4_cfg_mhdr),
5394 LPFC_SLI4_MBX_EMBED);
5395 get_port_name = &mboxq->u.mqe.un.get_port_name;
5396 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5397 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5398 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5399 phba->sli4_hba.lnk_info.lnk_tp);
5400 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5401 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5402 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5403 if (shdr_status || shdr_add_status || rc) {
5404 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5405 "3087 Mailbox x%x (x%x/x%x) failed: "
5406 "rc:x%x, status:x%x, add_status:x%x\n",
5407 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5408 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5409 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5410 rc, shdr_status, shdr_add_status);
5412 goto out_free_mboxq;
5414 switch (phba->sli4_hba.lnk_info.lnk_no) {
5415 case LPFC_LINK_NUMBER_0:
5416 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5417 &get_port_name->u.response);
5418 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5420 case LPFC_LINK_NUMBER_1:
5421 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5422 &get_port_name->u.response);
5423 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5425 case LPFC_LINK_NUMBER_2:
5426 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5427 &get_port_name->u.response);
5428 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5430 case LPFC_LINK_NUMBER_3:
5431 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5432 &get_port_name->u.response);
5433 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5439 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5440 phba->Port[0] = cport_name;
5441 phba->Port[1] = '\0';
5442 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5443 "3091 SLI get port name: %s\n", phba->Port);
5447 if (rc != MBX_TIMEOUT) {
5448 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5449 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5451 mempool_free(mboxq, phba->mbox_mem_pool);
5457 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5458 * @phba: pointer to lpfc hba data structure.
5460 * This routine is called to explicitly arm the SLI4 device's completion and
5464 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5467 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
5469 sli4_hba->sli4_cq_release(sli4_hba->mbx_cq, LPFC_QUEUE_REARM);
5470 sli4_hba->sli4_cq_release(sli4_hba->els_cq, LPFC_QUEUE_REARM);
5471 if (sli4_hba->nvmels_cq)
5472 sli4_hba->sli4_cq_release(sli4_hba->nvmels_cq,
5475 if (sli4_hba->fcp_cq)
5476 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
5477 sli4_hba->sli4_cq_release(sli4_hba->fcp_cq[qidx],
5480 if (sli4_hba->nvme_cq)
5481 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
5482 sli4_hba->sli4_cq_release(sli4_hba->nvme_cq[qidx],
5486 sli4_hba->sli4_cq_release(sli4_hba->oas_cq, LPFC_QUEUE_REARM);
5488 if (sli4_hba->hba_eq)
5489 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
5490 sli4_hba->sli4_eq_release(sli4_hba->hba_eq[qidx],
5493 if (phba->nvmet_support) {
5494 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5495 sli4_hba->sli4_cq_release(
5496 sli4_hba->nvmet_cqset[qidx],
5502 sli4_hba->sli4_eq_release(sli4_hba->fof_eq, LPFC_QUEUE_REARM);
5506 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5507 * @phba: Pointer to HBA context object.
5508 * @type: The resource extent type.
5509 * @extnt_count: buffer to hold port available extent count.
5510 * @extnt_size: buffer to hold element count per extent.
5512 * This function calls the port and retrieves the number of available
5513 * extents and their size for a particular extent type.
5515 * Returns: 0 if successful. Nonzero otherwise.
5518 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5519 uint16_t *extnt_count, uint16_t *extnt_size)
5524 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5527 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5531 /* Find out how many extents are available for this resource type */
5532 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5533 sizeof(struct lpfc_sli4_cfg_mhdr));
5534 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5535 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5536 length, LPFC_SLI4_MBX_EMBED);
5538 /* Send an extents count of 0 - the GET doesn't use it. */
5539 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5540 LPFC_SLI4_MBX_EMBED);
5546 if (!phba->sli4_hba.intr_enable)
5547 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5549 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5550 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5557 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5558 if (bf_get(lpfc_mbox_hdr_status,
5559 &rsrc_info->header.cfg_shdr.response)) {
5560 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5561 "2930 Failed to get resource extents "
5562 "Status 0x%x Add'l Status 0x%x\n",
5563 bf_get(lpfc_mbox_hdr_status,
5564 &rsrc_info->header.cfg_shdr.response),
5565 bf_get(lpfc_mbox_hdr_add_status,
5566 &rsrc_info->header.cfg_shdr.response));
5571 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5573 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5576 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5577 "3162 Retrieved extents type-%d from port: count:%d, "
5578 "size:%d\n", type, *extnt_count, *extnt_size);
5581 mempool_free(mbox, phba->mbox_mem_pool);
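/*
 * The issue-path choice above recurs throughout this file: before
 * interrupts are enabled the driver must poll for the mailbox completion,
 * afterwards it may sleep and wait. A minimal sketch of that dispatch
 * (illustrative helper name):
 */
static int lpfc_example_issue_cfg_mbox(struct lpfc_hba *phba,
				       LPFC_MBOXQ_t *mbox)
{
	uint32_t mbox_tmo;

	if (!phba->sli4_hba.intr_enable)
		return lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
	return lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}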
5586 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5587 * @phba: Pointer to HBA context object.
5588 * @type: The extent type to check.
5590 * This function reads the current available extents from the port and checks
5591 * if the extent count or extent size has changed since the last access.
5592 * Callers use this routine after a port reset to determine if there is
5593 * an extent reprovisioning requirement.
5596 * -Error: error indicates problem.
5597 * 1: Extent count or size has changed.
5601 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5603 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5604 uint16_t size_diff, rsrc_ext_size;
5606 struct lpfc_rsrc_blks *rsrc_entry;
5607 struct list_head *rsrc_blk_list = NULL;
5611 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5618 case LPFC_RSC_TYPE_FCOE_RPI:
5619 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5621 case LPFC_RSC_TYPE_FCOE_VPI:
5622 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5624 case LPFC_RSC_TYPE_FCOE_XRI:
5625 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5627 case LPFC_RSC_TYPE_FCOE_VFI:
5628 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5634 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5636 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5640 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5647 * lpfc_sli4_cfg_post_extnts - Issue the allocate-resource-extents command
5648 * @phba: Pointer to HBA context object.
5649 * @extnt_cnt: number of available extents.
5650 * @type: the extent type (rpi, xri, vfi, vpi).
5651 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5652 * @mbox: pointer to the caller's allocated mailbox structure.
5654 * This function executes the extents allocation request. It also
5655 * takes care of the amount of memory needed to allocate or get the
5656 * allocated extents. It is the caller's responsibility to evaluate
5660 * -Error: Error value describes the condition found.
5664 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5665 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5670 uint32_t alloc_len, mbox_tmo;
5672 /* Calculate the total requested length of the dma memory */
5673 req_len = extnt_cnt * sizeof(uint16_t);
5676 * Calculate the size of an embedded mailbox. The uint32_t
5677 * accounts for extents-specific word.
5679 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5683 * Presume the allocation and response will fit into an embedded
5684 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5686 *emb = LPFC_SLI4_MBX_EMBED;
5687 if (req_len > emb_len) {
5688 req_len = extnt_cnt * sizeof(uint16_t) +
5689 sizeof(union lpfc_sli4_cfg_shdr) +
5691 *emb = LPFC_SLI4_MBX_NEMBED;
5694 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5695 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5697 if (alloc_len < req_len) {
5698 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5699 "2982 Allocated DMA memory size (x%x) is "
5700 "less than the requested DMA memory "
5701 "size (x%x)\n", alloc_len, req_len);
5704 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5708 if (!phba->sli4_hba.intr_enable)
5709 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5711 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5712 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5721 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5722 * @phba: Pointer to HBA context object.
5723 * @type: The resource extent type to allocate.
5725 * This function allocates the number of elements for the specified
5729 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5732 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5733 uint16_t rsrc_id, rsrc_start, j, k;
5736 unsigned long longs;
5737 unsigned long *bmask;
5738 struct lpfc_rsrc_blks *rsrc_blks;
5741 struct lpfc_id_range *id_array = NULL;
5742 void *virtaddr = NULL;
5743 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5744 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5745 struct list_head *ext_blk_list;
5747 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5753 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5754 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5755 "3009 No available Resource Extents "
5756 "for resource type 0x%x: Count: 0x%x, "
5757 "Size 0x%x\n", type, rsrc_cnt,
5762 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5763 "2903 Post resource extents type-0x%x: "
5764 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5766 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5770 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5777 * Figure out where the response is located. Then get local pointers
5778 * to the response data. The port does not guarantee to respond to
5779 * all extents counts request so update the local variable with the
5780 * allocated count from the port.
5782 if (emb == LPFC_SLI4_MBX_EMBED) {
5783 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5784 id_array = &rsrc_ext->u.rsp.id[0];
5785 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5787 virtaddr = mbox->sge_array->addr[0];
5788 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5789 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5790 id_array = &n_rsrc->id;
5793 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5794 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5797 * Based on the resource size and count, correct the base and max
5800 length = sizeof(struct lpfc_rsrc_blks);
5802 case LPFC_RSC_TYPE_FCOE_RPI:
5803 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5804 sizeof(unsigned long),
5806 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5810 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5813 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5814 kfree(phba->sli4_hba.rpi_bmask);
5820 * The next_rpi was initialized with the maximum available
5821 * count but the port may allocate a smaller number. Catch
5822 * that case and update the next_rpi.
5824 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5826 /* Initialize local ptrs for common extent processing later. */
5827 bmask = phba->sli4_hba.rpi_bmask;
5828 ids = phba->sli4_hba.rpi_ids;
5829 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5831 case LPFC_RSC_TYPE_FCOE_VPI:
5832 phba->vpi_bmask = kzalloc(longs *
5833 sizeof(unsigned long),
5835 if (unlikely(!phba->vpi_bmask)) {
5839 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5842 if (unlikely(!phba->vpi_ids)) {
5843 kfree(phba->vpi_bmask);
5848 /* Initialize local ptrs for common extent processing later. */
5849 bmask = phba->vpi_bmask;
5850 ids = phba->vpi_ids;
5851 ext_blk_list = &phba->lpfc_vpi_blk_list;
5853 case LPFC_RSC_TYPE_FCOE_XRI:
5854 phba->sli4_hba.xri_bmask = kzalloc(longs *
5855 sizeof(unsigned long),
5857 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5861 phba->sli4_hba.max_cfg_param.xri_used = 0;
5862 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5865 if (unlikely(!phba->sli4_hba.xri_ids)) {
5866 kfree(phba->sli4_hba.xri_bmask);
5871 /* Initialize local ptrs for common extent processing later. */
5872 bmask = phba->sli4_hba.xri_bmask;
5873 ids = phba->sli4_hba.xri_ids;
5874 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5876 case LPFC_RSC_TYPE_FCOE_VFI:
5877 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5878 sizeof(unsigned long),
5880 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5884 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5887 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5888 kfree(phba->sli4_hba.vfi_bmask);
5893 /* Initialize local ptrs for common extent processing later. */
5894 bmask = phba->sli4_hba.vfi_bmask;
5895 ids = phba->sli4_hba.vfi_ids;
5896 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5899 /* Unsupported Opcode. Fail call. */
5903 ext_blk_list = NULL;
5908 * Complete initializing the extent configuration with the
5909 * allocated ids assigned to this function. The bitmask serves
5910 * as an index into the array and manages the available ids. The
5911 * array just stores the ids communicated to the port via the wqes.
5913 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5915 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5918 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5921 rsrc_blks = kzalloc(length, GFP_KERNEL);
5922 if (unlikely(!rsrc_blks)) {
5928 rsrc_blks->rsrc_start = rsrc_id;
5929 rsrc_blks->rsrc_size = rsrc_size;
5930 list_add_tail(&rsrc_blks->list, ext_blk_list);
5931 rsrc_start = rsrc_id;
5932 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
5933 phba->sli4_hba.scsi_xri_start = rsrc_start +
5934 lpfc_sli4_get_iocb_cnt(phba);
5935 phba->sli4_hba.nvme_xri_start =
5936 phba->sli4_hba.scsi_xri_start +
5937 phba->sli4_hba.scsi_xri_max;
5940 while (rsrc_id < (rsrc_start + rsrc_size)) {
5945 /* Entire word processed. Get next word. */
5950 lpfc_sli4_mbox_cmd_free(phba, mbox);
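/*
 * Sketch of how the per-type bitmask and id array built above are meant
 * to be consumed: the bitmask hands out free slots, and the array maps a
 * slot back to the identifier the port actually assigned. Hypothetical
 * helper, assuming the caller serializes access.
 */
static int lpfc_example_alloc_rsrc_id(unsigned long *bmask, uint16_t *ids,
				      unsigned int max_ids)
{
	unsigned int slot;

	slot = find_first_zero_bit(bmask, max_ids);
	if (slot >= max_ids)
		return -ENOSPC;	/* all resource ids in use */
	set_bit(slot, bmask);
	return ids[slot];	/* port-assigned identifier */
}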
5957 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
5958 * @phba: Pointer to HBA context object.
5959 * @type: the extent's type.
5961 * This function deallocates all extents of a particular resource type.
5962 * SLI4 does not allow for deallocating a particular extent range. It
5963 * is the caller's responsibility to release all kernel memory resources.
5966 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5969 uint32_t length, mbox_tmo = 0;
5971 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5972 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5974 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5979 * This function sends an embedded mailbox because it only sends the
5980 * resource type. All extents of this type are released by the
5983 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5984 sizeof(struct lpfc_sli4_cfg_mhdr));
5985 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5986 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5987 length, LPFC_SLI4_MBX_EMBED);
5989 /* Send an extents count of 0 - the dealloc doesn't use it. */
5990 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5991 LPFC_SLI4_MBX_EMBED);
5996 if (!phba->sli4_hba.intr_enable)
5997 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5999 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6000 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6007 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6008 if (bf_get(lpfc_mbox_hdr_status,
6009 &dealloc_rsrc->header.cfg_shdr.response)) {
6010 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6011 "2919 Failed to release resource extents "
6012 "for type %d - Status 0x%x Add'l Status 0x%x. "
6013 "Resource memory not released.\n",
6015 bf_get(lpfc_mbox_hdr_status,
6016 &dealloc_rsrc->header.cfg_shdr.response),
6017 bf_get(lpfc_mbox_hdr_add_status,
6018 &dealloc_rsrc->header.cfg_shdr.response));
6023 /* Release kernel memory resources for the specific type. */
6025 case LPFC_RSC_TYPE_FCOE_VPI:
6026 kfree(phba->vpi_bmask);
6027 kfree(phba->vpi_ids);
6028 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6029 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6030 &phba->lpfc_vpi_blk_list, list) {
6031 list_del_init(&rsrc_blk->list);
6034 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6036 case LPFC_RSC_TYPE_FCOE_XRI:
6037 kfree(phba->sli4_hba.xri_bmask);
6038 kfree(phba->sli4_hba.xri_ids);
6039 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6040 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6041 list_del_init(&rsrc_blk->list);
6045 case LPFC_RSC_TYPE_FCOE_VFI:
6046 kfree(phba->sli4_hba.vfi_bmask);
6047 kfree(phba->sli4_hba.vfi_ids);
6048 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6049 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6050 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6051 list_del_init(&rsrc_blk->list);
6055 case LPFC_RSC_TYPE_FCOE_RPI:
6056 /* RPI bitmask and physical id array are cleaned up earlier. */
6057 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6058 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6059 list_del_init(&rsrc_blk->list);
6067 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6070 mempool_free(mbox, phba->mbox_mem_pool);
6075 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6080 len = sizeof(struct lpfc_mbx_set_feature) -
6081 sizeof(struct lpfc_sli4_cfg_mhdr);
6082 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6083 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6084 LPFC_SLI4_MBX_EMBED);
6087 case LPFC_SET_UE_RECOVERY:
6088 bf_set(lpfc_mbx_set_feature_UER,
6089 &mbox->u.mqe.un.set_feature, 1);
6090 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6091 mbox->u.mqe.un.set_feature.param_len = 8;
6093 case LPFC_SET_MDS_DIAGS:
6094 bf_set(lpfc_mbx_set_feature_mds,
6095 &mbox->u.mqe.un.set_feature, 1);
6096 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6097 &mbox->u.mqe.un.set_feature, 1);
6098 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6099 mbox->u.mqe.un.set_feature.param_len = 8;
6107 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource identifiers.
6108 * @phba: Pointer to HBA context object.
6110 * This function allocates all SLI4 resource identifiers.
6113 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6115 int i, rc, error = 0;
6116 uint16_t count, base;
6117 unsigned long longs;
6119 if (!phba->sli4_hba.rpi_hdrs_in_use)
6120 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6121 if (phba->sli4_hba.extents_in_use) {
6123 * The port supports resource extents. The XRI, VPI, VFI, RPI
6124 * resource extent count must be read and allocated before
6125 * provisioning the resource id arrays.
6127 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6128 LPFC_IDX_RSRC_RDY) {
6130 * Extent-based resources are set - the driver could
6131 * be in a port reset. Figure out if any corrective
6132 * actions need to be taken.
6134 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6135 LPFC_RSC_TYPE_FCOE_VFI);
6138 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6139 LPFC_RSC_TYPE_FCOE_VPI);
6142 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6143 LPFC_RSC_TYPE_FCOE_XRI);
6146 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6147 LPFC_RSC_TYPE_FCOE_RPI);
6152 * It's possible that the number of resources
6153 * provided to this port instance changed between
6154 * resets. Detect this condition and reallocate
6155 * resources. Otherwise, there is no action.
6158 lpfc_printf_log(phba, KERN_INFO,
6159 LOG_MBOX | LOG_INIT,
6160 "2931 Detected extent resource "
6161 "change. Reallocating all "
6163 rc = lpfc_sli4_dealloc_extent(phba,
6164 LPFC_RSC_TYPE_FCOE_VFI);
6165 rc = lpfc_sli4_dealloc_extent(phba,
6166 LPFC_RSC_TYPE_FCOE_VPI);
6167 rc = lpfc_sli4_dealloc_extent(phba,
6168 LPFC_RSC_TYPE_FCOE_XRI);
6169 rc = lpfc_sli4_dealloc_extent(phba,
6170 LPFC_RSC_TYPE_FCOE_RPI);
6175 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6179 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6183 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6187 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6190 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6195 * The port does not support resource extents. The XRI, VPI,
6196 * VFI, RPI resource ids were determined from READ_CONFIG.
6197 * Just allocate the bitmasks and provision the resource id
6198 * arrays. If a port reset is active, the resources don't
6199 * need any action - just exit.
6201 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6202 LPFC_IDX_RSRC_RDY) {
6203 lpfc_sli4_dealloc_resource_identifiers(phba);
6204 lpfc_sli4_remove_rpis(phba);
6207 count = phba->sli4_hba.max_cfg_param.max_rpi;
6209 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6210 "3279 Invalid provisioning of "
6215 base = phba->sli4_hba.max_cfg_param.rpi_base;
6216 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6217 phba->sli4_hba.rpi_bmask = kzalloc(longs *
6218 sizeof(unsigned long),
6220 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6224 phba->sli4_hba.rpi_ids = kzalloc(count *
6227 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6229 goto free_rpi_bmask;
6232 for (i = 0; i < count; i++)
6233 phba->sli4_hba.rpi_ids[i] = base + i;
6236 count = phba->sli4_hba.max_cfg_param.max_vpi;
6238 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6239 "3280 Invalid provisioning of "
6244 base = phba->sli4_hba.max_cfg_param.vpi_base;
6245 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6246 phba->vpi_bmask = kzalloc(longs *
6247 sizeof(unsigned long),
6249 if (unlikely(!phba->vpi_bmask)) {
6253 phba->vpi_ids = kzalloc(count *
6256 if (unlikely(!phba->vpi_ids)) {
6258 goto free_vpi_bmask;
6261 for (i = 0; i < count; i++)
6262 phba->vpi_ids[i] = base + i;
6265 count = phba->sli4_hba.max_cfg_param.max_xri;
6267 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6268 "3281 Invalid provisioning of "
6273 base = phba->sli4_hba.max_cfg_param.xri_base;
6274 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6275 phba->sli4_hba.xri_bmask = kzalloc(longs *
6276 sizeof(unsigned long),
6278 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6282 phba->sli4_hba.max_cfg_param.xri_used = 0;
6283 phba->sli4_hba.xri_ids = kzalloc(count *
6286 if (unlikely(!phba->sli4_hba.xri_ids)) {
6288 goto free_xri_bmask;
6291 for (i = 0; i < count; i++)
6292 phba->sli4_hba.xri_ids[i] = base + i;
6295 count = phba->sli4_hba.max_cfg_param.max_vfi;
6297 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6298 "3282 Invalid provisioning of "
6303 base = phba->sli4_hba.max_cfg_param.vfi_base;
6304 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6305 phba->sli4_hba.vfi_bmask = kzalloc(longs *
6306 sizeof(unsigned long),
6308 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6312 phba->sli4_hba.vfi_ids = kzalloc(count *
6315 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6317 goto free_vfi_bmask;
6320 for (i = 0; i < count; i++)
6321 phba->sli4_hba.vfi_ids[i] = base + i;
6324 * Mark all resources ready. An HBA reset doesn't need
6325 * to reset the initialization.
6327 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6333 kfree(phba->sli4_hba.vfi_bmask);
6334 phba->sli4_hba.vfi_bmask = NULL;
6336 kfree(phba->sli4_hba.xri_ids);
6337 phba->sli4_hba.xri_ids = NULL;
6339 kfree(phba->sli4_hba.xri_bmask);
6340 phba->sli4_hba.xri_bmask = NULL;
6342 kfree(phba->vpi_ids);
6343 phba->vpi_ids = NULL;
6345 kfree(phba->vpi_bmask);
6346 phba->vpi_bmask = NULL;
6348 kfree(phba->sli4_hba.rpi_ids);
6349 phba->sli4_hba.rpi_ids = NULL;
6351 kfree(phba->sli4_hba.rpi_bmask);
6352 phba->sli4_hba.rpi_bmask = NULL;
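/*
 * Editor's sketch (not driver code): the rpi/vpi/xri/vfi blocks above all
 * follow one provisioning shape - size a bitmask in longs, allocate a
 * uint16_t id array, and fill it with contiguous ids from the base that
 * READ_CONFIG reported. A factored form with illustrative names:
 */
#if 0
static int example_provision_ids(unsigned long **bmask_out, uint16_t **ids_out,
				 uint16_t base, uint16_t count)
{
	unsigned long longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
	unsigned long *bmask;
	uint16_t *ids;
	int i;

	bmask = kcalloc(longs, sizeof(unsigned long), GFP_KERNEL);
	if (!bmask)
		return -ENOMEM;
	ids = kcalloc(count, sizeof(uint16_t), GFP_KERNEL);
	if (!ids) {
		kfree(bmask);
		return -ENOMEM;
	}
	for (i = 0; i < count; i++)
		ids[i] = base + i;	/* ids are contiguous from base */
	*bmask_out = bmask;
	*ids_out = ids;
	return 0;
}
#endif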
6358 * lpfc_sli4_dealloc_resource_identifiers - Release all SLI4 resource identifiers.
6359 * @phba: Pointer to HBA context object.
6361 * This function releases all SLI4 resource identifiers allocated to the port.
6365 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6367 if (phba->sli4_hba.extents_in_use) {
6368 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6369 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6370 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6371 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6373 kfree(phba->vpi_bmask);
6374 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6375 kfree(phba->vpi_ids);
6376 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6377 kfree(phba->sli4_hba.xri_bmask);
6378 kfree(phba->sli4_hba.xri_ids);
6379 kfree(phba->sli4_hba.vfi_bmask);
6380 kfree(phba->sli4_hba.vfi_ids);
6381 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6382 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6389 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6390 * @phba: Pointer to HBA context object.
6391 * @type: The resource extent type.
6392 * @extnt_cnt: buffer to hold port extent count response
6393 * @extnt_size: buffer to hold port extent size response.
6395 * This function calls the port to read the host allocated extents
6396 * for a particular type.
6399 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6400 uint16_t *extnt_cnt, uint16_t *extnt_size)
6404 uint16_t curr_blks = 0;
6405 uint32_t req_len, emb_len;
6406 uint32_t alloc_len, mbox_tmo;
6407 struct list_head *blk_list_head;
6408 struct lpfc_rsrc_blks *rsrc_blk;
6410 void *virtaddr = NULL;
6411 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6412 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6413 union lpfc_sli4_cfg_shdr *shdr;
6416 case LPFC_RSC_TYPE_FCOE_VPI:
6417 blk_list_head = &phba->lpfc_vpi_blk_list;
6419 case LPFC_RSC_TYPE_FCOE_XRI:
6420 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6422 case LPFC_RSC_TYPE_FCOE_VFI:
6423 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6425 case LPFC_RSC_TYPE_FCOE_RPI:
6426 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6432	/* Count the number of extents currently allocated for this type. */
6433 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6434 if (curr_blks == 0) {
6436			 * The GET_ALLOCATED mailbox does not return the size,
6437			 * just the count. The size is taken from the first
6438			 * allocated block; all extents of a type share the
6439			 * same size, so set the return value now.
6442 *extnt_size = rsrc_blk->rsrc_size;
6448 * Calculate the size of an embedded mailbox. The uint32_t
6449	 * accounts for the extents-specific word.
6451 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6455 * Presume the allocation and response will fit into an embedded
6456 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6458 emb = LPFC_SLI4_MBX_EMBED;
6460 if (req_len > emb_len) {
6461 req_len = curr_blks * sizeof(uint16_t) +
6462 sizeof(union lpfc_sli4_cfg_shdr) +
6464 emb = LPFC_SLI4_MBX_NEMBED;
6467 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6470 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6472 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6473 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6475 if (alloc_len < req_len) {
6476 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6477 "2983 Allocated DMA memory size (x%x) is "
6478 "less than the requested DMA memory "
6479 "size (x%x)\n", alloc_len, req_len);
6483 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6489 if (!phba->sli4_hba.intr_enable)
6490 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6492 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6493 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6502	 * Figure out where the response is located, then get local pointers
6503	 * to the response data. The port does not guarantee a response to
6504	 * every extent count request, so update the local variable with the
6505	 * allocated count from the port.
6507 if (emb == LPFC_SLI4_MBX_EMBED) {
6508 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6509 shdr = &rsrc_ext->header.cfg_shdr;
6510 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6512 virtaddr = mbox->sge_array->addr[0];
6513 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6514 shdr = &n_rsrc->cfg_shdr;
6515 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6518 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6519 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6520 "2984 Failed to read allocated resources "
6521 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6523 bf_get(lpfc_mbox_hdr_status, &shdr->response),
6524 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6529 lpfc_sli4_mbox_cmd_free(phba, mbox);
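/*
 * Editor's sketch (not driver code): the embed/non-embed decision made
 * above. A request fits the embedded mailbox only if it is no larger than
 * the MAILBOX_t body less the config header and the extents-specific word;
 * otherwise a non-embedded (externally DMA'd) mailbox is used.
 */
#if 0
static uint8_t example_pick_mbox_mode(uint32_t req_len)
{
	uint32_t emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
			   sizeof(uint32_t);

	return (req_len > emb_len) ? LPFC_SLI4_MBX_NEMBED
				   : LPFC_SLI4_MBX_EMBED;
}
#endif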
6534 * lpfc_sli4_repost_sgl_list - Repost the buffer sgl pages as a block
6535 * @phba: pointer to lpfc hba data structure.
6537 * @sgl_list: linked list of sgl buffers to post
6538 * @cnt: number of linked list buffers
6540 * This routine walks the list of buffers that have been allocated and
6541 * reposts them to the port using SGL block post. This is needed after a
6542 * pci_function_reset/warm_start or start. It attempts to construct blocks
6543 * of buffer sgls that contain contiguous xris and uses the non-embedded
6544 * SGL block post mailbox command to post them to the port. For any single
6545 * buffer sgl with a non-contiguous xri, it uses the embedded SGL post
6546 * mailbox command instead (the block-forming rule is sketched below).
6548 * Returns: 0 = success, non-zero failure.
6551 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
6552 struct list_head *sgl_list, int cnt)
6554 struct lpfc_sglq *sglq_entry = NULL;
6555 struct lpfc_sglq *sglq_entry_next = NULL;
6556 struct lpfc_sglq *sglq_entry_first = NULL;
6557 int status, total_cnt;
6558 int post_cnt = 0, num_posted = 0, block_cnt = 0;
6559 int last_xritag = NO_XRI;
6560 LIST_HEAD(prep_sgl_list);
6561 LIST_HEAD(blck_sgl_list);
6562 LIST_HEAD(allc_sgl_list);
6563 LIST_HEAD(post_sgl_list);
6564 LIST_HEAD(free_sgl_list);
6566 spin_lock_irq(&phba->hbalock);
6567 spin_lock(&phba->sli4_hba.sgl_list_lock);
6568 list_splice_init(sgl_list, &allc_sgl_list);
6569 spin_unlock(&phba->sli4_hba.sgl_list_lock);
6570 spin_unlock_irq(&phba->hbalock);
6573 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6574 &allc_sgl_list, list) {
6575 list_del_init(&sglq_entry->list);
6577 if ((last_xritag != NO_XRI) &&
6578 (sglq_entry->sli4_xritag != last_xritag + 1)) {
6579 /* a hole in xri block, form a sgl posting block */
6580 list_splice_init(&prep_sgl_list, &blck_sgl_list);
6581 post_cnt = block_cnt - 1;
6582 /* prepare list for next posting block */
6583 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6586 /* prepare list for next posting block */
6587 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6588 /* enough sgls for non-embed sgl mbox command */
6589 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6590 list_splice_init(&prep_sgl_list,
6592 post_cnt = block_cnt;
6598 /* keep track of last sgl's xritag */
6599 last_xritag = sglq_entry->sli4_xritag;
6601 /* end of repost sgl list condition for buffers */
6602 if (num_posted == total_cnt) {
6603 if (post_cnt == 0) {
6604 list_splice_init(&prep_sgl_list,
6606 post_cnt = block_cnt;
6607 } else if (block_cnt == 1) {
6608 status = lpfc_sli4_post_sgl(phba,
6609 sglq_entry->phys, 0,
6610 sglq_entry->sli4_xritag);
6612 /* successful, put sgl to posted list */
6613 list_add_tail(&sglq_entry->list,
6616 /* Failure, put sgl to free list */
6617 lpfc_printf_log(phba, KERN_WARNING,
6619 "3159 Failed to post "
6620 "sgl, xritag:x%x\n",
6621 sglq_entry->sli4_xritag);
6622 list_add_tail(&sglq_entry->list,
6629 /* continue until a nembed page worth of sgls */
6633 /* post the buffer list sgls as a block */
6634 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
6638 /* success, put sgl list to posted sgl list */
6639 list_splice_init(&blck_sgl_list, &post_sgl_list);
6641 /* Failure, put sgl list to free sgl list */
6642 sglq_entry_first = list_first_entry(&blck_sgl_list,
6645 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6646 "3160 Failed to post sgl-list, "
6648 sglq_entry_first->sli4_xritag,
6649 (sglq_entry_first->sli4_xritag +
6651 list_splice_init(&blck_sgl_list, &free_sgl_list);
6652 total_cnt -= post_cnt;
6655		/* don't reset xritag due to hole in xri block */
6657 last_xritag = NO_XRI;
6659 /* reset sgl post count for next round of posting */
6663 /* free the sgls failed to post */
6664 lpfc_free_sgl_list(phba, &free_sgl_list);
6666 /* push sgls posted to the available list */
6667 if (!list_empty(&post_sgl_list)) {
6668 spin_lock_irq(&phba->hbalock);
6669 spin_lock(&phba->sli4_hba.sgl_list_lock);
6670 list_splice_init(&post_sgl_list, sgl_list);
6671 spin_unlock(&phba->sli4_hba.sgl_list_lock);
6672 spin_unlock_irq(&phba->hbalock);
6674 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6675 "3161 Failure to post sgl to port.\n");
6679 /* return the number of XRIs actually posted */
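/*
 * Editor's sketch (not driver code): the block-forming rule used by the
 * repost loop above. A posting block is closed whenever the next sgl's
 * xritag is not exactly one past the previous tag (a hole), or once
 * LPFC_NEMBED_MBOX_SGL_CNT contiguous entries have accumulated.
 */
#if 0
static bool example_xri_breaks_block(int last_xritag, int this_xritag)
{
	return last_xritag != NO_XRI &&
	       this_xritag != last_xritag + 1;	/* hole in xri sequence */
}
#endif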
6684 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
6688 len = sizeof(struct lpfc_mbx_set_host_data) -
6689 sizeof(struct lpfc_sli4_cfg_mhdr);
6690 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6691 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
6692 LPFC_SLI4_MBX_EMBED);
6694 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
6695 mbox->u.mqe.un.set_host_data.param_len =
6696 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
6697 snprintf(mbox->u.mqe.un.set_host_data.data,
6698 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
6699 "Linux %s v"LPFC_DRIVER_VERSION,
6700 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
6704 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
6705 struct lpfc_queue *drq, int count, int idx)
6708 struct lpfc_rqe hrqe;
6709 struct lpfc_rqe drqe;
6710 struct lpfc_rqb *rqbp;
6711 unsigned long flags;
6712 struct rqb_dmabuf *rqb_buffer;
6713 LIST_HEAD(rqb_buf_list);
6715 spin_lock_irqsave(&phba->hbalock, flags);
6717 for (i = 0; i < count; i++) {
6718		/* If RQ is already full, don't bother */
6719 if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
6721 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
6724 rqb_buffer->hrq = hrq;
6725 rqb_buffer->drq = drq;
6726 rqb_buffer->idx = idx;
6727 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
6729 while (!list_empty(&rqb_buf_list)) {
6730 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
6733 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
6734 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
6735 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
6736 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
6737 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
6739 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6740 "6421 Cannot post to HRQ %d: %x %x %x "
6748 rqbp->rqb_free_buffer(phba, rqb_buffer);
6750 list_add_tail(&rqb_buffer->hbuf.list,
6751 &rqbp->rqb_buffer_list);
6752 rqbp->buffer_count++;
6755 spin_unlock_irqrestore(&phba->hbalock, flags);
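/*
 * Editor's sketch (not driver code): each receive buffer is described to
 * the port as a header RQE and a data RQE, each carrying its 64-bit DMA
 * address split into low/high words with the driver's putPaddrLow/High
 * helpers, before lpfc_sli4_rq_put() posts the pair.
 */
#if 0
static void example_fill_rqe(struct lpfc_rqe *rqe, dma_addr_t phys)
{
	rqe->address_lo = putPaddrLow(phys);	/* low 32 bits of address */
	rqe->address_hi = putPaddrHigh(phys);	/* high 32 bits of address */
}
#endif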
6760 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
6761 * @phba: Pointer to HBA context object.
6763 * This function is the main SLI4 device initialization PCI function. This
6764 * function is called by the HBA initialization code, HBA reset code and
6765 * HBA error attention handler code. Caller is not required to hold any locks.
6769 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6772 LPFC_MBOXQ_t *mboxq;
6773 struct lpfc_mqe *mqe;
6776 uint32_t ftr_rsp = 0;
6777 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
6778 struct lpfc_vport *vport = phba->pport;
6779 struct lpfc_dmabuf *mp;
6780 struct lpfc_rqb *rqbp;
6782 /* Perform a PCI function reset to start from clean */
6783 rc = lpfc_pci_function_reset(phba);
6787	/* Check the HBA Host Status Register for readiness */
6788 rc = lpfc_sli4_post_status_check(phba);
6792 spin_lock_irq(&phba->hbalock);
6793 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
6794 spin_unlock_irq(&phba->hbalock);
6798 * Allocate a single mailbox container for initializing the
6801 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6805 /* Issue READ_REV to collect vpd and FW information. */
6806 vpd_size = SLI4_PAGE_SIZE;
6807 vpd = kzalloc(vpd_size, GFP_KERNEL);
6813 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
6819 mqe = &mboxq->u.mqe;
6820 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
6821 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
6822 phba->hba_flag |= HBA_FCOE_MODE;
6823 phba->fcp_embed_io = 0; /* SLI4 FC support only */
6825 phba->hba_flag &= ~HBA_FCOE_MODE;
6828 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
6830 phba->hba_flag |= HBA_FIP_SUPPORT;
6832 phba->hba_flag &= ~HBA_FIP_SUPPORT;
6834 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
6836 if (phba->sli_rev != LPFC_SLI_REV4) {
6837 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6838 "0376 READ_REV Error. SLI Level %d "
6839 "FCoE enabled %d\n",
6840 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
6847	 * Continue initialization with default values even if the driver failed
6848	 * to read FCoE param config regions; only read parameters if the board is FCoE.
6851 if (phba->hba_flag & HBA_FCOE_MODE &&
6852 lpfc_sli4_read_fcoe_params(phba))
6853 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
6854 "2570 Failed to read FCoE parameters\n");
6857	 * Retrieve the sli4 device physical port name; failure to do so
6858	 * is considered non-fatal.
6860 rc = lpfc_sli4_retrieve_pport_name(phba);
6862 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6863 "3080 Successful retrieving SLI4 device "
6864 "physical port name: %s.\n", phba->Port);
6867 * Evaluate the read rev and vpd data. Populate the driver
6868 * state with the results. If this routine fails, the failure
6869 * is not fatal as the driver will use generic values.
6871 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
6872 if (unlikely(!rc)) {
6873 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6874 "0377 Error %d parsing vpd. "
6875 "Using defaults.\n", rc);
6880 /* Save information as VPD data */
6881 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
6882 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
6883 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
6884 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
6886 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
6888 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
6890 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
6892 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
6893 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
6894 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
6895 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
6896 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
6897 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
6898 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6899 "(%d):0380 READ_REV Status x%x "
6900 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
6901 mboxq->vport ? mboxq->vport->vpi : 0,
6902 bf_get(lpfc_mqe_status, mqe),
6903 phba->vpd.rev.opFwName,
6904 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
6905 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
6907 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
6908 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
6909 if (phba->pport->cfg_lun_queue_depth > rc) {
6910 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6911 "3362 LUN queue depth changed from %d to %d\n",
6912 phba->pport->cfg_lun_queue_depth, rc);
6913 phba->pport->cfg_lun_queue_depth = rc;
6916 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6917 LPFC_SLI_INTF_IF_TYPE_0) {
6918 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
6919 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6920 if (rc == MBX_SUCCESS) {
6921 phba->hba_flag |= HBA_RECOVERABLE_UE;
6922 /* Set 1Sec interval to detect UE */
6923 phba->eratt_poll_interval = 1;
6924 phba->sli4_hba.ue_to_sr = bf_get(
6925 lpfc_mbx_set_feature_UESR,
6926 &mboxq->u.mqe.un.set_feature);
6927 phba->sli4_hba.ue_to_rp = bf_get(
6928 lpfc_mbx_set_feature_UERP,
6929 &mboxq->u.mqe.un.set_feature);
6933 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
6934 /* Enable MDS Diagnostics only if the SLI Port supports it */
6935 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
6936 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6937 if (rc != MBX_SUCCESS)
6938 phba->mds_diags_support = 0;
6942 * Discover the port's supported feature set and match it against the
6945 lpfc_request_features(phba, mboxq);
6946 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6953 * The port must support FCP initiator mode as this is the
6954 * only mode running in the host.
6956 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
6957 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6958 "0378 No support for fcpi mode.\n");
6962 /* Performance Hints are ONLY for FCoE */
6963 if (phba->hba_flag & HBA_FCOE_MODE) {
6964 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
6965 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
6967 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
6971 * If the port cannot support the host's requested features
6972 * then turn off the global config parameters to disable the
6973 * feature in the driver. This is not a fatal error.
6975 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
6976 if (phba->cfg_enable_bg) {
6977 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
6978 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
6983 if (phba->max_vpi && phba->cfg_enable_npiv &&
6984 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6988 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6989 "0379 Feature Mismatch Data: x%08x %08x "
6990 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
6991 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
6992 phba->cfg_enable_npiv, phba->max_vpi);
6993 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
6994 phba->cfg_enable_bg = 0;
6995 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6996 phba->cfg_enable_npiv = 0;
6999 /* These SLI3 features are assumed in SLI4 */
7000 spin_lock_irq(&phba->hbalock);
7001 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7002 spin_unlock_irq(&phba->hbalock);
7005 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
7006	 * calls depend on these resources to complete port setup.
7008 rc = lpfc_sli4_alloc_resource_identifiers(phba);
7010 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7011 "2920 Failed to alloc Resource IDs "
7016 lpfc_set_host_data(phba, mboxq);
7018 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7020 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7021 "2134 Failed to set host os driver version %x",
7025 /* Read the port's service parameters. */
7026 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7028 phba->link_state = LPFC_HBA_ERROR;
7033 mboxq->vport = vport;
7034 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7035 mp = (struct lpfc_dmabuf *) mboxq->context1;
7036 if (rc == MBX_SUCCESS) {
7037 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7042 * This memory was allocated by the lpfc_read_sparam routine. Release
7043 * it to the mbuf pool.
7045 lpfc_mbuf_free(phba, mp->virt, mp->phys);
7047 mboxq->context1 = NULL;
7049 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7050 "0382 READ_SPARAM command failed "
7051 "status %d, mbxStatus x%x\n",
7052 rc, bf_get(lpfc_mqe_status, mqe));
7053 phba->link_state = LPFC_HBA_ERROR;
7058 lpfc_update_vport_wwn(vport);
7060 /* Update the fc_host data structures with new wwn. */
7061 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7062 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7064 /* Create all the SLI4 queues */
7065 rc = lpfc_sli4_queue_create(phba);
7067 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7068 "3089 Failed to allocate queues\n");
7072 /* Set up all the queues to the device */
7073 rc = lpfc_sli4_queue_setup(phba);
7075 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7076 "0381 Error %d during queue setup.\n ", rc);
7077 goto out_stop_timers;
7079 /* Initialize the driver internal SLI layer lists. */
7080 lpfc_sli4_setup(phba);
7081 lpfc_sli4_queue_init(phba);
7083 /* update host els xri-sgl sizes and mappings */
7084 rc = lpfc_sli4_els_sgl_update(phba);
7086 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7087 "1400 Failed to update xri-sgl size and "
7088 "mapping: %d\n", rc);
7089 goto out_destroy_queue;
7092 /* register the els sgl pool to the port */
7093 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7094 phba->sli4_hba.els_xri_cnt);
7095 if (unlikely(rc < 0)) {
7096 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7097 "0582 Error %d during els sgl post "
7100 goto out_destroy_queue;
7102 phba->sli4_hba.els_xri_cnt = rc;
7104 if (phba->nvmet_support) {
7105 /* update host nvmet xri-sgl sizes and mappings */
7106 rc = lpfc_sli4_nvmet_sgl_update(phba);
7108 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7109 "6308 Failed to update nvmet-sgl size "
7110 "and mapping: %d\n", rc);
7111 goto out_destroy_queue;
7114 /* register the nvmet sgl pool to the port */
7115 rc = lpfc_sli4_repost_sgl_list(
7117 &phba->sli4_hba.lpfc_nvmet_sgl_list,
7118 phba->sli4_hba.nvmet_xri_cnt);
7119 if (unlikely(rc < 0)) {
7120 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7121 "3117 Error %d during nvmet "
7124 goto out_destroy_queue;
7126 phba->sli4_hba.nvmet_xri_cnt = rc;
7128 cnt = phba->cfg_iocb_cnt * 1024;
7129 /* We need 1 iocbq for every SGL, for IO processing */
7130 cnt += phba->sli4_hba.nvmet_xri_cnt;
7132 /* update host scsi xri-sgl sizes and mappings */
7133 rc = lpfc_sli4_scsi_sgl_update(phba);
7135 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7136 "6309 Failed to update scsi-sgl size "
7137 "and mapping: %d\n", rc);
7138 goto out_destroy_queue;
7141 /* update host nvme xri-sgl sizes and mappings */
7142 rc = lpfc_sli4_nvme_sgl_update(phba);
7144 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7145 "6082 Failed to update nvme-sgl size "
7146 "and mapping: %d\n", rc);
7147 goto out_destroy_queue;
7150 cnt = phba->cfg_iocb_cnt * 1024;
7153 if (!phba->sli.iocbq_lookup) {
7154 /* Initialize and populate the iocb list per host */
7155 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7156 "2821 initialize iocb list %d total %d\n",
7157 phba->cfg_iocb_cnt, cnt);
7158 rc = lpfc_init_iocb_list(phba, cnt);
7160 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7161 "1413 Failed to init iocb list.\n");
7162 goto out_destroy_queue;
7166 if (phba->nvmet_support)
7167 lpfc_nvmet_create_targetport(phba);
7169 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7170 /* Post initial buffers to all RQs created */
7171 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7172 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7173 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7174 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7175 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
7176 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7177 rqbp->buffer_count = 0;
7179 lpfc_post_rq_buffer(
7180 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7181 phba->sli4_hba.nvmet_mrq_data[i],
7182 LPFC_NVMET_RQE_DEF_COUNT, i);
7186 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
7187 /* register the allocated scsi sgl pool to the port */
7188 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
7190 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7191 "0383 Error %d during scsi sgl post "
7193 /* Some Scsi buffers were moved to abort scsi list */
7194 /* A pci function reset will repost them */
7196 goto out_destroy_queue;
7200 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
7201 (phba->nvmet_support == 0)) {
7203 /* register the allocated nvme sgl pool to the port */
7204 rc = lpfc_repost_nvme_sgl_list(phba);
7206 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7207 "6116 Error %d during nvme sgl post "
7209 /* Some NVME buffers were moved to abort nvme list */
7210 /* A pci function reset will repost them */
7212 goto out_destroy_queue;
7216 /* Post the rpi header region to the device. */
7217 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7219 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7220 "0393 Error %d during rpi post operation\n",
7223 goto out_destroy_queue;
7225 lpfc_sli4_node_prep(phba);
7227 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7228 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7230 * The FC Port needs to register FCFI (index 0)
7232 lpfc_reg_fcfi(phba, mboxq);
7233 mboxq->vport = phba->pport;
7234 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7235 if (rc != MBX_SUCCESS)
7236 goto out_unset_queue;
7238 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7239 &mboxq->u.mqe.un.reg_fcfi);
7241		} else {	/* We are in NVME Target mode with MRQ > 1 */
7243 /* First register the FCFI */
7244 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7245 mboxq->vport = phba->pport;
7246 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7247 if (rc != MBX_SUCCESS)
7248 goto out_unset_queue;
7250 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7251 &mboxq->u.mqe.un.reg_fcfi_mrq);
7253 /* Next register the MRQs */
7254 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
7255 mboxq->vport = phba->pport;
7256 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7257 if (rc != MBX_SUCCESS)
7258 goto out_unset_queue;
7261 /* Check if the port is configured to be disabled */
7262 lpfc_sli_read_link_ste(phba);
7265 /* Arm the CQs and then EQs on device */
7266 lpfc_sli4_arm_cqeq_intr(phba);
7268 /* Indicate device interrupt mode */
7269 phba->sli4_hba.intr_enable = 1;
7271 /* Allow asynchronous mailbox command to go through */
7272 spin_lock_irq(&phba->hbalock);
7273 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7274 spin_unlock_irq(&phba->hbalock);
7276 /* Post receive buffers to the device */
7277 lpfc_sli4_rb_setup(phba);
7279 /* Reset HBA FCF states after HBA reset */
7280 phba->fcf.fcf_flag = 0;
7281 phba->fcf.current_rec.flag = 0;
7283 /* Start the ELS watchdog timer */
7284 mod_timer(&vport->els_tmofunc,
7285 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
7287 /* Start heart beat timer */
7288 mod_timer(&phba->hb_tmofunc,
7289 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
7290 phba->hb_outstanding = 0;
7291 phba->last_completion_time = jiffies;
7293 /* Start error attention (ERATT) polling timer */
7294 mod_timer(&phba->eratt_poll,
7295 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
7297 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
7298 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7299 rc = pci_enable_pcie_error_reporting(phba->pcidev);
7301 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7302 "2829 This device supports "
7303 "Advanced Error Reporting (AER)\n");
7304 spin_lock_irq(&phba->hbalock);
7305 phba->hba_flag |= HBA_AER_ENABLED;
7306 spin_unlock_irq(&phba->hbalock);
7308 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7309 "2830 This device does not support "
7310 "Advanced Error Reporting (AER)\n");
7311 phba->cfg_aer_support = 0;
7317 * The port is ready, set the host's link state to LINK_DOWN
7318 * in preparation for link interrupts.
7320 spin_lock_irq(&phba->hbalock);
7321 phba->link_state = LPFC_LINK_DOWN;
7322 spin_unlock_irq(&phba->hbalock);
7323 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7324 (phba->hba_flag & LINK_DISABLED)) {
7325 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7326 "3103 Adapter Link is disabled.\n");
7327 lpfc_down_link(phba, mboxq);
7328 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7329 if (rc != MBX_SUCCESS) {
7330 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7331 "3104 Adapter failed to issue "
7332 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
7333 goto out_unset_queue;
7335 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7336 /* don't perform init_link on SLI4 FC port loopback test */
7337 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7338 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7340 goto out_unset_queue;
7343 mempool_free(mboxq, phba->mbox_mem_pool);
7346 /* Unset all the queues set up in this routine when error out */
7347 lpfc_sli4_queue_unset(phba);
7349 lpfc_free_iocb_list(phba);
7350 lpfc_sli4_queue_destroy(phba);
7352 lpfc_stop_hba_timers(phba);
7354 mempool_free(mboxq, phba->mbox_mem_pool);
7359 * lpfc_mbox_timeout - Timeout call back function for mbox timer
7360 * @t: pointer to the timer_list embedded in the hba structure.
7362 * This is the callback function for the mailbox timer. The mailbox
7363 * timer is armed when a new mailbox command is issued and the timer
7364 * is deleted when the mailbox completes. The function is called by
7365 * the kernel timer code when a mailbox does not complete within
7366 * expected time. This function wakes up the worker thread to
7367 * process the mailbox timeout and returns. All the processing is
7368 * done by the worker thread function lpfc_mbox_timeout_handler.
7371 lpfc_mbox_timeout(struct timer_list *t)
7373 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
7374 unsigned long iflag;
7375 uint32_t tmo_posted;
7377 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
7378 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
7380 phba->pport->work_port_events |= WORKER_MBOX_TMO;
7381 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7384 lpfc_worker_wake_up(phba);
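/*
 * Editor's sketch (not driver code): the timer->worker handoff pattern
 * used above. A timer callback runs in atomic context, so it only flags
 * the event and wakes a worker; the recovery itself runs later in process
 * context. The types and the bit number below are illustrative assumptions.
 */
#if 0
struct example_port {
	struct timer_list tmo;
	unsigned long events;
};

static void example_tmo_fn(struct timer_list *t)
{
	struct example_port *p = from_timer(p, t, tmo);

	set_bit(0, &p->events);	/* record the timeout event */
	/* then wake the worker thread, as lpfc_worker_wake_up() does */
}
#endif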
7389 * lpfc_sli4_mbox_completions_pending - check for pending mailbox completions
7391 * @phba: Pointer to HBA context object.
7393 * This function checks if any mailbox completions are present on the mailbox completion queue.
7397 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
7401 struct lpfc_queue *mcq;
7402 struct lpfc_mcqe *mcqe;
7403 bool pending_completions = false;
7406 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7409 /* Check for completions on mailbox completion queue */
7411 mcq = phba->sli4_hba.mbx_cq;
7412 idx = mcq->hba_index;
7413 qe_valid = mcq->qe_valid;
7414 while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe) == qe_valid) {
7415 mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
7416 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
7417 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
7418 pending_completions = true;
7421 idx = (idx + 1) % mcq->entry_count;
7422 if (mcq->hba_index == idx)
7425 /* if the index wrapped around, toggle the valid bit */
7426 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
7427 qe_valid = (qe_valid) ? 0 : 1;
7429 return pending_completions;
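/*
 * Editor's sketch (not driver code): the phase-bit scan used above. The
 * port flips each entry's valid bit on every pass through the ring, so
 * software consumes entries while the bit matches its expected phase and
 * toggles that expectation when the index wraps (on ports with the cqav
 * capability). All types and the accessor below are hypothetical.
 */
#if 0
struct example_cq { uint32_t host_index, entry_count; uint8_t phase; };
uint8_t example_entry_phase(struct example_cq *cq, uint32_t idx);

static int example_count_valid_entries(struct example_cq *cq)
{
	uint32_t idx = cq->host_index;
	uint8_t phase = cq->phase;
	int n = 0;

	while (example_entry_phase(cq, idx) == phase) {
		n++;
		idx = (idx + 1) % cq->entry_count;
		if (idx == cq->host_index)
			break;			/* scanned one full lap */
		if (!idx)			/* wrapped to entry 0 ... */
			phase = !phase;		/* ... expect flipped bit */
	}
	return n;
}
#endif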
7434 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
7436 * @phba: Pointer to HBA context object.
7438 * For sli4, it is possible to miss an interrupt. As such, mbox completions
7439 * may be missed, causing erroneous mailbox timeouts to occur. This function
7440 * checks to see if mbox completions are on the mailbox completion queue
7441 * and will process all the completions associated with the eq for the
7442 * mailbox completion queue.
7445 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
7447 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
7449 struct lpfc_queue *fpeq = NULL;
7450 struct lpfc_eqe *eqe;
7453 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7456 /* Find the eq associated with the mcq */
7458 if (sli4_hba->hba_eq)
7459 for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++)
7460 if (sli4_hba->hba_eq[eqidx]->queue_id ==
7461 sli4_hba->mbx_cq->assoc_qid) {
7462 fpeq = sli4_hba->hba_eq[eqidx];
7468 /* Turn off interrupts from this EQ */
7470 sli4_hba->sli4_eq_clr_intr(fpeq);
7472 /* Check to see if a mbox completion is pending */
7474 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
7477	 * If a mbox completion is pending, process all the events on the EQ
7478	 * associated with the mbox completion queue (this could include
7479	 * mailbox commands, async events, els commands, and receive queue data).
7484 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
7485 lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
7486 fpeq->EQ_processed++;
7489 /* Always clear and re-arm the EQ */
7491 sli4_hba->sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
7493 return mbox_pending;
7498 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
7499 * @phba: Pointer to HBA context object.
7501 * This function is called from worker thread when a mailbox command times out.
7502 * The caller is not required to hold any locks. This function will reset the
7503 * HBA and recover all the pending commands.
7506 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
7508 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
7509 MAILBOX_t *mb = NULL;
7511 struct lpfc_sli *psli = &phba->sli;
7513 /* If the mailbox completed, process the completion and return */
7514 if (lpfc_sli4_process_missed_mbox_completions(phba))
7519 /* Check the pmbox pointer first. There is a race condition
7520 * between the mbox timeout handler getting executed in the
7521 * worklist and the mailbox actually completing. When this
7522 * race condition occurs, the mbox_active will be NULL.
7524 spin_lock_irq(&phba->hbalock);
7525 if (pmbox == NULL) {
7526 lpfc_printf_log(phba, KERN_WARNING,
7528 "0353 Active Mailbox cleared - mailbox timeout "
7530 spin_unlock_irq(&phba->hbalock);
7534 /* Mbox cmd <mbxCommand> timeout */
7535 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7536 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
7538 phba->pport->port_state,
7540 phba->sli.mbox_active);
7541 spin_unlock_irq(&phba->hbalock);
7543 /* Setting state unknown so lpfc_sli_abort_iocb_ring
7544 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
7545 * it to fail all outstanding SCSI IO.
7547 spin_lock_irq(&phba->pport->work_port_lock);
7548 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
7549 spin_unlock_irq(&phba->pport->work_port_lock);
7550 spin_lock_irq(&phba->hbalock);
7551 phba->link_state = LPFC_LINK_UNKNOWN;
7552 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7553 spin_unlock_irq(&phba->hbalock);
7555 lpfc_sli_abort_fcp_rings(phba);
7557 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7558 "0345 Resetting board due to mailbox timeout\n");
7560 /* Reset the HBA device */
7561 lpfc_reset_hba(phba);
7565 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
7566 * @phba: Pointer to HBA context object.
7567 * @pmbox: Pointer to mailbox object.
7568 * @flag: Flag indicating how the mailbox need to be processed.
7570 * This function is called by discovery code and HBA management code
7571 * to submit a mailbox command to firmware with SLI-3 interface spec. This
7572 * function gets the hbalock to protect the data structures.
7573 * The mailbox command can be submitted in polling mode, in which case
7574 * this function will wait in a polling loop for the command to complete.
7576 * If the mailbox is submitted in no_wait mode (not polling), the
7577 * function submits the command and returns immediately without waiting
7578 * for the mailbox completion. The no_wait mode is supported only when the
7579 * HBA is in SLI2/SLI3 mode - interrupts are enabled.
7580 * The SLI interface allows only one mailbox pending at a time. If the
7581 * mailbox is issued in polling mode and there is already a mailbox
7582 * pending, then the function will return an error. If the mailbox is issued
7583 * in NO_WAIT mode and there is a mailbox pending already, the function
7584 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
7585 * The sli layer owns the mailbox object until the mailbox command
7586 * completes if this function returns MBX_BUSY or MBX_SUCCESS. For all
7587 * other return codes the caller owns the mailbox command once this function returns.
7591 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
7595 struct lpfc_sli *psli = &phba->sli;
7596 uint32_t status, evtctr;
7597 uint32_t ha_copy, hc_copy;
7599 unsigned long timeout;
7600 unsigned long drvr_flag = 0;
7601 uint32_t word0, ldata;
7602 void __iomem *to_slim;
7603 int processing_queue = 0;
7605 spin_lock_irqsave(&phba->hbalock, drvr_flag);
7607 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7608 /* processing mbox queue from intr_handler */
7609 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7610 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7613 processing_queue = 1;
7614 pmbox = lpfc_mbox_get(phba);
7616 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7621 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
7622 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
7624 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7625 lpfc_printf_log(phba, KERN_ERR,
7626 LOG_MBOX | LOG_VPORT,
7627 "1806 Mbox x%x failed. No vport\n",
7628 pmbox->u.mb.mbxCommand);
7630 goto out_not_finished;
7634 /* If the PCI channel is in offline state, do not post mbox. */
7635 if (unlikely(pci_channel_offline(phba->pcidev))) {
7636 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7637 goto out_not_finished;
7640 /* If HBA has a deferred error attention, fail the iocb. */
7641 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
7642 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7643 goto out_not_finished;
7649 status = MBX_SUCCESS;
7651 if (phba->link_state == LPFC_HBA_ERROR) {
7652 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7654 /* Mbox command <mbxCommand> cannot issue */
7655 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7656 "(%d):0311 Mailbox command x%x cannot "
7657 "issue Data: x%x x%x\n",
7658 pmbox->vport ? pmbox->vport->vpi : 0,
7659 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
7660 goto out_not_finished;
7663 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
7664 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
7665 !(hc_copy & HC_MBINT_ENA)) {
7666 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7667 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7668 "(%d):2528 Mailbox command x%x cannot "
7669 "issue Data: x%x x%x\n",
7670 pmbox->vport ? pmbox->vport->vpi : 0,
7671 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
7672 goto out_not_finished;
7676 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7677 /* Polling for a mbox command when another one is already active
7678 * is not allowed in SLI. Also, the driver must have established
7679 * SLI2 mode to queue and process multiple mbox commands.
7682 if (flag & MBX_POLL) {
7683 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7685 /* Mbox command <mbxCommand> cannot issue */
7686 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7687 "(%d):2529 Mailbox command x%x "
7688 "cannot issue Data: x%x x%x\n",
7689 pmbox->vport ? pmbox->vport->vpi : 0,
7690 pmbox->u.mb.mbxCommand,
7691 psli->sli_flag, flag);
7692 goto out_not_finished;
7695 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
7696 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7697 /* Mbox command <mbxCommand> cannot issue */
7698 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7699 "(%d):2530 Mailbox command x%x "
7700 "cannot issue Data: x%x x%x\n",
7701 pmbox->vport ? pmbox->vport->vpi : 0,
7702 pmbox->u.mb.mbxCommand,
7703 psli->sli_flag, flag);
7704 goto out_not_finished;
7707 /* Another mailbox command is still being processed, queue this
7708 * command to be processed later.
7710 lpfc_mbox_put(phba, pmbox);
7712 /* Mbox cmd issue - BUSY */
7713 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7714 "(%d):0308 Mbox cmd issue - BUSY Data: "
7715 "x%x x%x x%x x%x\n",
7716 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
7718 phba->pport ? phba->pport->port_state : 0xff,
7719 psli->sli_flag, flag);
7721 psli->slistat.mbox_busy++;
7722 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7725 lpfc_debugfs_disc_trc(pmbox->vport,
7726 LPFC_DISC_TRC_MBOX_VPORT,
7727 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
7728 (uint32_t)mbx->mbxCommand,
7729 mbx->un.varWords[0], mbx->un.varWords[1]);
7732 lpfc_debugfs_disc_trc(phba->pport,
7734 "MBOX Bsy: cmd:x%x mb:x%x x%x",
7735 (uint32_t)mbx->mbxCommand,
7736 mbx->un.varWords[0], mbx->un.varWords[1]);
7742 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7744 /* If we are not polling, we MUST be in SLI2 mode */
7745 if (flag != MBX_POLL) {
7746 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
7747 (mbx->mbxCommand != MBX_KILL_BOARD)) {
7748 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7749 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7750 /* Mbox command <mbxCommand> cannot issue */
7751 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7752 "(%d):2531 Mailbox command x%x "
7753 "cannot issue Data: x%x x%x\n",
7754 pmbox->vport ? pmbox->vport->vpi : 0,
7755 pmbox->u.mb.mbxCommand,
7756 psli->sli_flag, flag);
7757 goto out_not_finished;
7759 /* timeout active mbox command */
7760 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7762 mod_timer(&psli->mbox_tmo, jiffies + timeout);
7765 /* Mailbox cmd <cmd> issue */
7766 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7767 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
7769 pmbox->vport ? pmbox->vport->vpi : 0,
7771 phba->pport ? phba->pport->port_state : 0xff,
7772 psli->sli_flag, flag);
7774 if (mbx->mbxCommand != MBX_HEARTBEAT) {
7776 lpfc_debugfs_disc_trc(pmbox->vport,
7777 LPFC_DISC_TRC_MBOX_VPORT,
7778 "MBOX Send vport: cmd:x%x mb:x%x x%x",
7779 (uint32_t)mbx->mbxCommand,
7780 mbx->un.varWords[0], mbx->un.varWords[1]);
7783 lpfc_debugfs_disc_trc(phba->pport,
7785 "MBOX Send: cmd:x%x mb:x%x x%x",
7786 (uint32_t)mbx->mbxCommand,
7787 mbx->un.varWords[0], mbx->un.varWords[1]);
7791 psli->slistat.mbox_cmd++;
7792 evtctr = psli->slistat.mbox_event;
7794 /* next set own bit for the adapter and copy over command word */
7795 mbx->mbxOwner = OWN_CHIP;
7797 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7798 /* Populate mbox extension offset word. */
7799 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
7800 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7801 = (uint8_t *)phba->mbox_ext
7802 - (uint8_t *)phba->mbox;
7805 /* Copy the mailbox extension data */
7806 if (pmbox->in_ext_byte_len && pmbox->context2) {
7807 lpfc_sli_pcimem_bcopy(pmbox->context2,
7808 (uint8_t *)phba->mbox_ext,
7809 pmbox->in_ext_byte_len);
7811 /* Copy command data to host SLIM area */
7812 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
7814 /* Populate mbox extension offset word. */
7815 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
7816 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7817 = MAILBOX_HBA_EXT_OFFSET;
7819 /* Copy the mailbox extension data */
7820 if (pmbox->in_ext_byte_len && pmbox->context2)
7821 lpfc_memcpy_to_slim(phba->MBslimaddr +
7822 MAILBOX_HBA_EXT_OFFSET,
7823 pmbox->context2, pmbox->in_ext_byte_len);
7825 if (mbx->mbxCommand == MBX_CONFIG_PORT)
7826 /* copy command data into host mbox for cmpl */
7827 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
7830		/* First copy mbox command data to HBA SLIM, skip past first word */
7832 to_slim = phba->MBslimaddr + sizeof (uint32_t);
7833 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
7834 MAILBOX_CMD_SIZE - sizeof (uint32_t));
7836 /* Next copy over first word, with mbxOwner set */
7837 ldata = *((uint32_t *)mbx);
7838 to_slim = phba->MBslimaddr;
7839 writel(ldata, to_slim);
7840 readl(to_slim); /* flush */
7842 if (mbx->mbxCommand == MBX_CONFIG_PORT)
7843 /* switch over to host mailbox */
7844 psli->sli_flag |= LPFC_SLI_ACTIVE;
7851 /* Set up reference to mailbox command */
7852 psli->mbox_active = pmbox;
7853 /* Interrupt board to do it */
7854 writel(CA_MBATT, phba->CAregaddr);
7855 readl(phba->CAregaddr); /* flush */
7856 /* Don't wait for it to finish, just return */
7860 /* Set up null reference to mailbox command */
7861 psli->mbox_active = NULL;
7862 /* Interrupt board to do it */
7863 writel(CA_MBATT, phba->CAregaddr);
7864 readl(phba->CAregaddr); /* flush */
7866 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7867 /* First read mbox status word */
7868 word0 = *((uint32_t *)phba->mbox);
7869 word0 = le32_to_cpu(word0);
7871 /* First read mbox status word */
7872 if (lpfc_readl(phba->MBslimaddr, &word0)) {
7873 spin_unlock_irqrestore(&phba->hbalock,
7875 goto out_not_finished;
7879 /* Read the HBA Host Attention Register */
7880 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7881 spin_unlock_irqrestore(&phba->hbalock,
7883 goto out_not_finished;
7885 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7888 /* Wait for command to complete */
7889 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
7890 (!(ha_copy & HA_MBATT) &&
7891 (phba->link_state > LPFC_WARM_START))) {
7892 if (time_after(jiffies, timeout)) {
7893 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7894 spin_unlock_irqrestore(&phba->hbalock,
7896 goto out_not_finished;
7899			/* Check if we took a mbox interrupt while we were polling */
7901 if (((word0 & OWN_CHIP) != OWN_CHIP)
7902 && (evtctr != psli->slistat.mbox_event))
7906 spin_unlock_irqrestore(&phba->hbalock,
7909 spin_lock_irqsave(&phba->hbalock, drvr_flag);
7912 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7913 /* First copy command data */
7914 word0 = *((uint32_t *)phba->mbox);
7915 word0 = le32_to_cpu(word0);
7916 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
7919 /* Check real SLIM for any errors */
7920 slimword0 = readl(phba->MBslimaddr);
7921 slimmb = (MAILBOX_t *) & slimword0;
7922 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
7923 && slimmb->mbxStatus) {
7930 /* First copy command data */
7931 word0 = readl(phba->MBslimaddr);
7933 /* Read the HBA Host Attention Register */
7934 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7935 spin_unlock_irqrestore(&phba->hbalock,
7937 goto out_not_finished;
7941 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7942 /* copy results back to user */
7943 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
7945 /* Copy the mailbox extension data */
7946 if (pmbox->out_ext_byte_len && pmbox->context2) {
7947 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
7949 pmbox->out_ext_byte_len);
7952 /* First copy command data */
7953 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
7955 /* Copy the mailbox extension data */
7956 if (pmbox->out_ext_byte_len && pmbox->context2) {
7957 lpfc_memcpy_from_slim(pmbox->context2,
7959 MAILBOX_HBA_EXT_OFFSET,
7960 pmbox->out_ext_byte_len);
7964 writel(HA_MBATT, phba->HAregaddr);
7965 readl(phba->HAregaddr); /* flush */
7967 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7968 status = mbx->mbxStatus;
7971 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7975 if (processing_queue) {
7976 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
7977 lpfc_mbox_cmpl_put(phba, pmbox);
7979 return MBX_NOT_FINISHED;
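/*
 * Editor's sketch (not driver code): caller-side view of the ownership
 * rules documented above. On MBX_BUSY or MBX_SUCCESS the SLI layer keeps
 * the mailbox until completion; any other return code leaves it with the
 * caller, who must reclaim it. Setup of the command itself is elided.
 */
#if 0
static void example_issue_nowait(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	int rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc != MBX_BUSY && rc != MBX_SUCCESS)
		mempool_free(mbox, phba->mbox_mem_pool);
	/* else the mailbox completion handler releases it */
}
#endif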
7983 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
7984 * @phba: Pointer to HBA context object.
7986 * The function blocks the posting of SLI4 asynchronous mailbox commands from
7987 * the driver internal pending mailbox queue. It will then try to wait out the
7988 * possible outstanding mailbox command before returning.
7991 * 0 - the outstanding mailbox command completed; otherwise, the wait for
7992 * the outstanding mailbox command timed out.
7995 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
7997 struct lpfc_sli *psli = &phba->sli;
7999 unsigned long timeout = 0;
8001 /* Mark the asynchronous mailbox command posting as blocked */
8002 spin_lock_irq(&phba->hbalock);
8003 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8004 /* Determine how long we might wait for the active mailbox
8005 * command to be gracefully completed by firmware.
8007 if (phba->sli.mbox_active)
8008 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8009 phba->sli.mbox_active) *
8011 spin_unlock_irq(&phba->hbalock);
8013 /* Make sure the mailbox is really active */
8015 lpfc_sli4_process_missed_mbox_completions(phba);
8017	/* Wait for the outstanding mailbox command to complete */
8018 while (phba->sli.mbox_active) {
8019 /* Check active mailbox complete status every 2ms */
8021 if (time_after(jiffies, timeout)) {
8022			/* Timeout: mark the outstanding cmd not complete */
8028	/* Cannot cleanly block the async mailbox command; fail it */
8030 spin_lock_irq(&phba->hbalock);
8031 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8032 spin_unlock_irq(&phba->hbalock);
8038 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
8039 * @phba: Pointer to HBA context object.
8041 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
8042 * commands from the driver internal pending mailbox queue. It makes sure
8043 * that there is no outstanding mailbox command before resuming posting
8044 * asynchronous mailbox commands. If, for any reason, there is an outstanding
8045 * mailbox command, it will try to wait it out before resuming asynchronous
8046 * mailbox command posting.
8049 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8051 struct lpfc_sli *psli = &phba->sli;
8053 spin_lock_irq(&phba->hbalock);
8054 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8055 /* Asynchronous mailbox posting is not blocked, do nothing */
8056 spin_unlock_irq(&phba->hbalock);
8060	/* An outstanding synchronous mailbox command is guaranteed to finish,
8061	 * either successfully or by timeout; after a timeout the outstanding
8062	 * command is always removed, so just unblock posting of async
8063	 * mailbox commands and resume.
8065 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8066 spin_unlock_irq(&phba->hbalock);
8068	/* wake up worker thread to post asynchronous mailbox command */
8069 lpfc_worker_wake_up(phba);
8073 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8074 * @phba: Pointer to HBA context object.
8075 * @mboxq: Pointer to mailbox object.
8077 * The function waits for the bootstrap mailbox register ready bit from
8078 * the port for twice the regular mailbox command timeout value.
8080 * 0 - no timeout on waiting for bootstrap mailbox register ready.
8081 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
8084 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8087 unsigned long timeout;
8088 struct lpfc_register bmbx_reg;
8090 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8094 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8095 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8099 if (time_after(jiffies, timeout))
8100 return MBXERR_ERROR;
8101 } while (!db_ready);
8107 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
8108 * @phba: Pointer to HBA context object.
8109 * @mboxq: Pointer to mailbox object.
8111 * The function posts a mailbox to the port. The mailbox is expected
8112 * to be completely filled in and ready for the port to operate on it.
8113 * This routine executes a synchronous completion operation on the
8114 * mailbox by polling for its completion.
8116 * The caller must not be holding any locks when calling this routine.
8119 * MBX_SUCCESS - mailbox posted successfully
8120 * Any of the MBX error values.
8123 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8125 int rc = MBX_SUCCESS;
8126 unsigned long iflag;
8127 uint32_t mcqe_status;
8129 struct lpfc_sli *psli = &phba->sli;
8130 struct lpfc_mqe *mb = &mboxq->u.mqe;
8131 struct lpfc_bmbx_create *mbox_rgn;
8132 struct dma_address *dma_address;
8135 * Only one mailbox can be active to the bootstrap mailbox region
8136 * at a time and there is no queueing provided.
8138 spin_lock_irqsave(&phba->hbalock, iflag);
8139 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8140 spin_unlock_irqrestore(&phba->hbalock, iflag);
8141 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8142 "(%d):2532 Mailbox command x%x (x%x/x%x) "
8143 "cannot issue Data: x%x x%x\n",
8144 mboxq->vport ? mboxq->vport->vpi : 0,
8145 mboxq->u.mb.mbxCommand,
8146 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8147 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8148 psli->sli_flag, MBX_POLL);
8149 return MBXERR_ERROR;
8151 /* The server grabs the token and owns it until released */
8152 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8153 phba->sli.mbox_active = mboxq;
8154 spin_unlock_irqrestore(&phba->hbalock, iflag);
8156 /* Wait for the bootstrap mailbox register to become ready */
8157 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8162 * Initialize the bootstrap memory region to avoid stale data areas
8163 * in the mailbox post. Then copy the caller's mailbox contents to
8164 * the bmbx mailbox region.
8166 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8167 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
8168 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8169 sizeof(struct lpfc_mqe));
8171 /* Post the high mailbox dma address to the port and wait for ready. */
8172 dma_address = &phba->sli4_hba.bmbx.dma_address;
8173 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8175 /* Wait for the bootstrap mailbox register ready after the high-address write */
8176 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8180 /* Post the low mailbox dma address to the port. */
8181 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
8183 /* Wait for the bootstrap mailbox register ready after the low-address write */
8184 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8189 * Read the CQ to ensure the mailbox has completed.
8190 * If so, update the mailbox status so that the upper layers
8191 * can complete the request normally.
8193 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8194 sizeof(struct lpfc_mqe));
8195 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8196 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8197 sizeof(struct lpfc_mcqe));
8198 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8200 * When the CQE status indicates a failure and the mailbox status
8201 * indicates success then copy the CQE status into the mailbox status
8202 * (and prefix it with x4000).
8204 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
8205 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
8206 bf_set(lpfc_mqe_status, mb,
8207 (LPFC_MBX_ERROR_RANGE | mcqe_status));
8210 lpfc_sli4_swap_str(phba, mboxq);
8212 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8213 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
8214 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
8215 " x%x x%x CQ: x%x x%x x%x x%x\n",
8216 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8217 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8218 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8219 bf_get(lpfc_mqe_status, mb),
8220 mb->un.mb_words[0], mb->un.mb_words[1],
8221 mb->un.mb_words[2], mb->un.mb_words[3],
8222 mb->un.mb_words[4], mb->un.mb_words[5],
8223 mb->un.mb_words[6], mb->un.mb_words[7],
8224 mb->un.mb_words[8], mb->un.mb_words[9],
8225 mb->un.mb_words[10], mb->un.mb_words[11],
8226 mb->un.mb_words[12], mboxq->mcqe.word0,
8227 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
8228 mboxq->mcqe.trailer);
8230 /* We are holding the token; release it under the lock */
8231 spin_lock_irqsave(&phba->hbalock, iflag);
8232 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8233 phba->sli.mbox_active = NULL;
8234 spin_unlock_irqrestore(&phba->hbalock, iflag);
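
/*
 * Example (illustrative sketch, not driver code): driving a command
 * through the bootstrap mailbox while interrupts are still disabled,
 * e.g. during early initialization. lpfc_read_rev() is used only as a
 * representative mailbox builder; lpfc_example_bmbx_read_rev is a
 * hypothetical name and error unwinding is condensed.
 */
static int __maybe_unused
lpfc_example_bmbx_read_rev(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	lpfc_read_rev(phba, mboxq);			/* build the READ_REV MQE */
	rc = lpfc_sli4_post_sync_mbox(phba, mboxq);	/* poll to completion */

	mempool_free(mboxq, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}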
8239 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
8240 * @phba: Pointer to HBA context object.
8241 * @pmbox: Pointer to mailbox object.
8242 * @flag: Flag indicating how the mailbox needs to be processed.
8244 * This function is called by discovery code and HBA management code to submit
8245 * a mailbox command to firmware with SLI-4 interface spec.
8247 * Return codes: the caller owns the mailbox command after the return of the function.
8251 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
8254 struct lpfc_sli *psli = &phba->sli;
8255 unsigned long iflags;
8258 /* dump from issue mailbox command if setup */
8259 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
8261 rc = lpfc_mbox_dev_check(phba);
8263 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8264 "(%d):2544 Mailbox command x%x (x%x/x%x) "
8265 "cannot issue Data: x%x x%x\n",
8266 mboxq->vport ? mboxq->vport->vpi : 0,
8267 mboxq->u.mb.mbxCommand,
8268 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8269 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8270 psli->sli_flag, flag);
8271 goto out_not_finished;
8274 /* Detect polling mode and jump to a handler */
8275 if (!phba->sli4_hba.intr_enable) {
8276 if (flag == MBX_POLL)
8277 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8280 if (rc != MBX_SUCCESS)
8281 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8282 "(%d):2541 Mailbox command x%x "
8283 "(x%x/x%x) failure: "
8284 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8286 mboxq->vport ? mboxq->vport->vpi : 0,
8287 mboxq->u.mb.mbxCommand,
8288 lpfc_sli_config_mbox_subsys_get(phba,
8290 lpfc_sli_config_mbox_opcode_get(phba,
8292 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8293 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8294 bf_get(lpfc_mcqe_ext_status,
8296 psli->sli_flag, flag);
8298 } else if (flag == MBX_POLL) {
8299 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8300 "(%d):2542 Try to issue mailbox command "
8301 "x%x (x%x/x%x) synchronously ahead of async "
8302 "mailbox command queue: x%x x%x\n",
8303 mboxq->vport ? mboxq->vport->vpi : 0,
8304 mboxq->u.mb.mbxCommand,
8305 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8306 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8307 psli->sli_flag, flag);
8308 /* Try to block the asynchronous mailbox posting */
8309 rc = lpfc_sli4_async_mbox_block(phba);
8311 /* Successfully blocked, now issue sync mbox cmd */
8312 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8313 if (rc != MBX_SUCCESS)
8314 lpfc_printf_log(phba, KERN_WARNING,
8316 "(%d):2597 Sync Mailbox command "
8317 "x%x (x%x/x%x) failure: "
8318 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8320 mboxq->vport ? mboxq->vport->vpi : 0,
8321 mboxq->u.mb.mbxCommand,
8322 lpfc_sli_config_mbox_subsys_get(phba,
8324 lpfc_sli_config_mbox_opcode_get(phba,
8326 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8327 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8328 bf_get(lpfc_mcqe_ext_status,
8330 psli->sli_flag, flag);
8331 /* Unblock the async mailbox posting afterward */
8332 lpfc_sli4_async_mbox_unblock(phba);
8337 /* Now, interrupt-mode asynchronous mailbox command */
8338 rc = lpfc_mbox_cmd_check(phba, mboxq);
8340 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8341 "(%d):2543 Mailbox command x%x (x%x/x%x) "
8342 "cannot issue Data: x%x x%x\n",
8343 mboxq->vport ? mboxq->vport->vpi : 0,
8344 mboxq->u.mb.mbxCommand,
8345 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8346 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8347 psli->sli_flag, flag);
8348 goto out_not_finished;
8351 /* Put the mailbox command to the driver internal FIFO */
8352 psli->slistat.mbox_busy++;
8353 spin_lock_irqsave(&phba->hbalock, iflags);
8354 lpfc_mbox_put(phba, mboxq);
8355 spin_unlock_irqrestore(&phba->hbalock, iflags);
8356 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8357 "(%d):0354 Mbox cmd issue - Enqueue Data: "
8358 "x%x (x%x/x%x) x%x x%x x%x\n",
8359 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
8360 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8361 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8362 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8363 phba->pport->port_state,
8364 psli->sli_flag, MBX_NOWAIT);
8365 /* Wake up the worker thread to post the mailbox command from the queue head */
8366 lpfc_worker_wake_up(phba);
8371 return MBX_NOT_FINISHED;
8375 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
8376 * @phba: Pointer to HBA context object.
8378 * This function is called by the worker thread to send a mailbox command to
8379 * the SLI4 HBA firmware.
8383 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
8385 struct lpfc_sli *psli = &phba->sli;
8386 LPFC_MBOXQ_t *mboxq;
8387 int rc = MBX_SUCCESS;
8388 unsigned long iflags;
8389 struct lpfc_mqe *mqe;
8392 /* Check interrupt mode before posting an async mailbox command */
8393 if (unlikely(!phba->sli4_hba.intr_enable))
8394 return MBX_NOT_FINISHED;
8396 /* Check for mailbox command service token */
8397 spin_lock_irqsave(&phba->hbalock, iflags);
8398 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8399 spin_unlock_irqrestore(&phba->hbalock, iflags);
8400 return MBX_NOT_FINISHED;
8402 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8403 spin_unlock_irqrestore(&phba->hbalock, iflags);
8404 return MBX_NOT_FINISHED;
8406 if (unlikely(phba->sli.mbox_active)) {
8407 spin_unlock_irqrestore(&phba->hbalock, iflags);
8408 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8409 "0384 There is pending active mailbox cmd\n");
8410 return MBX_NOT_FINISHED;
8412 /* Take the mailbox command service token */
8413 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8415 /* Get the next mailbox command from head of queue */
8416 mboxq = lpfc_mbox_get(phba);
8418 /* If no more mailbox commands are waiting to be posted, we're done */
8420 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8421 spin_unlock_irqrestore(&phba->hbalock, iflags);
8424 phba->sli.mbox_active = mboxq;
8425 spin_unlock_irqrestore(&phba->hbalock, iflags);
8427 /* Check device readiness for posting mailbox command */
8428 rc = lpfc_mbox_dev_check(phba);
8430 /* Driver clean routine will clean up pending mailbox */
8431 goto out_not_finished;
8433 /* Prepare the mbox command to be posted */
8434 mqe = &mboxq->u.mqe;
8435 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
8437 /* Start timer for the mbox_tmo and log some mailbox post messages */
8438 mod_timer(&psli->mbox_tmo, (jiffies +
8439 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
8441 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8442 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
8444 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8445 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8446 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8447 phba->pport->port_state, psli->sli_flag);
8449 if (mbx_cmnd != MBX_HEARTBEAT) {
8451 lpfc_debugfs_disc_trc(mboxq->vport,
8452 LPFC_DISC_TRC_MBOX_VPORT,
8453 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8454 mbx_cmnd, mqe->un.mb_words[0],
8455 mqe->un.mb_words[1]);
8457 lpfc_debugfs_disc_trc(phba->pport,
8459 "MBOX Send: cmd:x%x mb:x%x x%x",
8460 mbx_cmnd, mqe->un.mb_words[0],
8461 mqe->un.mb_words[1]);
8464 psli->slistat.mbox_cmd++;
8466 /* Post the mailbox command to the port */
8467 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
8468 if (rc != MBX_SUCCESS) {
8469 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8470 "(%d):2533 Mailbox command x%x (x%x/x%x) "
8471 "cannot issue Data: x%x x%x\n",
8472 mboxq->vport ? mboxq->vport->vpi : 0,
8473 mboxq->u.mb.mbxCommand,
8474 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8475 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8476 psli->sli_flag, MBX_NOWAIT);
8477 goto out_not_finished;
8483 spin_lock_irqsave(&phba->hbalock, iflags);
8484 if (phba->sli.mbox_active) {
8485 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8486 __lpfc_mbox_cmpl_put(phba, mboxq);
8487 /* Release the token */
8488 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8489 phba->sli.mbox_active = NULL;
8491 spin_unlock_irqrestore(&phba->hbalock, iflags);
8493 return MBX_NOT_FINISHED;
8497 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
8498 * @phba: Pointer to HBA context object.
8499 * @pmbox: Pointer to mailbox object.
8500 * @flag: Flag indicating how the mailbox needs to be processed.
8502 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
8503 * the API jump table function pointer from the lpfc_hba struct.
8505 * Return codes: the caller owns the mailbox command after the return of the function.
8509 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
8511 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
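
/*
 * Example (illustrative sketch, not driver code): the typical asynchronous
 * calling pattern for the wrapper above. The completion handler and the
 * helper names are hypothetical; on MBX_NOT_FINISHED the caller still owns
 * (and must free) the mailbox.
 */
static void __maybe_unused
lpfc_example_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	if (pmb->u.mb.mbxStatus)
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"mbox x%x completed with status x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
	mempool_free(pmb, phba->mbox_mem_pool);
}

static int __maybe_unused
lpfc_example_issue_async(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;

	/* Queue the mailbox; the worker thread posts it to the port */
	mboxq->mbox_cmpl = lpfc_example_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}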
8515 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
8516 * @phba: The hba struct for which this call is being executed.
8517 * @dev_grp: The HBA PCI-Device group number.
8519 * This routine sets up the mbox interface API function jump table in the @phba struct.
8521 * Returns: 0 - success, -ENODEV - failure.
8524 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8528 case LPFC_PCI_DEV_LP:
8529 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8530 phba->lpfc_sli_handle_slow_ring_event =
8531 lpfc_sli_handle_slow_ring_event_s3;
8532 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
8533 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
8534 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
8536 case LPFC_PCI_DEV_OC:
8537 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
8538 phba->lpfc_sli_handle_slow_ring_event =
8539 lpfc_sli_handle_slow_ring_event_s4;
8540 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
8541 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
8542 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
8545 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8546 "1420 Invalid HBA PCI-device group: 0x%x\n",
8555 * __lpfc_sli_ringtx_put - Add an iocb to the txq
8556 * @phba: Pointer to HBA context object.
8557 * @pring: Pointer to driver SLI ring object.
8558 * @piocb: Pointer to address of newly added command iocb.
8560 * This function is called with hbalock held to add a command
8561 * iocb to the txq when SLI layer cannot submit the command iocb
8565 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8566 struct lpfc_iocbq *piocb)
8568 lockdep_assert_held(&phba->hbalock);
8569 /* Insert the caller's iocb in the txq tail for later processing. */
8570 list_add_tail(&piocb->list, &pring->txq);
8574 * lpfc_sli_next_iocb - Get the next iocb in the txq
8575 * @phba: Pointer to HBA context object.
8576 * @pring: Pointer to driver SLI ring object.
8577 * @piocb: Pointer to address of newly added command iocb.
8579 * This function is called with hbalock held before a new
8580 * iocb is submitted to the firmware. It checks the txq and
8581 * flushes any iocbs queued there to the firmware before
8582 * submitting new iocbs.
8583 * If there are iocbs in the txq which need to be submitted
8584 * to firmware, lpfc_sli_next_iocb returns the first element
8585 * of the txq after dequeuing it from the txq.
8586 * If there is no iocb in the txq, the function returns the
8587 * original *piocb and sets *piocb to NULL. The caller checks
8588 * *piocb to find if there are more commands in the txq.
8590 static struct lpfc_iocbq *
8591 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8592 struct lpfc_iocbq **piocb)
8594 struct lpfc_iocbq *nextiocb;
8596 lockdep_assert_held(&phba->hbalock);
8598 nextiocb = lpfc_sli_ringtx_get(phba, pring);
8608 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
8609 * @phba: Pointer to HBA context object.
8610 * @ring_number: SLI ring number to issue iocb on.
8611 * @piocb: Pointer to command iocb.
8612 * @flag: Flag indicating if this command can be put into txq.
8614 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
8615 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
8616 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
8617 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
8618 * this function allows only iocbs for posting buffers. This function finds
8619 * next available slot in the command ring and posts the command to the
8620 * available slot and writes the port attention register to request HBA start
8621 * processing new iocb. If there is no slot available in the ring and
8622 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
8623 * the function returns IOCB_BUSY.
8625 * This function is called with hbalock held. The function will return success
8626 * after it successfully submits the iocb to the firmware or after adding it to the txq.
8630 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
8631 struct lpfc_iocbq *piocb, uint32_t flag)
8633 struct lpfc_iocbq *nextiocb;
8635 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
8637 lockdep_assert_held(&phba->hbalock);
8639 if (piocb->iocb_cmpl && (!piocb->vport) &&
8640 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
8641 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
8642 lpfc_printf_log(phba, KERN_ERR,
8643 LOG_SLI | LOG_VPORT,
8644 "1807 IOCB x%x failed. No vport\n",
8645 piocb->iocb.ulpCommand);
8651 /* If the PCI channel is in offline state, do not post iocbs. */
8652 if (unlikely(pci_channel_offline(phba->pcidev)))
8655 /* If HBA has a deferred error attention, fail the iocb. */
8656 if (unlikely(phba->hba_flag & DEFER_ERATT))
8660 * We should never get an IOCB if we are in a < LINK_DOWN state
8662 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
8666 * Check to see if we are blocking IOCB processing because of an
8667 * outstanding event.
8669 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
8672 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
8674 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
8675 * can be issued if the link is not up.
8677 switch (piocb->iocb.ulpCommand) {
8678 case CMD_GEN_REQUEST64_CR:
8679 case CMD_GEN_REQUEST64_CX:
8680 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
8681 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
8682 FC_RCTL_DD_UNSOL_CMD) ||
8683 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
8684 MENLO_TRANSPORT_TYPE))
8688 case CMD_QUE_RING_BUF_CN:
8689 case CMD_QUE_RING_BUF64_CN:
8691 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
8692 * completion, iocb_cmpl MUST be 0.
8694 if (piocb->iocb_cmpl)
8695 piocb->iocb_cmpl = NULL;
8697 case CMD_CREATE_XRI_CR:
8698 case CMD_CLOSE_XRI_CN:
8699 case CMD_CLOSE_XRI_CX:
8706 * For FCP commands, we must be in a state where we can process link attention events.
8709 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
8710 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
8714 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
8715 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
8716 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
8719 lpfc_sli_update_ring(phba, pring);
8721 lpfc_sli_update_full_ring(phba, pring);
8724 return IOCB_SUCCESS;
8729 pring->stats.iocb_cmd_delay++;
8733 if (!(flag & SLI_IOCB_RET_IOCB)) {
8734 __lpfc_sli_ringtx_put(phba, pring, piocb);
8735 return IOCB_SUCCESS;
8742 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
8743 * @phba: Pointer to HBA context object.
8744 * @piocb: Pointer to command iocb.
8745 * @sglq: Pointer to the scatter gather queue object.
8747 * This routine converts the bpl or bde that is in the IOCB
8748 * to a sgl list for the sli4 hardware. The physical address
8749 * of the bpl/bde is converted back to a virtual address.
8750 * If the IOCB contains a BPL then the list of BDEs is
8751 * converted to sli4_sges. If the IOCB contains a single
8752 * BDE then it is converted to a single sli4_sge.
8753 * The IOCB is still in CPU endianness so the contents of
8754 * the bpl can be used without byte swapping.
8756 * Returns valid XRI = Success, NO_XRI = Failure.
8759 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
8760 struct lpfc_sglq *sglq)
8762 uint16_t xritag = NO_XRI;
8763 struct ulp_bde64 *bpl = NULL;
8764 struct ulp_bde64 bde;
8765 struct sli4_sge *sgl = NULL;
8766 struct lpfc_dmabuf *dmabuf;
8770 uint32_t offset = 0; /* accumulated offset in the sg request list */
8771 int inbound = 0; /* number of sg reply entries inbound from firmware */
8773 if (!piocbq || !sglq)
8776 sgl = (struct sli4_sge *)sglq->sgl;
8777 icmd = &piocbq->iocb;
8778 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
8779 return sglq->sli4_xritag;
8780 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
8781 numBdes = icmd->un.genreq64.bdl.bdeSize /
8782 sizeof(struct ulp_bde64);
8783 /* The addrHigh and addrLow fields within the IOCB
8784 * have not been byteswapped yet so there is no
8785 * need to swap them back.
8787 if (piocbq->context3)
8788 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
8792 bpl = (struct ulp_bde64 *)dmabuf->virt;
8796 for (i = 0; i < numBdes; i++) {
8797 /* Should already be byte swapped. */
8798 sgl->addr_hi = bpl->addrHigh;
8799 sgl->addr_lo = bpl->addrLow;
8801 sgl->word2 = le32_to_cpu(sgl->word2);
8802 if ((i+1) == numBdes)
8803 bf_set(lpfc_sli4_sge_last, sgl, 1);
8805 bf_set(lpfc_sli4_sge_last, sgl, 0);
8806 /* swap the size field back to the cpu so we
8807 * can assign it to the sgl.
8809 bde.tus.w = le32_to_cpu(bpl->tus.w);
8810 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
8811 /* The offsets in the sgl need to be accumulated
8812 * separately for the request and reply lists.
8813 * The request is always first, the reply follows.
8815 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
8816 /* add up the reply sg entries */
8817 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
8819 /* first inbound? reset the offset */
8822 bf_set(lpfc_sli4_sge_offset, sgl, offset);
8823 bf_set(lpfc_sli4_sge_type, sgl,
8824 LPFC_SGE_TYPE_DATA);
8825 offset += bde.tus.f.bdeSize;
8827 sgl->word2 = cpu_to_le32(sgl->word2);
8831 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
8832 /* The addrHigh and addrLow fields of the BDE have not
8833 * been byteswapped yet so they need to be swapped
8834 * before putting them in the sgl.
8837 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
8839 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
8840 sgl->word2 = le32_to_cpu(sgl->word2);
8841 bf_set(lpfc_sli4_sge_last, sgl, 1);
8842 sgl->word2 = cpu_to_le32(sgl->word2);
8844 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
8846 return sglq->sli4_xritag;
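
/*
 * Example (illustrative sketch, not driver code): the per-entry
 * conversion performed by the loop above, isolated for one BDE. Note the
 * endianness discipline: word2 is swapped to CPU order for the bf_set()
 * updates and swapped back before the SGE is posted, while the address
 * words are already little-endian. The helper name is hypothetical.
 */
static void __maybe_unused
lpfc_example_bde_to_sge(struct ulp_bde64 *bpl, struct sli4_sge *sgl,
			int last)
{
	struct ulp_bde64 bde;

	sgl->addr_hi = bpl->addrHigh;	/* already little-endian */
	sgl->addr_lo = bpl->addrLow;

	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, last ? 1 : 0);
	sgl->word2 = cpu_to_le32(sgl->word2);

	/* Only the size word is swapped to CPU order for the arithmetic */
	bde.tus.w = le32_to_cpu(bpl->tus.w);
	sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
}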
8850 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
8851 * @phba: Pointer to HBA context object.
8852 * @piocb: Pointer to command iocb.
8853 * @wqe: Pointer to the work queue entry.
8855 * This routine converts the iocb command to its Work Queue Entry
8856 * equivalent. The wqe pointer should not have any fields set when
8857 * this routine is called because it will memcpy over them.
8858 * This routine does not set the CQ_ID or the WQEC bits in the
8861 * Returns: 0 = Success, IOCB_ERROR = Failure.
8864 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8865 union lpfc_wqe *wqe)
8867 uint32_t xmit_len = 0, total_len = 0;
8871 uint8_t command_type = ELS_COMMAND_NON_FIP;
8874 uint16_t abrt_iotag;
8875 struct lpfc_iocbq *abrtiocbq;
8876 struct ulp_bde64 *bpl = NULL;
8877 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
8879 struct ulp_bde64 bde;
8880 struct lpfc_nodelist *ndlp;
8884 fip = phba->hba_flag & HBA_FIP_SUPPORT;
8885 /* The fcp commands will set command type */
8886 if (iocbq->iocb_flag & LPFC_IO_FCP)
8887 command_type = FCP_COMMAND;
8888 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
8889 command_type = ELS_COMMAND_FIP;
8891 command_type = ELS_COMMAND_NON_FIP;
8893 if (phba->fcp_embed_io)
8894 memset(wqe, 0, sizeof(union lpfc_wqe128));
8895 /* Some of the fields are in the right position already */
8896 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
8897 if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
8898 /* The ct field has moved so reset */
8899 wqe->generic.wqe_com.word7 = 0;
8900 wqe->generic.wqe_com.word10 = 0;
8903 abort_tag = (uint32_t) iocbq->iotag;
8904 xritag = iocbq->sli4_xritag;
8905 /* words0-2 bpl convert bde */
8906 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
8907 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8908 sizeof(struct ulp_bde64);
8909 bpl = (struct ulp_bde64 *)
8910 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
8914 /* Should already be byte swapped. */
8915 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
8916 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
8917 /* swap the size field back to the cpu so we
8918 * can assign it to the sgl.
8920 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
8921 xmit_len = wqe->generic.bde.tus.f.bdeSize;
8923 for (i = 0; i < numBdes; i++) {
8924 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
8925 total_len += bde.tus.f.bdeSize;
8928 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
8930 iocbq->iocb.ulpIoTag = iocbq->iotag;
8931 cmnd = iocbq->iocb.ulpCommand;
8933 switch (iocbq->iocb.ulpCommand) {
8934 case CMD_ELS_REQUEST64_CR:
8935 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
8936 ndlp = iocbq->context_un.ndlp;
8938 ndlp = (struct lpfc_nodelist *)iocbq->context1;
8939 if (!iocbq->iocb.ulpLe) {
8940 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8941 "2007 Only Limited Edition cmd Format"
8942 " supported 0x%x\n",
8943 iocbq->iocb.ulpCommand);
8947 wqe->els_req.payload_len = xmit_len;
8948 /* ELS_REQUEST64 has a TMO */
8949 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
8950 iocbq->iocb.ulpTimeout);
8951 /* Need a VF for word 4 set the vf bit*/
8952 bf_set(els_req64_vf, &wqe->els_req, 0);
8953 /* And a VFID for word 12 */
8954 bf_set(els_req64_vfid, &wqe->els_req, 0);
8955 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8956 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8957 iocbq->iocb.ulpContext);
8958 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
8959 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
8960 /* CCP CCPE PV PRI in word10 were set in the memcpy */
8961 if (command_type == ELS_COMMAND_FIP)
8962 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
8963 >> LPFC_FIP_ELS_ID_SHIFT);
8964 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8965 iocbq->context2)->virt);
8966 if_type = bf_get(lpfc_sli_intf_if_type,
8967 &phba->sli4_hba.sli_intf);
8968 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
8969 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
8970 *pcmd == ELS_CMD_SCR ||
8971 *pcmd == ELS_CMD_FDISC ||
8972 *pcmd == ELS_CMD_LOGO ||
8973 *pcmd == ELS_CMD_PLOGI)) {
8974 bf_set(els_req64_sp, &wqe->els_req, 1);
8975 bf_set(els_req64_sid, &wqe->els_req,
8976 iocbq->vport->fc_myDID);
8977 if ((*pcmd == ELS_CMD_FLOGI) &&
8978 !(phba->fc_topology ==
8979 LPFC_TOPOLOGY_LOOP))
8980 bf_set(els_req64_sid, &wqe->els_req, 0);
8981 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
8982 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8983 phba->vpi_ids[iocbq->vport->vpi]);
8984 } else if (pcmd && iocbq->context1) {
8985 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
8986 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8987 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8990 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
8991 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8992 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
8993 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
8994 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
8995 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
8996 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8997 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
8998 wqe->els_req.max_response_payload_len = total_len - xmit_len;
9000 case CMD_XMIT_SEQUENCE64_CX:
9001 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9002 iocbq->iocb.un.ulpWord[3]);
9003 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
9004 iocbq->iocb.unsli3.rcvsli3.ox_id);
9005 /* The entire sequence is transmitted for this IOCB */
9006 xmit_len = total_len;
9007 cmnd = CMD_XMIT_SEQUENCE64_CR;
9008 if (phba->link_flag & LS_LOOPBACK_MODE)
9009 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
9010 case CMD_XMIT_SEQUENCE64_CR:
9011 /* word3 iocb=io_tag32 wqe=reserved */
9012 wqe->xmit_sequence.rsvd3 = 0;
9013 /* word4 relative_offset memcpy */
9014 /* word5 r_ctl/df_ctl memcpy */
9015 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9016 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9017 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9018 LPFC_WQE_IOD_WRITE);
9019 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9020 LPFC_WQE_LENLOC_WORD12);
9021 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
9022 wqe->xmit_sequence.xmit_len = xmit_len;
9023 command_type = OTHER_COMMAND;
9025 case CMD_XMIT_BCAST64_CN:
9026 /* word3 iocb=iotag32 wqe=seq_payload_len */
9027 wqe->xmit_bcast64.seq_payload_len = xmit_len;
9028 /* word4 iocb=rsvd wqe=rsvd */
9029 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
9030 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
9031 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
9032 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9033 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9034 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9035 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9036 LPFC_WQE_LENLOC_WORD3);
9037 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
9039 case CMD_FCP_IWRITE64_CR:
9040 command_type = FCP_COMMAND_DATA_OUT;
9041 /* word3 iocb=iotag wqe=payload_offset_len */
9042 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9043 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9044 xmit_len + sizeof(struct fcp_rsp));
9045 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9047 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9048 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9049 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9050 iocbq->iocb.ulpFCP2Rcvy);
9051 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9052 /* Always open the exchange */
9053 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9054 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9055 LPFC_WQE_LENLOC_WORD4);
9056 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
9057 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
9058 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9059 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
9060 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9061 if (iocbq->priority) {
9062 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9063 (iocbq->priority << 1));
9065 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9066 (phba->cfg_XLanePriority << 1));
9069 /* Note, word 10 is already initialized to 0 */
9071 /* Don't set PBDE for Perf hints, just fcp_embed_pbde */
9072 if (phba->fcp_embed_pbde)
9073 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9075 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9077 if (phba->fcp_embed_io) {
9078 struct lpfc_scsi_buf *lpfc_cmd;
9079 struct sli4_sge *sgl;
9080 union lpfc_wqe128 *wqe128;
9081 struct fcp_cmnd *fcp_cmnd;
9084 /* 128 byte wqe support here */
9085 wqe128 = (union lpfc_wqe128 *)wqe;
9087 lpfc_cmd = iocbq->context1;
9088 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
9089 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9091 /* Word 0-2 - FCP_CMND */
9092 wqe128->generic.bde.tus.f.bdeFlags =
9093 BUFF_TYPE_BDE_IMMED;
9094 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
9095 wqe128->generic.bde.addrHigh = 0;
9096 wqe128->generic.bde.addrLow = 88; /* Word 22 */
9098 bf_set(wqe_wqes, &wqe128->fcp_iwrite.wqe_com, 1);
9100 /* Word 22-29 FCP CMND Payload */
9101 ptr = &wqe128->words[22];
9102 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9105 case CMD_FCP_IREAD64_CR:
9106 /* word3 iocb=iotag wqe=payload_offset_len */
9107 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9108 bf_set(payload_offset_len, &wqe->fcp_iread,
9109 xmit_len + sizeof(struct fcp_rsp));
9110 bf_set(cmd_buff_len, &wqe->fcp_iread,
9112 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9113 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9114 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9115 iocbq->iocb.ulpFCP2Rcvy);
9116 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
9117 /* Always open the exchange */
9118 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9119 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9120 LPFC_WQE_LENLOC_WORD4);
9121 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
9122 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
9123 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9124 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
9125 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9126 if (iocbq->priority) {
9127 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9128 (iocbq->priority << 1));
9130 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9131 (phba->cfg_XLanePriority << 1));
9134 /* Note, word 10 is already initialized to 0 */
9136 /* Don't set PBDE for Perf hints, just fcp_embed_pbde */
9137 if (phba->fcp_embed_pbde)
9138 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9140 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9142 if (phba->fcp_embed_io) {
9143 struct lpfc_scsi_buf *lpfc_cmd;
9144 struct sli4_sge *sgl;
9145 union lpfc_wqe128 *wqe128;
9146 struct fcp_cmnd *fcp_cmnd;
9149 /* 128 byte wqe support here */
9150 wqe128 = (union lpfc_wqe128 *)wqe;
9152 lpfc_cmd = iocbq->context1;
9153 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
9154 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9156 /* Word 0-2 - FCP_CMND */
9157 wqe128->generic.bde.tus.f.bdeFlags =
9158 BUFF_TYPE_BDE_IMMED;
9159 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
9160 wqe128->generic.bde.addrHigh = 0;
9161 wqe128->generic.bde.addrLow = 88; /* Word 22 */
9163 bf_set(wqe_wqes, &wqe128->fcp_iread.wqe_com, 1);
9165 /* Word 22-29 FCP CMND Payload */
9166 ptr = &wqe128->words[22];
9167 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9170 case CMD_FCP_ICMND64_CR:
9171 /* word3 iocb=iotag wqe=payload_offset_len */
9172 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9173 bf_set(payload_offset_len, &wqe->fcp_icmd,
9174 xmit_len + sizeof(struct fcp_rsp));
9175 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9177 /* word3 iocb=IO_TAG wqe=reserved */
9178 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
9179 /* Always open the exchange */
9180 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9181 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9182 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9183 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9184 LPFC_WQE_LENLOC_NONE);
9185 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9186 iocbq->iocb.ulpFCP2Rcvy);
9187 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9188 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
9189 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9190 if (iocbq->priority) {
9191 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9192 (iocbq->priority << 1));
9194 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9195 (phba->cfg_XLanePriority << 1));
9198 /* Note, word 10 is already initialized to 0 */
9200 if (phba->fcp_embed_io) {
9201 struct lpfc_scsi_buf *lpfc_cmd;
9202 struct sli4_sge *sgl;
9203 union lpfc_wqe128 *wqe128;
9204 struct fcp_cmnd *fcp_cmnd;
9207 /* 128 byte wqe support here */
9208 wqe128 = (union lpfc_wqe128 *)wqe;
9210 lpfc_cmd = iocbq->context1;
9211 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
9212 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9214 /* Word 0-2 - FCP_CMND */
9215 wqe128->generic.bde.tus.f.bdeFlags =
9216 BUFF_TYPE_BDE_IMMED;
9217 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
9218 wqe128->generic.bde.addrHigh = 0;
9219 wqe128->generic.bde.addrLow = 88; /* Word 22 */
9221 bf_set(wqe_wqes, &wqe128->fcp_icmd.wqe_com, 1);
9223 /* Word 22-29 FCP CMND Payload */
9224 ptr = &wqe128->words[22];
9225 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9228 case CMD_GEN_REQUEST64_CR:
9229 /* For this command, calculate the xmit length of the request bde. */
9233 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9234 sizeof(struct ulp_bde64);
9235 for (i = 0; i < numBdes; i++) {
9236 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9237 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9239 xmit_len += bde.tus.f.bdeSize;
9241 /* word3 iocb=IO_TAG wqe=request_payload_len */
9242 wqe->gen_req.request_payload_len = xmit_len;
9243 /* word4 iocb=parameter wqe=relative_offset memcpy */
9244 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
9245 /* word6 context tag copied in memcpy */
9246 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
9247 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9248 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9249 "2015 Invalid CT %x command 0x%x\n",
9250 ct, iocbq->iocb.ulpCommand);
9253 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9254 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9255 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9256 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9257 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9258 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9259 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9260 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
9261 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
9262 command_type = OTHER_COMMAND;
9264 case CMD_XMIT_ELS_RSP64_CX:
9265 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9266 /* words0-2 BDE memcpy */
9267 /* word3 iocb=iotag32 wqe=response_payload_len */
9268 wqe->xmit_els_rsp.response_payload_len = xmit_len;
9270 wqe->xmit_els_rsp.word4 = 0;
9271 /* word5 iocb=rsvd wqe=did */
9272 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
9273 iocbq->iocb.un.xseq64.xmit_els_remoteID);
9275 if_type = bf_get(lpfc_sli_intf_if_type,
9276 &phba->sli4_hba.sli_intf);
9277 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9278 if (iocbq->vport->fc_flag & FC_PT2PT) {
9279 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9280 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9281 iocbq->vport->fc_myDID);
9282 if (iocbq->vport->fc_myDID == Fabric_DID) {
9284 &wqe->xmit_els_rsp.wqe_dest, 0);
9288 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9289 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9290 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9291 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
9292 iocbq->iocb.unsli3.rcvsli3.ox_id);
9293 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
9294 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9295 phba->vpi_ids[iocbq->vport->vpi]);
9296 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9297 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9298 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9299 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9300 LPFC_WQE_LENLOC_WORD3);
9301 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
9302 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9303 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9304 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9305 iocbq->context2)->virt);
9306 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
9307 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9308 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9309 iocbq->vport->fc_myDID);
9310 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9311 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9312 phba->vpi_ids[phba->pport->vpi]);
9314 command_type = OTHER_COMMAND;
9316 case CMD_CLOSE_XRI_CN:
9317 case CMD_ABORT_XRI_CN:
9318 case CMD_ABORT_XRI_CX:
9319 /* words 0-2 memcpy should be 0 (reserved) */
9320 /* port will send abts */
9321 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9322 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9323 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9324 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9328 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
9330 * The link is down, or the command was ELS_FIP
9331 * so the fw does not need to send abts
9334 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9336 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9337 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
9338 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9339 wqe->abort_cmd.rsrvd5 = 0;
9340 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
9341 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9342 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
9344 * The abort handler will send us CMD_ABORT_XRI_CN or
9345 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
9347 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9348 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9349 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9350 LPFC_WQE_LENLOC_NONE);
9351 cmnd = CMD_ABORT_XRI_CX;
9352 command_type = OTHER_COMMAND;
9355 case CMD_XMIT_BLS_RSP64_CX:
9356 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9357 /* As BLS ABTS RSP WQE is very different from other WQEs,
9358 * we re-construct this WQE here based on information in
9359 * iocbq from scratch.
9361 memset(wqe, 0, sizeof(union lpfc_wqe));
9362 /* OX_ID is the same regardless of who sent the ABTS to the CT exchange */
9363 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
9364 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
9365 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
9366 LPFC_ABTS_UNSOL_INT) {
9367 /* ABTS sent by initiator to CT exchange, the
9368 * RX_ID field will be filled with the newly
9369 * allocated responder XRI.
9371 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9372 iocbq->sli4_xritag);
9374 /* ABTS sent by responder to CT exchange, the
9375 * RX_ID field will be filled with the responder
9378 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9379 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
9381 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9382 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
9385 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9387 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9388 iocbq->iocb.ulpContext);
9389 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
9390 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
9391 phba->vpi_ids[phba->pport->vpi]);
9392 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9393 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9394 LPFC_WQE_LENLOC_NONE);
9395 /* Overwrite the pre-set command type with OTHER_COMMAND */
9396 command_type = OTHER_COMMAND;
9397 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9398 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9399 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9400 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9401 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9402 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9403 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9407 case CMD_SEND_FRAME:
9408 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9409 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9411 case CMD_XRI_ABORTED_CX:
9412 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
9413 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
9414 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
9415 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
9416 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
9418 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9419 "2014 Invalid command 0x%x\n",
9420 iocbq->iocb.ulpCommand);
9425 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9426 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9427 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9428 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9429 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9430 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9431 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9432 LPFC_IO_DIF_INSERT);
9433 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9434 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9435 wqe->generic.wqe_com.abort_tag = abort_tag;
9436 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9437 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9438 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9439 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
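
/*
 * Example (illustrative sketch, not driver code): the SLI4 conversion
 * pipeline in condensed form - map the BPL/BDE to an SGL, translate the
 * iocb into a WQE, then post it to the work queue. This is what
 * __lpfc_sli_issue_iocb_s4() below does with full locking and txq
 * handling; the helper name is hypothetical.
 */
static int __maybe_unused
lpfc_example_iocb_to_port(struct lpfc_hba *phba, struct lpfc_queue *wq,
			  struct lpfc_iocbq *piocb, struct lpfc_sglq *sglq)
{
	union lpfc_wqe128 wqe128;
	union lpfc_wqe *wqe = (union lpfc_wqe *)&wqe128;

	if (lpfc_sli4_bpl2sgl(phba, piocb, sglq) == NO_XRI)
		return IOCB_ERROR;
	if (lpfc_sli4_iocb2wqe(phba, piocb, wqe))
		return IOCB_ERROR;
	return lpfc_sli4_wq_put(wq, wqe) ? IOCB_ERROR : IOCB_SUCCESS;
}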
9444 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
9445 * @phba: Pointer to HBA context object.
9446 * @ring_number: SLI ring number to issue iocb on.
9447 * @piocb: Pointer to command iocb.
9448 * @flag: Flag indicating if this command can be put into txq.
9450 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
9451 * an iocb command to an HBA with SLI-4 interface spec.
9453 * This function is called with hbalock held. The function will return success
9454 * after it successfully submit the iocb to firmware or after adding to the
9458 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9459 struct lpfc_iocbq *piocb, uint32_t flag)
9461 struct lpfc_sglq *sglq;
9462 union lpfc_wqe *wqe;
9463 union lpfc_wqe128 wqe128;
9464 struct lpfc_queue *wq;
9465 struct lpfc_sli_ring *pring;
9468 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9469 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9470 if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS)))
9471 wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx];
9473 wq = phba->sli4_hba.oas_wq;
9475 wq = phba->sli4_hba.els_wq;
9478 /* Get corresponding ring */
9482 * The WQE can be either 64 or 128 bytes,
9483 * so allocate space on the stack assuming the largest.
9485 wqe = (union lpfc_wqe *)&wqe128;
9487 lockdep_assert_held(&phba->hbalock);
9489 if (piocb->sli4_xritag == NO_XRI) {
9490 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
9491 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
9494 if (!list_empty(&pring->txq)) {
9495 if (!(flag & SLI_IOCB_RET_IOCB)) {
9496 __lpfc_sli_ringtx_put(phba,
9498 return IOCB_SUCCESS;
9503 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
9505 if (!(flag & SLI_IOCB_RET_IOCB)) {
9506 __lpfc_sli_ringtx_put(phba,
9509 return IOCB_SUCCESS;
9515 } else if (piocb->iocb_flag & LPFC_IO_FCP)
9516 /* These IO's already have an XRI and a mapped sgl. */
9520 * This is a continuation of a command (CX), so this
9521 * sglq is on the active list.
9523 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
9529 piocb->sli4_lxritag = sglq->sli4_lxritag;
9530 piocb->sli4_xritag = sglq->sli4_xritag;
9531 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
9535 if (lpfc_sli4_iocb2wqe(phba, piocb, wqe))
9538 if (lpfc_sli4_wq_put(wq, wqe))
9540 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
9546 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
9548 * This routine wraps the actual lockless IOCB-issuing function
9549 * pointer from the lpfc_hba struct.
9552 * IOCB_ERROR - Error
9553 * IOCB_SUCCESS - Success
9557 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9558 struct lpfc_iocbq *piocb, uint32_t flag)
9560 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9564 * lpfc_sli_api_table_setup - Set up sli api function jump table
9565 * @phba: The hba struct for which this call is being executed.
9566 * @dev_grp: The HBA PCI-Device group number.
9568 * This routine sets up the SLI interface API function jump table in the @phba struct.
9570 * Returns: 0 - success, -ENODEV - failure.
9573 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9577 case LPFC_PCI_DEV_LP:
9578 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
9579 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
9581 case LPFC_PCI_DEV_OC:
9582 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
9583 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
9586 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9587 "1419 Invalid HBA PCI-device group: 0x%x\n",
9592 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
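
/*
 * Example (illustrative sketch, not driver code): probe-time wiring of
 * both jump tables, keyed off the detected PCI device group, before any
 * mailbox or iocb traffic is generated. The helper name is hypothetical;
 * in the driver this sequencing lives in the init path.
 */
static int __maybe_unused
lpfc_example_api_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	if (lpfc_mbox_api_table_setup(phba, dev_grp))
		return -ENODEV;
	if (lpfc_sli_api_table_setup(phba, dev_grp))
		return -ENODEV;
	return 0;
}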
9597 * lpfc_sli4_calc_ring - Calculates which ring to use
9598 * @phba: Pointer to HBA context object.
9599 * @piocb: Pointer to command iocb.
9601 * For SLI4 only: FCP IO can be deferred to one of many WQs, based on
9602 * hba_wqidx, thus we need to calculate the corresponding ring.
9603 * Since ABORTs must go on the same WQ as the command they are
9604 * aborting, we use the command's hba_wqidx.
9606 struct lpfc_sli_ring *
9607 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
9609 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
9610 if (!(phba->cfg_fof) ||
9611 (!(piocb->iocb_flag & LPFC_IO_FOF))) {
9612 if (unlikely(!phba->sli4_hba.fcp_wq))
9615 * For an abort iocb, hba_wqidx should already
9616 * be set up based on which work queue we used.
9618 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9620 lpfc_sli4_scmd_to_wqidx_distr(phba,
9622 piocb->hba_wqidx = piocb->hba_wqidx %
9623 phba->cfg_fcp_io_channel;
9625 return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
9627 if (unlikely(!phba->sli4_hba.oas_wq))
9629 piocb->hba_wqidx = 0;
9630 return phba->sli4_hba.oas_wq->pring;
9633 if (unlikely(!phba->sli4_hba.els_wq))
9635 piocb->hba_wqidx = 0;
9636 return phba->sli4_hba.els_wq->pring;
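
/*
 * Example (illustrative sketch, not driver code): building an abort
 * request that lands on the same WQ/ring as the command it aborts, by
 * inheriting the command's hba_wqidx before calling the routine above.
 * The helper name is hypothetical.
 */
static struct lpfc_sli_ring * __maybe_unused
lpfc_example_ring_for_abort(struct lpfc_hba *phba,
			    struct lpfc_iocbq *cmdiocb,
			    struct lpfc_iocbq *abtsiocb)
{
	abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
	if (cmdiocb->iocb_flag & LPFC_IO_FCP)
		abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
	return lpfc_sli4_calc_ring(phba, abtsiocb);
}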
9641 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
9642 * @phba: Pointer to HBA context object.
9643 * @pring: Pointer to driver SLI ring object.
9644 * @piocb: Pointer to command iocb.
9645 * @flag: Flag indicating if this command can be put into txq.
9647 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
9648 * function. This function gets the hbalock and calls
9649 * __lpfc_sli_issue_iocb function and will return the error returned
9650 * by __lpfc_sli_issue_iocb function. This wrapper is used by
9651 * functions which do not hold hbalock.
9654 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9655 struct lpfc_iocbq *piocb, uint32_t flag)
9657 struct lpfc_hba_eq_hdl *hba_eq_hdl;
9658 struct lpfc_sli_ring *pring;
9659 struct lpfc_queue *fpeq;
9660 struct lpfc_eqe *eqe;
9661 unsigned long iflags;
9664 if (phba->sli_rev == LPFC_SLI_REV4) {
9665 pring = lpfc_sli4_calc_ring(phba, piocb);
9666 if (unlikely(pring == NULL))
9669 spin_lock_irqsave(&pring->ring_lock, iflags);
9670 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9671 spin_unlock_irqrestore(&pring->ring_lock, iflags);
9673 if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) {
9674 idx = piocb->hba_wqidx;
9675 hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx];
9677 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) {
9679 /* Get associated EQ with this index */
9680 fpeq = phba->sli4_hba.hba_eq[idx];
9682 /* Turn off interrupts from this EQ */
9683 phba->sli4_hba.sli4_eq_clr_intr(fpeq);
9686 * Process all the events on FCP EQ
9688 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
9689 lpfc_sli4_hba_handle_eqe(phba,
9691 fpeq->EQ_processed++;
9694 /* Always clear and re-arm the EQ */
9695 phba->sli4_hba.sli4_eq_release(fpeq,
9698 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
9701 /* For now, SLI2/3 will still use hbalock */
9702 spin_lock_irqsave(&phba->hbalock, iflags);
9703 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9704 spin_unlock_irqrestore(&phba->hbalock, iflags);
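
/*
 * Example (illustrative sketch, not driver code): issuing an ELS-class
 * iocb through the locking wrapper above. Without SLI_IOCB_RET_IOCB a
 * ring-full condition parks the iocb on the txq and still returns
 * success; with the flag set the caller gets IOCB_BUSY back and keeps
 * ownership. The helper name is hypothetical.
 */
static int __maybe_unused
lpfc_example_issue_els(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
	int rc;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb,
				 SLI_IOCB_RET_IOCB);
	if (rc == IOCB_BUSY)
		/* We still own piocb on busy; release it here */
		lpfc_sli_release_iocbq(phba, piocb);
	return rc;
}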
9710 * lpfc_extra_ring_setup - Extra ring setup function
9711 * @phba: Pointer to HBA context object.
9713 * This function is called while driver attaches with the
9714 * HBA to set up the extra ring. The extra ring is used
9715 * only when the driver needs to support target mode
9716 * or IP over FC functionality.
9718 * This function is called with no lock held. SLI3 only.
9721 lpfc_extra_ring_setup(struct lpfc_hba *phba)
9723 struct lpfc_sli *psli;
9724 struct lpfc_sli_ring *pring;
9728 /* Adjust cmd/rsp ring iocb entries more evenly */
9730 /* Take some away from the FCP ring */
9731 pring = &psli->sli3_ring[LPFC_FCP_RING];
9732 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9733 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9734 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9735 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9737 /* and give them to the extra ring */
9738 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
9740 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9741 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9742 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9743 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9745 /* Setup default profile for this ring */
9746 pring->iotag_max = 4096;
9747 pring->num_mask = 1;
9748 pring->prt[0].profile = 0; /* Mask 0 */
9749 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
9750 pring->prt[0].type = phba->cfg_multi_ring_type;
9751 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
9755 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
9756 * @phba: Pointer to HBA context object.
9757 * @iocbq: Pointer to iocb object.
9759 * The async_event handler calls this routine when it receives
9760 * an ASYNC_STATUS_CN event from the port. The port generates
9761 * this event when an Abort Sequence request to an rport fails
9762 * twice in succession. The abort could be originated by the
9763 * driver or by the port. The ABTS could have been for an ELS
9764 * or FCP IO. The port only generates this event when an ABTS
9765 * fails to complete after one retry.
9768 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
9769 struct lpfc_iocbq *iocbq)
9771 struct lpfc_nodelist *ndlp = NULL;
9772 uint16_t rpi = 0, vpi = 0;
9773 struct lpfc_vport *vport = NULL;
9775 /* The rpi in the ulpContext is vport-sensitive. */
9776 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
9777 rpi = iocbq->iocb.ulpContext;
9779 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9780 "3092 Port generated ABTS async event "
9781 "on vpi %d rpi %d status 0x%x\n",
9782 vpi, rpi, iocbq->iocb.ulpStatus);
9784 vport = lpfc_find_vport_by_vpid(phba, vpi);
9787 ndlp = lpfc_findnode_rpi(vport, rpi);
9788 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
9791 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
9792 lpfc_sli_abts_recover_port(vport, ndlp);
9796 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9797 "3095 Event Context not found, no "
9798 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
9799 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
9803 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
9804 * @phba: pointer to HBA context object.
9805 * @ndlp: nodelist pointer for the impacted rport.
9806 * @axri: pointer to the wcqe containing the failed exchange.
9808 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
9809 * port. The port generates this event when an abort exchange request to an
9810 * rport fails twice in succession with no reply. The abort could be originated
9811 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
9814 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
9815 struct lpfc_nodelist *ndlp,
9816 struct sli4_wcqe_xri_aborted *axri)
9818 struct lpfc_vport *vport;
9819 uint32_t ext_status = 0;
9821 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
9822 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9823 "3115 Node Context not found, driver "
9824 "ignoring abts err event\n");
9828 vport = ndlp->vport;
9829 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9830 "3116 Port generated FCP XRI ABORT event on "
9831 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
9832 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
9833 bf_get(lpfc_wcqe_xa_xri, axri),
9834 bf_get(lpfc_wcqe_xa_status, axri),
9838 * Catch the ABTS protocol failure case. Older OCe FW releases returned
9839 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
9840 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
9842 ext_status = axri->parameter & IOERR_PARAM_MASK;
9843 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
9844 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
9845 lpfc_sli_abts_recover_port(vport, ndlp);
9849 * lpfc_sli_async_event_handler - ASYNC iocb handler function
9850 * @phba: Pointer to HBA context object.
9851 * @pring: Pointer to driver SLI ring object.
9852 * @iocbq: Pointer to iocb object.
9854 * This function is called by the slow ring event handler
9855 * function when there is an ASYNC event iocb in the ring.
9856 * This function is called with no lock held.
9857 * Currently this function handles only temperature related
9858 * ASYNC events. The function decodes the temperature sensor
9859 * event message and posts events for the management applications.
9862 lpfc_sli_async_event_handler(struct lpfc_hba * phba,
9863 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
9867 struct temp_event temp_event_data;
9868 struct Scsi_Host *shost;
9871 icmd = &iocbq->iocb;
9872 evt_code = icmd->un.asyncstat.evt_code;
9874 switch (evt_code) {
9875 case ASYNC_TEMP_WARN:
9876 case ASYNC_TEMP_SAFE:
9877 temp_event_data.data = (uint32_t) icmd->ulpContext;
9878 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
9879 if (evt_code == ASYNC_TEMP_WARN) {
9880 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
9881 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
9882 "0347 Adapter is very hot, please take "
9883 "corrective action. temperature : %d Celsius\n",
9884 (uint32_t) icmd->ulpContext);
9885 } else {
9886 temp_event_data.event_code = LPFC_NORMAL_TEMP;
9887 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
9888 "0340 Adapter temperature is OK now. "
9889 "temperature : %d Celsius\n",
9890 (uint32_t) icmd->ulpContext);
9891 }
9893 /* Send temperature change event to applications */
9894 shost = lpfc_shost_from_vport(phba->pport);
9895 fc_host_post_vendor_event(shost, fc_get_event_number(),
9896 sizeof(temp_event_data), (char *) &temp_event_data,
9897 LPFC_NL_VENDOR_ID);
9898 break;
9899 case ASYNC_STATUS_CN:
9900 lpfc_sli_abts_err_handler(phba, iocbq);
9901 break;
9902 default:
9903 iocb_w = (uint32_t *) icmd;
9904 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9905 "0346 Ring %d handler: unexpected ASYNC_STATUS"
9907 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
9908 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
9909 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
9910 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
9911 pring->ringno, icmd->un.asyncstat.evt_code,
9912 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
9913 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
9914 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
9915 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
9916 break;
9917 }
9918 }
9923 * lpfc_sli4_setup - SLI ring setup function
9924 * @phba: Pointer to HBA context object.
9926 * lpfc_sli4_setup sets up the rings of the SLI interface with
9927 * the number of iocbs per ring and iotags. This function is
9928 * called during driver attach to the HBA, before the
9929 * interrupts are enabled. So there is no need for locking.
9931 * This function always returns 0.
9934 lpfc_sli4_setup(struct lpfc_hba *phba)
9936 struct lpfc_sli_ring *pring;
9938 pring = phba->sli4_hba.els_wq->pring;
9939 pring->num_mask = LPFC_MAX_RING_MASK;
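/*
 * The four prt[] masks below route unsolicited frames by their
 * (rctl, type) pair: masks 0 and 1 match ELS requests and replies,
 * masks 2 and 3 match CT frames (NameServer traffic), each wired
 * to the corresponding unsolicited-event handler.
 */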
9940 pring->prt[0].profile = 0; /* Mask 0 */
9941 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
9942 pring->prt[0].type = FC_TYPE_ELS;
9943 pring->prt[0].lpfc_sli_rcv_unsol_event =
9944 lpfc_els_unsol_event;
9945 pring->prt[1].profile = 0; /* Mask 1 */
9946 pring->prt[1].rctl = FC_RCTL_ELS_REP;
9947 pring->prt[1].type = FC_TYPE_ELS;
9948 pring->prt[1].lpfc_sli_rcv_unsol_event =
9949 lpfc_els_unsol_event;
9950 pring->prt[2].profile = 0; /* Mask 2 */
9951 /* NameServer Inquiry */
9952 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
9954 pring->prt[2].type = FC_TYPE_CT;
9955 pring->prt[2].lpfc_sli_rcv_unsol_event =
9956 lpfc_ct_unsol_event;
9957 pring->prt[3].profile = 0; /* Mask 3 */
9958 /* NameServer response */
9959 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
9961 pring->prt[3].type = FC_TYPE_CT;
9962 pring->prt[3].lpfc_sli_rcv_unsol_event =
9963 lpfc_ct_unsol_event;
9968 * lpfc_sli_setup - SLI ring setup function
9969 * @phba: Pointer to HBA context object.
9971 * lpfc_sli_setup sets up rings of the SLI interface with
9972 * the number of iocbs per ring and iotags. This function is
9973 * called during driver attach to the HBA, before the
9974 * interrupts are enabled. So there is no need for locking.
9976 * This function always returns 0. SLI3 only.
9979 lpfc_sli_setup(struct lpfc_hba *phba)
9981 int i, totiocbsize = 0;
9982 struct lpfc_sli *psli = &phba->sli;
9983 struct lpfc_sli_ring *pring;
9985 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
9988 psli->iocbq_lookup = NULL;
9989 psli->iocbq_lookup_len = 0;
9990 psli->last_iotag = 0;
9992 for (i = 0; i < psli->num_rings; i++) {
9993 pring = &psli->sli3_ring[i];
9994 switch (i) {
9995 case LPFC_FCP_RING: /* ring 0 - FCP */
9996 /* numCiocb and numRiocb are used in config_port */
9997 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
9998 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
9999 pring->sli.sli3.numCiocb +=
10000 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10001 pring->sli.sli3.numRiocb +=
10002 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10003 pring->sli.sli3.numCiocb +=
10004 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10005 pring->sli.sli3.numRiocb +=
10006 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10007 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10008 SLI3_IOCB_CMD_SIZE :
10009 SLI2_IOCB_CMD_SIZE;
10010 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10011 SLI3_IOCB_RSP_SIZE :
10012 SLI2_IOCB_RSP_SIZE;
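/*
 * The FCP ring also absorbs the R1/R3 extra-entry budgets added
 * above, so it carries the bulk of the SLI2/SLI3 SLIM space; the
 * per-entry sizes differ between SLI2 and SLI3.
 */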
10013 pring->iotag_ctr = 0;
10014 pring->iotag_max =
10015 (phba->cfg_hba_queue_depth * 2);
10016 pring->fast_iotag = pring->iotag_max;
10017 pring->num_mask = 0;
10018 break;
10019 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
10020 /* numCiocb and numRiocb are used in config_port */
10021 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10022 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10023 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10024 SLI3_IOCB_CMD_SIZE :
10025 SLI2_IOCB_CMD_SIZE;
10026 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10027 SLI3_IOCB_RSP_SIZE :
10028 SLI2_IOCB_RSP_SIZE;
10029 pring->iotag_max = phba->cfg_hba_queue_depth;
10030 pring->num_mask = 0;
10031 break;
10032 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
10033 /* numCiocb and numRiocb are used in config_port */
10034 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10035 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10036 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10037 SLI3_IOCB_CMD_SIZE :
10038 SLI2_IOCB_CMD_SIZE;
10039 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10040 SLI3_IOCB_RSP_SIZE :
10041 SLI2_IOCB_RSP_SIZE;
10042 pring->fast_iotag = 0;
10043 pring->iotag_ctr = 0;
10044 pring->iotag_max = 4096;
10045 pring->lpfc_sli_rcv_async_status =
10046 lpfc_sli_async_event_handler;
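/*
 * Slow-path ASYNC iocbs (e.g. temperature and ABTS-failure
 * notifications) arrive on this ELS ring and are decoded by
 * lpfc_sli_async_event_handler above.
 */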
10047 pring->num_mask = LPFC_MAX_RING_MASK;
10048 pring->prt[0].profile = 0; /* Mask 0 */
10049 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10050 pring->prt[0].type = FC_TYPE_ELS;
10051 pring->prt[0].lpfc_sli_rcv_unsol_event =
10052 lpfc_els_unsol_event;
10053 pring->prt[1].profile = 0; /* Mask 1 */
10054 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10055 pring->prt[1].type = FC_TYPE_ELS;
10056 pring->prt[1].lpfc_sli_rcv_unsol_event =
10057 lpfc_els_unsol_event;
10058 pring->prt[2].profile = 0; /* Mask 2 */
10059 /* NameServer Inquiry */
10060 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10062 pring->prt[2].type = FC_TYPE_CT;
10063 pring->prt[2].lpfc_sli_rcv_unsol_event =
10064 lpfc_ct_unsol_event;
10065 pring->prt[3].profile = 0; /* Mask 3 */
10066 /* NameServer response */
10067 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10069 pring->prt[3].type = FC_TYPE_CT;
10070 pring->prt[3].lpfc_sli_rcv_unsol_event =
10071 lpfc_ct_unsol_event;
10072 break;
10073 }
10074 totiocbsize += (pring->sli.sli3.numCiocb *
10075 pring->sli.sli3.sizeCiocb) +
10076 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
10077 }
10078 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
10079 /* Too many cmd / rsp ring entries in SLI2 SLIM */
10080 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10081 "SLI2 SLIM Data: x%x x%lx\n",
10082 phba->brd_no, totiocbsize,
10083 (unsigned long) MAX_SLIM_IOCB_SIZE);
10084 }
10085 if (phba->cfg_multi_ring_support == 2)
10086 lpfc_extra_ring_setup(phba);
10092 * lpfc_sli4_queue_init - Queue initialization function
10093 * @phba: Pointer to HBA context object.
10095 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
10096 * ring. This function also initializes ring indices of each ring.
10097 * This function is called during the initialization of the SLI
10098 * interface of an HBA.
10099 * This function is called with no lock held.
10103 lpfc_sli4_queue_init(struct lpfc_hba *phba)
10105 struct lpfc_sli *psli;
10106 struct lpfc_sli_ring *pring;
10110 spin_lock_irq(&phba->hbalock);
10111 INIT_LIST_HEAD(&psli->mboxq);
10112 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10113 /* Initialize list headers for txq and txcmplq as double linked lists */
10114 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
10115 pring = phba->sli4_hba.fcp_wq[i]->pring;
10117 pring->ringno = LPFC_FCP_RING;
10118 INIT_LIST_HEAD(&pring->txq);
10119 INIT_LIST_HEAD(&pring->txcmplq);
10120 INIT_LIST_HEAD(&pring->iocb_continueq);
10121 spin_lock_init(&pring->ring_lock);
10123 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
10124 pring = phba->sli4_hba.nvme_wq[i]->pring;
10126 pring->ringno = LPFC_FCP_RING;
10127 INIT_LIST_HEAD(&pring->txq);
10128 INIT_LIST_HEAD(&pring->txcmplq);
10129 INIT_LIST_HEAD(&pring->iocb_continueq);
10130 spin_lock_init(&pring->ring_lock);
10132 pring = phba->sli4_hba.els_wq->pring;
10134 pring->ringno = LPFC_ELS_RING;
10135 INIT_LIST_HEAD(&pring->txq);
10136 INIT_LIST_HEAD(&pring->txcmplq);
10137 INIT_LIST_HEAD(&pring->iocb_continueq);
10138 spin_lock_init(&pring->ring_lock);
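/*
 * Under SLI4 each work queue keeps its own shadow "ring" so the
 * legacy txq/txcmplq list handling still applies; ring_lock guards
 * those lists per queue rather than one global ring lock.
 */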
10140 if (phba->cfg_nvme_io_channel) {
10141 pring = phba->sli4_hba.nvmels_wq->pring;
10143 pring->ringno = LPFC_ELS_RING;
10144 INIT_LIST_HEAD(&pring->txq);
10145 INIT_LIST_HEAD(&pring->txcmplq);
10146 INIT_LIST_HEAD(&pring->iocb_continueq);
10147 spin_lock_init(&pring->ring_lock);
10150 if (phba->cfg_fof) {
10151 pring = phba->sli4_hba.oas_wq->pring;
10153 pring->ringno = LPFC_FCP_RING;
10154 INIT_LIST_HEAD(&pring->txq);
10155 INIT_LIST_HEAD(&pring->txcmplq);
10156 INIT_LIST_HEAD(&pring->iocb_continueq);
10157 spin_lock_init(&pring->ring_lock);
10160 spin_unlock_irq(&phba->hbalock);
10164 * lpfc_sli_queue_init - Queue initialization function
10165 * @phba: Pointer to HBA context object.
10167 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
10168 * ring. This function also initializes ring indices of each ring.
10169 * This function is called during the initialization of the SLI
10170 * interface of an HBA.
10171 * This function is called with no lock held.
10175 lpfc_sli_queue_init(struct lpfc_hba *phba)
10177 struct lpfc_sli *psli;
10178 struct lpfc_sli_ring *pring;
10182 spin_lock_irq(&phba->hbalock);
10183 INIT_LIST_HEAD(&psli->mboxq);
10184 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10185 /* Initialize list headers for txq and txcmplq as double linked lists */
10186 for (i = 0; i < psli->num_rings; i++) {
10187 pring = &psli->sli3_ring[i];
10189 pring->sli.sli3.next_cmdidx = 0;
10190 pring->sli.sli3.local_getidx = 0;
10191 pring->sli.sli3.cmdidx = 0;
10192 INIT_LIST_HEAD(&pring->iocb_continueq);
10193 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
10194 INIT_LIST_HEAD(&pring->postbufq);
10196 INIT_LIST_HEAD(&pring->txq);
10197 INIT_LIST_HEAD(&pring->txcmplq);
10198 spin_lock_init(&pring->ring_lock);
10200 spin_unlock_irq(&phba->hbalock);
10204 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
10205 * @phba: Pointer to HBA context object.
10207 * This routine flushes the mailbox command subsystem. It will unconditionally
10208 * flush all the mailbox commands in the three possible stages in the mailbox
10209 * command sub-system: the pending mailbox command queue; the outstanding
10210 * mailbox command; and the completed mailbox command queue. It is the
10211 * caller's responsibility to make sure that the driver is in the proper
10212 * state to flush the mailbox command sub-system. Namely, the posting of
10213 * mailbox commands into the pending mailbox command queue from the various
10214 * clients must be stopped; either the HBA is in a state that it will never
10215 * work on the outstanding mailbox command (such as in EEH or ERATT
10216 * conditions) or the outstanding mailbox command has been completed.
10219 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
10221 LIST_HEAD(completions);
10222 struct lpfc_sli *psli = &phba->sli;
10224 unsigned long iflag;
10226 /* Flush all the mailbox commands in the mbox system */
10227 spin_lock_irqsave(&phba->hbalock, iflag);
10228 /* The pending mailbox command queue */
10229 list_splice_init(&phba->sli.mboxq, &completions);
10230 /* The outstanding active mailbox command */
10231 if (psli->mbox_active) {
10232 list_add_tail(&psli->mbox_active->list, &completions);
10233 psli->mbox_active = NULL;
10234 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10236 /* The completed mailbox command queue */
10237 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
10238 spin_unlock_irqrestore(&phba->hbalock, iflag);
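/*
 * The lists are spliced under hbalock but the commands are
 * completed below outside of it, since mbox_cmpl handlers may
 * re-enter the SLI layer and take hbalock themselves.
 */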
10240 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
10241 while (!list_empty(&completions)) {
10242 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
10243 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
10244 if (pmb->mbox_cmpl)
10245 pmb->mbox_cmpl(phba, pmb);
10250 * lpfc_sli_host_down - Vport cleanup function
10251 * @vport: Pointer to virtual port object.
10253 * lpfc_sli_host_down is called to clean up the resources
10254 * associated with a vport before destroying virtual
10255 * port data structures.
10256 * This function does the following operations:
10257 * - Free discovery resources associated with this virtual
10258 * port.
10259 * - Free iocbs associated with this virtual port in
10260 * the txq.
10261 * - Send abort for all iocb commands associated with this
10262 * vport in txcmplq.
10264 * This function is called with no lock held and always returns 1.
10267 lpfc_sli_host_down(struct lpfc_vport *vport)
10269 LIST_HEAD(completions);
10270 struct lpfc_hba *phba = vport->phba;
10271 struct lpfc_sli *psli = &phba->sli;
10272 struct lpfc_queue *qp = NULL;
10273 struct lpfc_sli_ring *pring;
10274 struct lpfc_iocbq *iocb, *next_iocb;
10276 unsigned long flags = 0;
10277 uint16_t prev_pring_flag;
10279 lpfc_cleanup_discovery_resources(vport);
10281 spin_lock_irqsave(&phba->hbalock, flags);
10284 * Error everything on the txq since these iocbs
10285 * have not been given to the FW yet.
10286 * Also issue ABTS for everything on the txcmplq
10288 if (phba->sli_rev != LPFC_SLI_REV4) {
10289 for (i = 0; i < psli->num_rings; i++) {
10290 pring = &psli->sli3_ring[i];
10291 prev_pring_flag = pring->flag;
10292 /* Only slow rings */
10293 if (pring->ringno == LPFC_ELS_RING) {
10294 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10295 /* Set the lpfc data pending flag */
10296 set_bit(LPFC_DATA_READY, &phba->data_flags);
10297 }
10298 list_for_each_entry_safe(iocb, next_iocb,
10299 &pring->txq, list) {
10300 if (iocb->vport != vport)
10301 continue;
10302 list_move_tail(&iocb->list, &completions);
10303 }
10304 list_for_each_entry_safe(iocb, next_iocb,
10305 &pring->txcmplq, list) {
10306 if (iocb->vport != vport)
10307 continue;
10308 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10309 }
10310 pring->flag = prev_pring_flag;
10311 }
10312 } else {
10313 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10314 pring = qp->pring;
10315 if (!pring)
10316 continue;
10317 if (pring == phba->sli4_hba.els_wq->pring) {
10318 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10319 /* Set the lpfc data pending flag */
10320 set_bit(LPFC_DATA_READY, &phba->data_flags);
10321 }
10322 prev_pring_flag = pring->flag;
10323 spin_lock_irq(&pring->ring_lock);
10324 list_for_each_entry_safe(iocb, next_iocb,
10325 &pring->txq, list) {
10326 if (iocb->vport != vport)
10327 continue;
10328 list_move_tail(&iocb->list, &completions);
10329 }
10330 spin_unlock_irq(&pring->ring_lock);
10331 list_for_each_entry_safe(iocb, next_iocb,
10332 &pring->txcmplq, list) {
10333 if (iocb->vport != vport)
10334 continue;
10335 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10336 }
10337 pring->flag = prev_pring_flag;
10338 }
10339 }
10340 spin_unlock_irqrestore(&phba->hbalock, flags);
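/*
 * Everything collected from the txq lists above is failed locally
 * below (the firmware never saw those iocbs), while txcmplq
 * entries were aborted individually via lpfc_sli_issue_abort_iotag.
 */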
10342 /* Cancel all the IOCBs from the completions list */
10343 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10344 IOERR_SLI_DOWN);
10349 * lpfc_sli_hba_down - Resource cleanup function for the HBA
10350 * @phba: Pointer to HBA context object.
10352 * This function cleans up all iocb, buffers, mailbox commands
10353 * while shutting down the HBA. This function is called with no
10354 * lock held and always returns 1.
10355 * This function does the following to cleanup driver resources:
10356 * - Free discovery resources for each virtual port
10357 * - Cleanup any pending fabric iocbs
10358 * - Iterate through the iocb txq and free each entry
10359 * in the list.
10360 * - Free up any buffer posted to the HBA
10361 * - Free mailbox commands in the mailbox queue.
10364 lpfc_sli_hba_down(struct lpfc_hba *phba)
10366 LIST_HEAD(completions);
10367 struct lpfc_sli *psli = &phba->sli;
10368 struct lpfc_queue *qp = NULL;
10369 struct lpfc_sli_ring *pring;
10370 struct lpfc_dmabuf *buf_ptr;
10371 unsigned long flags = 0;
10374 /* Shutdown the mailbox command sub-system */
10375 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
10377 lpfc_hba_down_prep(phba);
10379 lpfc_fabric_abort_hba(phba);
10381 spin_lock_irqsave(&phba->hbalock, flags);
10384 * Error everything on the txq since these iocbs
10385 * have not been given to the FW yet.
10387 if (phba->sli_rev != LPFC_SLI_REV4) {
10388 for (i = 0; i < psli->num_rings; i++) {
10389 pring = &psli->sli3_ring[i];
10390 /* Only slow rings */
10391 if (pring->ringno == LPFC_ELS_RING) {
10392 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10393 /* Set the lpfc data pending flag */
10394 set_bit(LPFC_DATA_READY, &phba->data_flags);
10395 }
10396 list_splice_init(&pring->txq, &completions);
10397 }
10398 } else {
10399 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10400 pring = qp->pring;
10401 if (!pring)
10402 continue;
10403 spin_lock_irq(&pring->ring_lock);
10404 list_splice_init(&pring->txq, &completions);
10405 spin_unlock_irq(&pring->ring_lock);
10406 if (pring == phba->sli4_hba.els_wq->pring) {
10407 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10408 /* Set the lpfc data pending flag */
10409 set_bit(LPFC_DATA_READY, &phba->data_flags);
10410 }
10411 }
10412 }
10413 spin_unlock_irqrestore(&phba->hbalock, flags);
10415 /* Cancel all the IOCBs from the completions list */
10416 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10417 IOERR_SLI_DOWN);
10419 spin_lock_irqsave(&phba->hbalock, flags);
10420 list_splice_init(&phba->elsbuf, &completions);
10421 phba->elsbuf_cnt = 0;
10422 phba->elsbuf_prev_cnt = 0;
10423 spin_unlock_irqrestore(&phba->hbalock, flags);
10425 while (!list_empty(&completions)) {
10426 list_remove_head(&completions, buf_ptr,
10427 struct lpfc_dmabuf, list);
10428 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
10429 kfree(buf_ptr);
10430 }
10432 /* Return any active mbox cmds */
10433 del_timer_sync(&psli->mbox_tmo);
10435 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
10436 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
10437 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
10443 * lpfc_sli_pcimem_bcopy - SLI memory copy function
10444 * @srcp: Source memory pointer.
10445 * @destp: Destination memory pointer.
10446 * @cnt: Number of bytes to be copied (a multiple of the word size).
10448 * This function is used for copying data between driver memory
10449 * and the SLI memory. This function also changes the endianness
10450 * of each word if native endianness is different from SLI
10451 * endianness. This function can be called with or without
10452 * lock.
10455 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
10457 uint32_t *src = srcp;
10458 uint32_t *dest = destp;
10462 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
10464 ldata = le32_to_cpu(ldata);
10473 * lpfc_sli_bemem_bcopy - SLI memory copy function
10474 * @srcp: Source memory pointer.
10475 * @destp: Destination memory pointer.
10476 * @cnt: Number of bytes to be copied (a multiple of the word size).
10478 * This function is used for copying data from a data structure
10479 * with big endian representation to local endianness.
10480 * This function can be called with or without lock.
10483 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
10485 uint32_t *src = srcp;
10486 uint32_t *dest = destp;
10490 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10492 ldata = be32_to_cpu(ldata);
10500 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
10501 * @phba: Pointer to HBA context object.
10502 * @pring: Pointer to driver SLI ring object.
10503 * @mp: Pointer to driver buffer object.
10505 * This function is called with no lock held.
10506 * It always returns zero after adding the buffer to the postbufq
10507 * successfully.
10510 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10511 struct lpfc_dmabuf *mp)
10513 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
10514 later */
10515 spin_lock_irq(&phba->hbalock);
10516 list_add_tail(&mp->list, &pring->postbufq);
10517 pring->postbufq_cnt++;
10518 spin_unlock_irq(&phba->hbalock);
10523 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
10524 * @phba: Pointer to HBA context object.
10526 * When HBQ is enabled, buffers are searched based on tags. This function
10527 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
10528 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
10529 * does not conflict with tags of buffer posted for unsolicited events.
10530 * The function returns the allocated tag. The function is called with
10531 * no locks held.
10534 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10536 spin_lock_irq(&phba->hbalock);
10537 phba->buffer_tag_count++;
10539 * Always set the QUE_BUFTAG_BIT to distinguish this tag
10540 * from a tag assigned by HBQ.
10542 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
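/*
 * Worked example, assuming QUE_BUFTAG_BIT is bit 31: a
 * buffer_tag_count of 5 yields tag 0x80000005, which cannot
 * collide with an HBQ-assigned tag lacking the high bit.
 */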
10543 spin_unlock_irq(&phba->hbalock);
10544 return phba->buffer_tag_count;
10548 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
10549 * @phba: Pointer to HBA context object.
10550 * @pring: Pointer to driver SLI ring object.
10551 * @tag: Buffer tag.
10553 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
10554 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
10555 * iocb is posted to the response ring with the tag of the buffer.
10556 * This function searches the pring->postbufq list using the tag
10557 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
10558 * iocb. If the buffer is found then lpfc_dmabuf object of the
10559 * buffer is returned to the caller else NULL is returned.
10560 * This function is called with no lock held.
10562 struct lpfc_dmabuf *
10563 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10564 uint32_t tag)
10565 {
10566 struct lpfc_dmabuf *mp, *next_mp;
10567 struct list_head *slp = &pring->postbufq;
10569 /* Search postbufq, from the beginning, looking for a match on tag */
10570 spin_lock_irq(&phba->hbalock);
10571 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10572 if (mp->buffer_tag == tag) {
10573 list_del_init(&mp->list);
10574 pring->postbufq_cnt--;
10575 spin_unlock_irq(&phba->hbalock);
10576 return mp;
10577 }
10578 }
10580 spin_unlock_irq(&phba->hbalock);
10581 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10582 "0402 Cannot find virtual addr for buffer tag on "
10583 "ring %d Data x%lx x%p x%p x%x\n",
10584 pring->ringno, (unsigned long) tag,
10585 slp->next, slp->prev, pring->postbufq_cnt);
10587 return NULL;
10588 }
10591 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
10592 * @phba: Pointer to HBA context object.
10593 * @pring: Pointer to driver SLI ring object.
10594 * @phys: DMA address of the buffer.
10596 * This function searches the buffer list using the dma_address
10597 * of unsolicited event to find the driver's lpfc_dmabuf object
10598 * corresponding to the dma_address. The function returns the
10599 * lpfc_dmabuf object if a buffer is found else it returns NULL.
10600 * This function is called by the ct and els unsolicited event
10601 * handlers to get the buffer associated with the unsolicited
10602 * event.
10604 * This function is called with no lock held.
10606 struct lpfc_dmabuf *
10607 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10608 dma_addr_t phys)
10609 {
10610 struct lpfc_dmabuf *mp, *next_mp;
10611 struct list_head *slp = &pring->postbufq;
10613 /* Search postbufq, from the beginning, looking for a match on phys */
10614 spin_lock_irq(&phba->hbalock);
10615 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10616 if (mp->phys == phys) {
10617 list_del_init(&mp->list);
10618 pring->postbufq_cnt--;
10619 spin_unlock_irq(&phba->hbalock);
10620 return mp;
10621 }
10622 }
10624 spin_unlock_irq(&phba->hbalock);
10625 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10626 "0410 Cannot find virtual addr for mapped buf on "
10627 "ring %d Data x%llx x%p x%p x%x\n",
10628 pring->ringno, (unsigned long long)phys,
10629 slp->next, slp->prev, pring->postbufq_cnt);
10631 return NULL;
10632 }
10634 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
10635 * @phba: Pointer to HBA context object.
10636 * @cmdiocb: Pointer to driver command iocb object.
10637 * @rspiocb: Pointer to driver response iocb object.
10639 * This function is the completion handler for the abort iocbs for
10640 * ELS commands. This function is called from the ELS ring event
10641 * handler with no lock held. This function frees memory resources
10642 * associated with the abort iocb.
10645 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10646 struct lpfc_iocbq *rspiocb)
10648 IOCB_t *irsp = &rspiocb->iocb;
10649 uint16_t abort_iotag, abort_context;
10650 struct lpfc_iocbq *abort_iocb = NULL;
10652 if (irsp->ulpStatus) {
10655 * Assume that the port already completed and returned, or
10656 * will return the iocb. Just log the message.
10658 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
10659 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
10661 spin_lock_irq(&phba->hbalock);
10662 if (phba->sli_rev < LPFC_SLI_REV4) {
10663 if (abort_iotag != 0 &&
10664 abort_iotag <= phba->sli.last_iotag)
10665 abort_iocb =
10666 phba->sli.iocbq_lookup[abort_iotag];
10667 } else
10668 /* For sli4 the abort_tag is the XRI,
10669 * so the abort routine puts the iotag of the iocb
10670 * being aborted in the context field of the abort
10671 * iocb.
10673 abort_iocb = phba->sli.iocbq_lookup[abort_context];
10675 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
10676 "0327 Cannot abort els iocb %p "
10677 "with tag %x context %x, abort status %x, "
10679 abort_iocb, abort_iotag, abort_context,
10680 irsp->ulpStatus, irsp->un.ulpWord[4]);
10682 spin_unlock_irq(&phba->hbalock);
10683 }
10684 lpfc_sli_release_iocbq(phba, cmdiocb);
10689 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
10690 * @phba: Pointer to HBA context object.
10691 * @cmdiocb: Pointer to driver command iocb object.
10692 * @rspiocb: Pointer to driver response iocb object.
10694 * The function is called from SLI ring event handler with no
10695 * lock held. This function is the completion handler for ELS commands
10696 * which are aborted. The function frees memory resources used for
10697 * the aborted ELS commands.
10700 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10701 struct lpfc_iocbq *rspiocb)
10703 IOCB_t *irsp = &rspiocb->iocb;
10705 /* ELS cmd tag <ulpIoTag> completes */
10706 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10707 "0139 Ignoring ELS cmd tag x%x completion Data: "
10709 irsp->ulpIoTag, irsp->ulpStatus,
10710 irsp->un.ulpWord[4], irsp->ulpTimeout);
10711 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
10712 lpfc_ct_free_iocb(phba, cmdiocb);
10714 lpfc_els_free_iocb(phba, cmdiocb);
10719 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
10720 * @phba: Pointer to HBA context object.
10721 * @pring: Pointer to driver SLI ring object.
10722 * @cmdiocb: Pointer to driver command iocb object.
10724 * This function issues an abort iocb for the provided command iocb down to
10725 * the port. Unless the outstanding command iocb is itself an abort
10726 * request, this function issues the abort unconditionally. This function is
10727 * called with hbalock held. The function returns 0 when it fails due to
10728 * memory allocation failure or when the command iocb is an abort request.
10731 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10732 struct lpfc_iocbq *cmdiocb)
10734 struct lpfc_vport *vport = cmdiocb->vport;
10735 struct lpfc_iocbq *abtsiocbp;
10736 IOCB_t *icmd = NULL;
10737 IOCB_t *iabt = NULL;
10739 unsigned long iflags;
10741 lockdep_assert_held(&phba->hbalock);
10744 * There are certain command types we don't want to abort. And we
10745 * don't want to abort commands that are already in the process of
10746 * being aborted.
10748 icmd = &cmdiocb->iocb;
10749 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
10750 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
10751 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10752 return 0;
10754 /* issue ABTS for this IOCB based on iotag */
10755 abtsiocbp = __lpfc_sli_get_iocbq(phba);
10756 if (abtsiocbp == NULL)
10757 return 0;
10759 /* This signals the response to set the correct status
10760 * before calling the completion handler
10762 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
10764 iabt = &abtsiocbp->iocb;
10765 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
10766 iabt->un.acxri.abortContextTag = icmd->ulpContext;
10767 if (phba->sli_rev == LPFC_SLI_REV4) {
10768 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
10769 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
10770 } else
10772 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
10774 iabt->ulpClass = icmd->ulpClass;
10776 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
10777 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
10778 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
10779 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
10780 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
10781 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
10783 if (phba->link_state >= LPFC_LINK_UP)
10784 iabt->ulpCommand = CMD_ABORT_XRI_CN;
10786 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
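/*
 * With the link down an ABTS cannot reach the rport, so CLOSE_XRI
 * is used instead to release the exchange locally without putting
 * an ABTS on the wire.
 */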
10788 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
10789 abtsiocbp->vport = vport;
10791 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
10792 "0339 Abort xri x%x, original iotag x%x, "
10793 "abort cmd iotag x%x\n",
10794 iabt->un.acxri.abortIoTag,
10795 iabt->un.acxri.abortContextTag,
10796 abtsiocbp->iotag);
10798 if (phba->sli_rev == LPFC_SLI_REV4) {
10799 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
10800 if (unlikely(pring == NULL))
10801 return 0;
10802 /* Note: both hbalock and ring_lock need to be set here */
10803 spin_lock_irqsave(&pring->ring_lock, iflags);
10804 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10805 abtsiocbp, 0);
10806 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10807 } else
10808 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10809 abtsiocbp, 0);
10811 if (retval)
10813 __lpfc_sli_release_iocbq(phba, abtsiocbp);
10816 * Caller to this routine should check for IOCB_ERROR
10817 * and handle it properly. This routine no longer removes
10818 * iocb off txcmplq and call compl in case of IOCB_ERROR.
10819 */
10820 return retval;
10821 }
10824 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
10825 * @phba: Pointer to HBA context object.
10826 * @pring: Pointer to driver SLI ring object.
10827 * @cmdiocb: Pointer to driver command iocb object.
10829 * This function issues an abort iocb for the provided command iocb. In case
10830 * of unloading, the abort iocb will not be issued to commands on the ELS
10831 * ring. Instead, the completion callback of those commands is changed
10832 * so that nothing happens when they finish. This function is called with
10833 * hbalock held. The function returns 0 when the command iocb is an abort
10834 * request.
10837 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10838 struct lpfc_iocbq *cmdiocb)
10840 struct lpfc_vport *vport = cmdiocb->vport;
10841 int retval = IOCB_ERROR;
10842 IOCB_t *icmd = NULL;
10844 lockdep_assert_held(&phba->hbalock);
10847 * There are certain command types we don't want to abort. And we
10848 * don't want to abort commands that are already in the process of
10849 * being aborted.
10851 icmd = &cmdiocb->iocb;
10852 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
10853 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
10854 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10858 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
10859 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
10860 else
10861 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
10862 goto abort_iotag_exit;
10866 * If we're unloading, don't abort iocb on the ELS ring, but change
10867 * the callback so that nothing happens when it finishes.
10869 if ((vport->load_flag & FC_UNLOADING) &&
10870 (pring->ringno == LPFC_ELS_RING)) {
10871 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
10872 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
10873 else
10874 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
10875 goto abort_iotag_exit;
10876 }
10878 /* Now, we try to issue the abort to the cmdiocb out */
10879 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
10881 abort_iotag_exit:
10883 * Caller to this routine should check for IOCB_ERROR
10884 * and handle it properly. This routine no longer removes
10885 * iocb off txcmplq and call compl in case of IOCB_ERROR.
10886 */
10887 return retval;
10888 }
10891 * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb
10892 * @phba: Pointer to HBA context object.
10893 * @pring: Pointer to driver SLI ring object.
10894 * @cmdiocb: Pointer to driver command iocb object.
10896 * This function issues an abort iocb for the provided command iocb down to
10897 * the port. Unless the outstanding command iocb is itself an abort
10898 * request, this function issues the abort unconditionally. This function is
10899 * called with hbalock held. The function returns 0 when it fails due to
10900 * memory allocation failure or when the command iocb is an abort request.
10903 lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10904 struct lpfc_iocbq *cmdiocb)
10906 struct lpfc_vport *vport = cmdiocb->vport;
10907 struct lpfc_iocbq *abtsiocbp;
10908 union lpfc_wqe *abts_wqe;
10912 * There are certain command types we don't want to abort. And we
10913 * don't want to abort commands that are already in the process of
10914 * being aborted.
10916 if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
10917 cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
10918 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10919 return 0;
10921 /* issue ABTS for this io based on iotag */
10922 abtsiocbp = __lpfc_sli_get_iocbq(phba);
10923 if (abtsiocbp == NULL)
10924 return 0;
10926 /* This signals the response to set the correct status
10927 * before calling the completion handler
10929 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
10931 /* Complete prepping the abort wqe and issue to the FW. */
10932 abts_wqe = &abtsiocbp->wqe;
10933 bf_set(abort_cmd_ia, &abts_wqe->abort_cmd, 0);
10934 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
10936 /* Explicitly set reserved fields to zero.*/
10937 abts_wqe->abort_cmd.rsrvd4 = 0;
10938 abts_wqe->abort_cmd.rsrvd5 = 0;
10940 /* WQE Common - word 6. Context is XRI tag. Set 0. */
10941 bf_set(wqe_xri_tag, &abts_wqe->abort_cmd.wqe_com, 0);
10942 bf_set(wqe_ctxt_tag, &abts_wqe->abort_cmd.wqe_com, 0);
10945 bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
10946 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
10947 bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
10948 cmdiocb->iocb.ulpClass);
10950 /* word 8 - tell the FW to abort the IO associated with this
10951 * outstanding exchange ID.
10953 abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag;
10955 /* word 9 - this is the iotag for the abts_wqe completion. */
10956 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
10957 abtsiocbp->iotag);
10960 bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, cmdiocb->hba_wqidx);
10961 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
10962 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
10965 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
10966 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
10967 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
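/*
 * Summary of the WQE built above (a reading of the bits, not a
 * normative spec): ia == 0 requests a real ABTS rather than a
 * local-only close, T_XRI_TAG selects abort-by-XRI, and wqec == 1
 * asks for a CQE when the entry is consumed.
 */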
10969 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
10970 abtsiocbp->iocb_flag |= LPFC_IO_NVME;
10971 abtsiocbp->vport = vport;
10972 abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
10973 retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp);
10974 if (retval) {
10975 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
10976 "6147 Failed abts issue_wqe with status x%x "
10978 retval, cmdiocb->sli4_xritag);
10979 lpfc_sli_release_iocbq(phba, abtsiocbp);
10980 return retval;
10981 }
10983 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
10984 "6148 Drv Abort NVME Request Issued for "
10985 "ox_id x%x on reqtag x%x\n",
10986 cmdiocb->sli4_xritag,
10987 abtsiocbp->iotag);
10989 return 0;
10990 }
10993 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
10994 * @phba: pointer to lpfc HBA data structure.
10996 * This routine will abort all pending and outstanding iocbs to an HBA.
10999 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11001 struct lpfc_sli *psli = &phba->sli;
11002 struct lpfc_sli_ring *pring;
11003 struct lpfc_queue *qp = NULL;
11006 if (phba->sli_rev != LPFC_SLI_REV4) {
11007 for (i = 0; i < psli->num_rings; i++) {
11008 pring = &psli->sli3_ring[i];
11009 lpfc_sli_abort_iocb_ring(phba, pring);
11010 }
11011 } else {
11013 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11014 pring = qp->pring;
11015 if (!pring)
11016 continue;
11017 lpfc_sli_abort_iocb_ring(phba, pring);
11018 }
11019 }
11020 }
11022 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
11023 * @iocbq: Pointer to driver iocb object.
11024 * @vport: Pointer to driver virtual port object.
11025 * @tgt_id: SCSI ID of the target.
11026 * @lun_id: LUN ID of the scsi device.
11027 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
11029 * This function acts as an iocb filter for functions which abort or count
11030 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
11031 * 0 if the filtering criteria are met for the given iocb and will return
11032 * 1 if the filtering criteria are not met.
11033 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
11034 * given iocb is for the SCSI device specified by vport, tgt_id and
11035 * lun_id parameter.
11036 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
11037 * given iocb is for the SCSI target specified by vport and tgt_id
11038 * parameter.
11039 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
11040 * given iocb is for the SCSI host associated with the given vport.
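 * A hypothetical caller sketch (names illustrative):
 *   if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
 *                                  LPFC_CTX_LUN) == 0)
 *           abort_or_count(iocbq);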
11041 * This function is called with no locks held.
11044 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11045 uint16_t tgt_id, uint64_t lun_id,
11046 lpfc_ctx_cmd ctx_cmd)
11048 struct lpfc_scsi_buf *lpfc_cmd;
11049 int rc = 1;
11051 if (!(iocbq->iocb_flag & LPFC_IO_FCP))
11052 return rc;
11054 if (iocbq->vport != vport)
11055 return rc;
11057 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
11059 if (lpfc_cmd->pCmd == NULL)
11060 return rc;
11062 switch (ctx_cmd) {
11063 case LPFC_CTX_LUN:
11064 if ((lpfc_cmd->rdata->pnode) &&
11065 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11066 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
11067 rc = 0;
11068 break;
11069 case LPFC_CTX_TGT:
11070 if ((lpfc_cmd->rdata->pnode) &&
11071 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
11072 rc = 0;
11073 break;
11074 case LPFC_CTX_HOST:
11075 rc = 0;
11076 break;
11077 default:
11078 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
11079 __func__, ctx_cmd);
11080 break;
11081 }
11083 return rc;
11084 }
11087 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
11088 * @vport: Pointer to virtual port.
11089 * @tgt_id: SCSI ID of the target.
11090 * @lun_id: LUN ID of the scsi device.
11091 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11093 * This function returns number of FCP commands pending for the vport.
11094 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
11095 * commands pending on the vport associated with SCSI device specified
11096 * by tgt_id and lun_id parameters.
11097 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
11098 * commands pending on the vport associated with SCSI target specified
11099 * by tgt_id parameter.
11100 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
11101 * commands pending on the vport.
11102 * This function returns the number of iocbs which satisfy the filter.
11103 * This function is called without any lock held.
11106 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11107 lpfc_ctx_cmd ctx_cmd)
11109 struct lpfc_hba *phba = vport->phba;
11110 struct lpfc_iocbq *iocbq;
11113 spin_lock_irq(&phba->hbalock);
11114 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11115 iocbq = phba->sli.iocbq_lookup[i];
11117 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
11118 ctx_cmd) == 0)
11119 sum++;
11120 }
11121 spin_unlock_irq(&phba->hbalock);
11123 return sum;
11124 }
11127 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
11128 * @phba: Pointer to HBA context object
11129 * @cmdiocb: Pointer to command iocb object.
11130 * @rspiocb: Pointer to response iocb object.
11132 * This function is called when an aborted FCP iocb completes. This
11133 * function is called by the ring event handler with no lock held.
11134 * This function frees the iocb.
11137 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11138 struct lpfc_iocbq *rspiocb)
11140 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11141 "3096 ABORT_XRI_CN completing on rpi x%x "
11142 "original iotag x%x, abort cmd iotag x%x "
11143 "status 0x%x, reason 0x%x\n",
11144 cmdiocb->iocb.un.acxri.abortContextTag,
11145 cmdiocb->iocb.un.acxri.abortIoTag,
11146 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11147 rspiocb->iocb.un.ulpWord[4]);
11148 lpfc_sli_release_iocbq(phba, cmdiocb);
11149 return;
11150 }
11153 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
11154 * @vport: Pointer to virtual port.
11155 * @pring: Pointer to driver SLI ring object.
11156 * @tgt_id: SCSI ID of the target.
11157 * @lun_id: LUN ID of the scsi device.
11158 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11160 * This function sends an abort command for every SCSI command
11161 * associated with the given virtual port pending on the ring
11162 * filtered by lpfc_sli_validate_fcp_iocb function.
11163 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
11164 * FCP iocbs associated with lun specified by tgt_id and lun_id
11165 * parameters.
11166 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
11167 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11168 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
11169 * FCP iocbs associated with virtual port.
11170 * This function returns number of iocbs it failed to abort.
11171 * This function is called with no locks held.
11174 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11175 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
11177 struct lpfc_hba *phba = vport->phba;
11178 struct lpfc_iocbq *iocbq;
11179 struct lpfc_iocbq *abtsiocb;
11180 struct lpfc_sli_ring *pring_s4;
11181 IOCB_t *cmd = NULL;
11182 int errcnt = 0, ret_val = 0;
11185 for (i = 1; i <= phba->sli.last_iotag; i++) {
11186 iocbq = phba->sli.iocbq_lookup[i];
11188 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11189 abort_cmd) != 0)
11190 continue;
11193 * If the iocbq is already being aborted, don't take a second
11194 * action, but do count it.
11196 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11197 continue;
11199 /* issue ABTS for this IOCB based on iotag */
11200 abtsiocb = lpfc_sli_get_iocbq(phba);
11201 if (abtsiocb == NULL) {
11202 errcnt++;
11203 continue;
11204 }
11206 /* indicate the IO is being aborted by the driver. */
11207 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11209 cmd = &iocbq->iocb;
11210 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11211 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
11212 if (phba->sli_rev == LPFC_SLI_REV4)
11213 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11215 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
11216 abtsiocb->iocb.ulpLe = 1;
11217 abtsiocb->iocb.ulpClass = cmd->ulpClass;
11218 abtsiocb->vport = vport;
11220 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11221 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
11222 if (iocbq->iocb_flag & LPFC_IO_FCP)
11223 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
11224 if (iocbq->iocb_flag & LPFC_IO_FOF)
11225 abtsiocb->iocb_flag |= LPFC_IO_FOF;
11227 if (lpfc_is_link_up(phba))
11228 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11230 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11232 /* Setup callback routine and issue the command. */
11233 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11234 if (phba->sli_rev == LPFC_SLI_REV4) {
11235 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11236 if (pring_s4 == NULL)
11237 continue;
11238 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11239 abtsiocb, 0);
11240 } else
11241 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11242 abtsiocb, 0);
11243 if (ret_val == IOCB_ERROR) {
11244 lpfc_sli_release_iocbq(phba, abtsiocb);
11245 errcnt++;
11246 continue;
11247 }
11248 }
11250 return errcnt;
11251 }
11254 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
11255 * @vport: Pointer to virtual port.
11256 * @pring: Pointer to driver SLI ring object.
11257 * @tgt_id: SCSI ID of the target.
11258 * @lun_id: LUN ID of the scsi device.
11259 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11261 * This function sends an abort command for every SCSI command
11262 * associated with the given virtual port pending on the ring
11263 * filtered by lpfc_sli_validate_fcp_iocb function.
11264 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
11265 * FCP iocbs associated with lun specified by tgt_id and lun_id
11267 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
11268 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11269 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
11270 * FCP iocbs associated with virtual port.
11271 * This function returns the number of iocbs it aborted.
11272 * This function is called with no locks held right after a taskmgmt
11273 * command is sent.
11276 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11277 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
11279 struct lpfc_hba *phba = vport->phba;
11280 struct lpfc_scsi_buf *lpfc_cmd;
11281 struct lpfc_iocbq *abtsiocbq;
11282 struct lpfc_nodelist *ndlp;
11283 struct lpfc_iocbq *iocbq;
11285 int sum, i, ret_val;
11286 unsigned long iflags;
11287 struct lpfc_sli_ring *pring_s4;
11289 spin_lock_irq(&phba->hbalock);
11291 /* all I/Os are in process of being flushed */
11292 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
11293 spin_unlock_irq(&phba->hbalock);
11294 return 0;
11295 }
11296 sum = 0;
11298 for (i = 1; i <= phba->sli.last_iotag; i++) {
11299 iocbq = phba->sli.iocbq_lookup[i];
11301 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11302 cmd) != 0)
11303 continue;
11306 * If the iocbq is already being aborted, don't take a second
11307 * action, but do count it.
11309 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11310 continue;
11312 /* issue ABTS for this IOCB based on iotag */
11313 abtsiocbq = __lpfc_sli_get_iocbq(phba);
11314 if (abtsiocbq == NULL)
11315 continue;
11317 icmd = &iocbq->iocb;
11318 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11319 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
11320 if (phba->sli_rev == LPFC_SLI_REV4)
11321 abtsiocbq->iocb.un.acxri.abortIoTag =
11322 iocbq->sli4_xritag;
11324 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
11325 abtsiocbq->iocb.ulpLe = 1;
11326 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
11327 abtsiocbq->vport = vport;
11329 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11330 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
11331 if (iocbq->iocb_flag & LPFC_IO_FCP)
11332 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
11333 if (iocbq->iocb_flag & LPFC_IO_FOF)
11334 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
11336 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
11337 ndlp = lpfc_cmd->rdata->pnode;
11339 if (lpfc_is_link_up(phba) &&
11340 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
11341 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11343 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11345 /* Setup callback routine and issue the command. */
11346 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11349 * Indicate the IO is being aborted by the driver and set
11350 * the caller's flag into the aborted IO.
11351 */
11352 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11354 if (phba->sli_rev == LPFC_SLI_REV4) {
11355 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11356 if (pring_s4 == NULL)
11357 continue;
11358 /* Note: both hbalock and ring_lock must be set here */
11359 spin_lock_irqsave(&pring_s4->ring_lock, iflags);
11360 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11361 abtsiocbq, 0);
11362 spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
11363 } else
11364 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11365 abtsiocbq, 0);
11369 if (ret_val == IOCB_ERROR)
11370 __lpfc_sli_release_iocbq(phba, abtsiocbq);
11371 else
11372 sum++;
11373 }
11374 spin_unlock_irq(&phba->hbalock);
11375 return sum;
11376 }
11379 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
11380 * @phba: Pointer to HBA context object.
11381 * @cmdiocbq: Pointer to command iocb.
11382 * @rspiocbq: Pointer to response iocb.
11384 * This function is the completion handler for iocbs issued using
11385 * lpfc_sli_issue_iocb_wait function. This function is called by the
11386 * ring event handler function without any lock held. This function
11387 * can be called from both worker thread context and interrupt
11388 * context. This function can also be called from another thread which
11389 * cleans up the SLI layer objects.
11390 * This function copies the contents of the response iocb to the
11391 * response iocb memory object provided by the caller of
11392 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
11393 * sleeps for the iocb completion.
11396 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11397 struct lpfc_iocbq *cmdiocbq,
11398 struct lpfc_iocbq *rspiocbq)
11400 wait_queue_head_t *pdone_q;
11401 unsigned long iflags;
11402 struct lpfc_scsi_buf *lpfc_cmd;
11404 spin_lock_irqsave(&phba->hbalock, iflags);
11405 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
11408 * A time out has occurred for the iocb. If a time out
11409 * completion handler has been supplied, call it. Otherwise,
11410 * just free the iocbq.
11413 spin_unlock_irqrestore(&phba->hbalock, iflags);
11414 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
11415 cmdiocbq->wait_iocb_cmpl = NULL;
11416 if (cmdiocbq->iocb_cmpl)
11417 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
11418 else
11419 lpfc_sli_release_iocbq(phba, cmdiocbq);
11420 return;
11421 }
11423 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
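/*
 * LPFC_IO_WAKE is set under hbalock so the waiter's
 * lpfc_chk_iocb_flg() test, which takes the same lock, cannot
 * observe a half-completed wakeup.
 */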
11424 if (cmdiocbq->context2 && rspiocbq)
11425 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
11426 &rspiocbq->iocb, sizeof(IOCB_t));
11428 /* Set the exchange busy flag for task management commands */
11429 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
11430 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
11431 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
11433 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
11434 }
11436 pdone_q = cmdiocbq->context_un.wait_queue;
11437 if (pdone_q)
11438 wake_up(pdone_q);
11439 spin_unlock_irqrestore(&phba->hbalock, iflags);
11440 return;
11441 }
11444 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
11445 * @phba: Pointer to HBA context object..
11446 * @piocbq: Pointer to command iocb.
11447 * @flag: Flag to test.
11449 * This routine grabs the hbalock and then tests the iocb_flag to
11450 * see if the passed in flag is set.
11451 * Returns:
11452 * 1 if flag is set.
11453 * 0 if flag is not set.
11456 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11457 struct lpfc_iocbq *piocbq, uint32_t flag)
11459 unsigned long iflags;
11462 spin_lock_irqsave(&phba->hbalock, iflags);
11463 ret = piocbq->iocb_flag & flag;
11464 spin_unlock_irqrestore(&phba->hbalock, iflags);
11466 return ret;
11467 }
11470 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
11471 * @phba: Pointer to HBA context object..
11472 * @pring: Pointer to sli ring.
11473 * @piocb: Pointer to command iocb.
11474 * @prspiocbq: Pointer to response iocb.
11475 * @timeout: Timeout in number of seconds.
11477 * This function issues the iocb to firmware and waits for the
11478 * iocb to complete. The iocb_cmpl field of the iocb shall be used
11479 * to handle iocbs which time out. If the field is NULL, the
11480 * function shall free the iocbq structure. If more clean up is
11481 * needed, the caller is expected to provide a completion function
11482 * that will provide the needed clean up. If the iocb command is
11483 * not completed within timeout seconds, the function will either
11484 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
11485 * completion function set in the iocb_cmpl field and then return
11486 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
11487 * resources if this function returns IOCB_TIMEDOUT.
11488 * The function waits for the iocb completion using a
11489 * non-interruptible wait.
11490 * This function will sleep while waiting for iocb completion.
11491 * So, this function should not be called from any context which
11492 * does not allow sleeping. Due to the same reason, this function
11493 * cannot be called with interrupt disabled.
11494 * This function assumes that the iocb completions occur while
11495 * this function sleeps. So, this function cannot be called from
11496 * the thread which processes iocb completion for this ring.
11497 * This function clears the iocb_flag of the iocb object before
11498 * issuing the iocb and the iocb completion handler sets this
11499 * flag and wakes this thread when the iocb completes.
11500 * The contents of the response iocb will be copied to prspiocbq
11501 * by the completion handler when the command completes.
11502 * This function returns IOCB_SUCCESS on success.
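 * A typical call pattern (illustrative only) is:
 *   rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, piocb,
 *                                 prspiocbq, timeout);
 *   if (rc == IOCB_TIMEDOUT)
 *           return; (the completion path owns piocb in this case)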
11503 * This function is called with no lock held.
11506 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
11507 uint32_t ring_number,
11508 struct lpfc_iocbq *piocb,
11509 struct lpfc_iocbq *prspiocbq,
11510 uint32_t timeout)
11511 {
11512 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
11513 long timeleft, timeout_req = 0;
11514 int retval = IOCB_SUCCESS;
11516 struct lpfc_iocbq *iocb;
11518 int txcmplq_cnt = 0;
11519 struct lpfc_sli_ring *pring;
11520 unsigned long iflags;
11521 bool iocb_completed = true;
11523 if (phba->sli_rev >= LPFC_SLI_REV4)
11524 pring = lpfc_sli4_calc_ring(phba, piocb);
11525 else
11526 pring = &phba->sli.sli3_ring[ring_number];
11528 * If the caller has provided a response iocbq buffer, then context2
11529 * is NULL or it's an error.
11530 */
11531 if (prspiocbq) {
11532 if (piocb->context2)
11533 return IOCB_ERROR;
11534 piocb->context2 = prspiocbq;
11535 }
11537 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
11538 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
11539 piocb->context_un.wait_queue = &done_q;
11540 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
11542 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
11543 if (lpfc_readl(phba->HCregaddr, &creg_val))
11544 return IOCB_ERROR;
11545 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
11546 writel(creg_val, phba->HCregaddr);
11547 readl(phba->HCregaddr); /* flush */
11550 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
11551 SLI_IOCB_RET_IOCB);
11552 if (retval == IOCB_SUCCESS) {
11553 timeout_req = msecs_to_jiffies(timeout * 1000);
11554 timeleft = wait_event_timeout(done_q,
11555 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
11556 timeout_req);
11557 spin_lock_irqsave(&phba->hbalock, iflags);
11558 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
11561 * IOCB timed out. Inform the wake iocb wait
11562 * completion function and set local status
11563 * to IOCB_TIMEDOUT.
11564 */
11565 iocb_completed = false;
11566 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
11567 }
11568 spin_unlock_irqrestore(&phba->hbalock, iflags);
11569 if (iocb_completed) {
11570 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11571 "0331 IOCB wake signaled\n");
11572 /* Note: we are not indicating if the IOCB has a success
11573 * status or not - that's for the caller to check.
11574 * IOCB_SUCCESS means just that the command was sent and
11575 * completed. Not that it completed successfully.
11576 */
11577 } else if (timeleft == 0) {
11578 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11579 "0338 IOCB wait timeout error - no "
11580 "wake response Data x%x\n", timeout);
11581 retval = IOCB_TIMEDOUT;
11583 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11584 "0330 IOCB wake NOT set, "
11586 timeout, (timeleft / jiffies));
11587 retval = IOCB_TIMEDOUT;
11588 }
11589 } else if (retval == IOCB_BUSY) {
11590 if (phba->cfg_log_verbose & LOG_SLI) {
11591 list_for_each_entry(iocb, &pring->txq, list) {
11592 txq_cnt++;
11593 }
11594 list_for_each_entry(iocb, &pring->txcmplq, list) {
11595 txcmplq_cnt++;
11596 }
11597 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11598 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
11599 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
11600 }
11601 return retval;
11602 } else {
11603 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11604 "0332 IOCB wait issue failed, Data x%x\n",
11606 retval = IOCB_ERROR;
11607 }
11609 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
11610 if (lpfc_readl(phba->HCregaddr, &creg_val))
11611 return IOCB_ERROR;
11612 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
11613 writel(creg_val, phba->HCregaddr);
11614 readl(phba->HCregaddr); /* flush */
11615 }
11617 if (prspiocbq)
11618 piocb->context2 = NULL;
11620 piocb->context_un.wait_queue = NULL;
11621 piocb->iocb_cmpl = NULL;
11622 return retval;
11623 }
11626 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
11627 * @phba: Pointer to HBA context object.
11628 * @pmboxq: Pointer to driver mailbox object.
11629 * @timeout: Timeout in number of seconds.
11631 * This function issues the mailbox to firmware and waits for the
11632 * mailbox command to complete. If the mailbox command is not
11633 * completed within timeout seconds, it returns MBX_TIMEOUT.
11634 * The function waits for the mailbox completion using an
11635 * interruptible wait. If the thread is woken up due to a
11636 * signal, MBX_TIMEOUT error is returned to the caller. Caller
11637 * should not free the mailbox resources if this function returns
11638 * MBX_TIMEOUT.
11639 * This function will sleep while waiting for mailbox completion.
11640 * So, this function should not be called from any context which
11641 * does not allow sleeping. Due to the same reason, this function
11642 * cannot be called with interrupts disabled.
11643 * This function assumes that the mailbox completion occurs while
11644 * this function sleeps. So, this function cannot be called from
11645 * the worker thread which processes mailbox completion.
11646 * This function is called in the context of HBA management
11648 * This function returns MBX_SUCCESS when successful.
11649 * This function is called with no lock held.
11652 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
11655 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
11656 MAILBOX_t *mb = NULL;
11658 unsigned long flag;
11660 /* The caller might set context1 for extended buffer */
11661 if (pmboxq->context1)
11662 mb = (MAILBOX_t *)pmboxq->context1;
11664 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
11665 /* setup wake call as IOCB callback */
11666 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
11667 /* setup context field to pass wait_queue pointer to wake function */
11668 pmboxq->context1 = &done_q;
11670 /* now issue the command */
11671 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
11672 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
11673 wait_event_interruptible_timeout(done_q,
11674 pmboxq->mbox_flag & LPFC_MBX_WAKE,
11675 msecs_to_jiffies(timeout * 1000));
11677 spin_lock_irqsave(&phba->hbalock, flag);
11678 /* restore the possible extended buffer so its resources can be freed */
11679 pmboxq->context1 = (uint8_t *)mb;
11681 * If the LPFC_MBX_WAKE flag is set, the mailbox has completed;
11682 * otherwise, do not free the resources.
11684 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
11685 retval = MBX_SUCCESS;
11687 retval = MBX_TIMEOUT;
11688 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
11690 spin_unlock_irqrestore(&phba->hbalock, flag);
11692 /* restore the possible extended buffer so its resources can be freed */
11693 pmboxq->context1 = (uint8_t *)mb;
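
/*
 * Editor's sketch of a caller honoring the MBX_TIMEOUT ownership rule
 * documented above: on timeout the mailbox must not be freed, since the
 * deferred completion handler will release it. The helper name and the
 * empty command setup are illustrative assumptions.
 */
static int example_issue_mbox_sync(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int rc;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;
	memset(pmboxq, 0, sizeof(*pmboxq));

	/* ... fill in pmboxq->u.mb / pmboxq->u.mqe for the command ... */

	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq,
				      lpfc_mbox_tmo_val(phba, pmboxq));
	if (rc != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	/* on MBX_TIMEOUT, lpfc_sli_def_mbox_cmpl frees it later */
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}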
11700 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
11701 * @phba: Pointer to HBA context.
11703 * This function is called to shutdown the driver's mailbox sub-system.
11704 * It first marks the mailbox sub-system as in a blocked state to prevent
11705 * any asynchronous mailbox command from being issued off the pending mailbox
11706 * command queue. If the mailbox command sub-system shutdown is due to
11707 * HBA error conditions such as EEH or ERATT, this routine shall invoke
11708 * the mailbox sub-system flush routine to forcefully bring down the
11709 * mailbox sub-system. Otherwise, if it is due to a normal condition (such
11710 * as with offline or HBA function reset), this routine will wait for the
11711 * outstanding mailbox command to complete before invoking the mailbox
11712 * sub-system flush routine to gracefully bring down the mailbox sub-system.
11715 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
11717 struct lpfc_sli *psli = &phba->sli;
11718 unsigned long timeout;
11720 if (mbx_action == LPFC_MBX_NO_WAIT) {
11721 /* delay 100ms for port state */
11723 lpfc_sli_mbox_sys_flush(phba);
11726 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
11728 spin_lock_irq(&phba->hbalock);
11729 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
11731 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
11732 /* Determine how long we might wait for the active mailbox
11733 * command to be gracefully completed by firmware.
11735 if (phba->sli.mbox_active)
11736 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
11737 phba->sli.mbox_active) *
11739 spin_unlock_irq(&phba->hbalock);
11741 while (phba->sli.mbox_active) {
11742 /* Check active mailbox complete status every 2ms */
11744 if (time_after(jiffies, timeout))
11745 /* Timeout, let the mailbox flush routine
11746 * forcefully release the active mailbox command
11751 spin_unlock_irq(&phba->hbalock);
11753 lpfc_sli_mbox_sys_flush(phba);
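
/*
 * Editor's sketch of the polling idiom used above: check a condition every
 * 2 ms against a jiffies deadline, then fall back to a forced cleanup on
 * timeout. The helper and its condition callback are hypothetical.
 */
static int example_poll_until(struct lpfc_hba *phba,
			      bool (*done)(struct lpfc_hba *),
			      unsigned int tmo_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(tmo_ms);

	while (!done(phba)) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;	/* caller flushes forcefully */
		msleep(2);
	}
	return 0;
}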
11757 * lpfc_sli_eratt_read - read sli-3 error attention events
11758 * @phba: Pointer to HBA context.
11760 * This function is called to read the SLI3 device error attention registers
11761 * for possible error attention events. The caller must hold the hostlock
11762 * with spin_lock_irq().
11764 * This function returns 1 when there is Error Attention in the Host Attention
11765 * Register and returns 0 otherwise.
11768 lpfc_sli_eratt_read(struct lpfc_hba *phba)
11772 /* Read chip Host Attention (HA) register */
11773 if (lpfc_readl(phba->HAregaddr, &ha_copy))
11774 goto unplug_err;
11776 if (ha_copy & HA_ERATT) {
11777 /* Read host status register to retrieve error event */
11778 if (lpfc_sli_read_hs(phba))
11779 goto unplug_err;
11781 /* Check if there is a deferred error condition active */
11782 if ((HS_FFER1 & phba->work_hs) &&
11783 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
11784 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
11785 phba->hba_flag |= DEFER_ERATT;
11786 /* Clear all interrupt enable conditions */
11787 writel(0, phba->HCregaddr);
11788 readl(phba->HCregaddr);
11791 /* Set the driver HA work bitmap */
11792 phba->work_ha |= HA_ERATT;
11793 /* Indicate polling handles this ERATT */
11794 phba->hba_flag |= HBA_ERATT_HANDLED;
11800 /* Set the driver HS work bitmap */
11801 phba->work_hs |= UNPLUG_ERR;
11802 /* Set the driver HA work bitmap */
11803 phba->work_ha |= HA_ERATT;
11804 /* Indicate polling handles this ERATT */
11805 phba->hba_flag |= HBA_ERATT_HANDLED;
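
/*
 * Editor's sketch: the deferred-error predicate used by the SLI-3 paths in
 * this file requires HS_FFER1 plus at least one of HS_FFER2..HS_FFER8 in
 * the host status word. Factored out as a hypothetical helper:
 */
static inline bool example_hs_deferred_err(uint32_t work_hs)
{
	return (work_hs & HS_FFER1) &&
	       (work_hs & (HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
			   HS_FFER6 | HS_FFER7 | HS_FFER8));
}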
11810 * lpfc_sli4_eratt_read - read sli-4 error attention events
11811 * @phba: Pointer to HBA context.
11813 * This function is called to read the SLI4 device error attention registers
11814 * for possible error attention events. The caller must hold the hostlock
11815 * with spin_lock_irq().
11817 * This function returns 1 when there is Error Attention in the Host Attention
11818 * Register and returns 0 otherwise.
11821 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
11823 uint32_t uerr_sta_hi, uerr_sta_lo;
11824 uint32_t if_type, portsmphr;
11825 struct lpfc_register portstat_reg;
11828 * For now, use the SLI4 device internal unrecoverable error
11829 * registers for error attention. This can be changed later.
11831 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11833 case LPFC_SLI_INTF_IF_TYPE_0:
11834 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
11836 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
11838 phba->work_hs |= UNPLUG_ERR;
11839 phba->work_ha |= HA_ERATT;
11840 phba->hba_flag |= HBA_ERATT_HANDLED;
11843 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
11844 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
11845 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11846 "1423 HBA Unrecoverable error: "
11847 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
11848 "ue_mask_lo_reg=0x%x, "
11849 "ue_mask_hi_reg=0x%x\n",
11850 uerr_sta_lo, uerr_sta_hi,
11851 phba->sli4_hba.ue_mask_lo,
11852 phba->sli4_hba.ue_mask_hi);
11853 phba->work_status[0] = uerr_sta_lo;
11854 phba->work_status[1] = uerr_sta_hi;
11855 phba->work_ha |= HA_ERATT;
11856 phba->hba_flag |= HBA_ERATT_HANDLED;
11860 case LPFC_SLI_INTF_IF_TYPE_2:
11861 case LPFC_SLI_INTF_IF_TYPE_6:
11862 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
11863 &portstat_reg.word0) ||
11864 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
11866 phba->work_hs |= UNPLUG_ERR;
11867 phba->work_ha |= HA_ERATT;
11868 phba->hba_flag |= HBA_ERATT_HANDLED;
11871 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
11872 phba->work_status[0] =
11873 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
11874 phba->work_status[1] =
11875 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
11876 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11877 "2885 Port Status Event: "
11878 "port status reg 0x%x, "
11879 "port smphr reg 0x%x, "
11880 "error 1=0x%x, error 2=0x%x\n",
11881 portstat_reg.word0,
11883 phba->work_status[0],
11884 phba->work_status[1]);
11885 phba->work_ha |= HA_ERATT;
11886 phba->hba_flag |= HBA_ERATT_HANDLED;
11890 case LPFC_SLI_INTF_IF_TYPE_1:
11892 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11893 "2886 HBA Error Attention on unsupported "
11894 "if type %d.", if_type);
11902 * lpfc_sli_check_eratt - check error attention events
11903 * @phba: Pointer to HBA context.
11905 * This function is called from timer soft interrupt context to check HBA's
11906 * error attention register bit for error attention events.
11908 * This function returns 1 when there is Error Attention in the Host Attention
11909 * Register and returns 0 otherwise.
11912 lpfc_sli_check_eratt(struct lpfc_hba *phba)
11916 /* If somebody is waiting to handle an eratt, don't process it
11917 * here. The brdkill function will do this.
11919 if (phba->link_flag & LS_IGNORE_ERATT)
11920 return 0;
11922 /* Check if interrupt handler handles this ERATT */
11923 spin_lock_irq(&phba->hbalock);
11924 if (phba->hba_flag & HBA_ERATT_HANDLED) {
11925 /* Interrupt handler has handled ERATT */
11926 spin_unlock_irq(&phba->hbalock);
11931 * If there is deferred error attention, do not check for error
11934 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11935 spin_unlock_irq(&phba->hbalock);
11939 /* If PCI channel is offline, don't process it */
11940 if (unlikely(pci_channel_offline(phba->pcidev))) {
11941 spin_unlock_irq(&phba->hbalock);
11945 switch (phba->sli_rev) {
11946 case LPFC_SLI_REV2:
11947 case LPFC_SLI_REV3:
11948 /* Read chip Host Attention (HA) register */
11949 ha_copy = lpfc_sli_eratt_read(phba);
11951 case LPFC_SLI_REV4:
11952 /* Read device Unrecoverable Error (UERR) registers */
11953 ha_copy = lpfc_sli4_eratt_read(phba);
11956 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11957 "0299 Invalid SLI revision (%d)\n",
11962 spin_unlock_irq(&phba->hbalock);
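
/*
 * Editor's sketch of the timer-context caller described in the kernel-doc
 * above: a poll callback that defers any latched error attention to the
 * worker thread. The from_timer() wiring assumes the driver's eratt_poll
 * timer member; treat it as illustrative.
 */
static void example_eratt_poll(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, eratt_poll);

	if (lpfc_sli_check_eratt(phba))
		/* work_ha/hba_flag were set; let the worker handle it */
		lpfc_worker_wake_up(phba);
}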
11968 * lpfc_intr_state_check - Check device state for interrupt handling
11969 * @phba: Pointer to HBA context.
11971 * This inline routine checks whether a device or its PCI slot is in a state
11972 * in which the interrupt should be handled.
11974 * This function returns 0 if the device or the PCI slot is in a state in
11975 * which the interrupt should be handled, otherwise -EIO.
11978 lpfc_intr_state_check(struct lpfc_hba *phba)
11980 /* If the pci channel is offline, ignore all the interrupts */
11981 if (unlikely(pci_channel_offline(phba->pcidev)))
11982 return -EIO;
11984 /* Update device level interrupt statistics */
11985 phba->sli.slistat.sli_intr++;
11987 /* Ignore all interrupts during initialization. */
11988 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
11989 return -EIO;
11995 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
11996 * @irq: Interrupt number.
11997 * @dev_id: The device context pointer.
11999 * This function is directly called from the PCI layer as an interrupt
12000 * service routine when device with SLI-3 interface spec is enabled with
12001 * MSI-X multi-message interrupt mode and there are slow-path events in
12002 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
12003 * interrupt mode, this function is called as part of the device-level
12004 * interrupt handler. When the PCI slot is in error recovery or the HBA
12005 * is undergoing initialization, the interrupt handler will not process
12006 * the interrupt. The link attention and ELS ring attention events are
12007 * handled by the worker thread. The interrupt handler signals the worker
12008 * thread and returns for these events. This function is called without
12009 * any lock held. It gets the hbalock to access and update SLI data
12012 * This function returns IRQ_HANDLED when interrupt is handled else it
12013 * returns IRQ_NONE.
12016 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
12018 struct lpfc_hba *phba;
12019 uint32_t ha_copy, hc_copy;
12020 uint32_t work_ha_copy;
12021 unsigned long status;
12022 unsigned long iflag;
12025 MAILBOX_t *mbox, *pmbox;
12026 struct lpfc_vport *vport;
12027 struct lpfc_nodelist *ndlp;
12028 struct lpfc_dmabuf *mp;
12033 * Get the driver's phba structure from the dev_id and
12034 * assume the HBA is not interrupting.
12036 phba = (struct lpfc_hba *)dev_id;
12038 if (unlikely(!phba))
12042 * Stuff needs to be attended to when this function is invoked as an
12043 * individual interrupt handler in MSI-X multi-message interrupt mode
12045 if (phba->intr_type == MSIX) {
12046 /* Check device state for handling interrupt */
12047 if (lpfc_intr_state_check(phba))
12048 return IRQ_NONE;
12049 /* Need to read HA REG for slow-path events */
12050 spin_lock_irqsave(&phba->hbalock, iflag);
12051 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12053 /* If somebody is waiting to handle an eratt don't process it
12054 * here. The brdkill function will do this.
12056 if (phba->link_flag & LS_IGNORE_ERATT)
12057 ha_copy &= ~HA_ERATT;
12058 /* Check the need for handling ERATT in interrupt handler */
12059 if (ha_copy & HA_ERATT) {
12060 if (phba->hba_flag & HBA_ERATT_HANDLED)
12061 /* ERATT polling has handled ERATT */
12062 ha_copy &= ~HA_ERATT;
12063 else
12064 /* Indicate interrupt handler handles ERATT */
12065 phba->hba_flag |= HBA_ERATT_HANDLED;
12069 * If there is deferred error attention, do not check for any
12072 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12073 spin_unlock_irqrestore(&phba->hbalock, iflag);
12077 /* Clear up only attention source related to slow-path */
12078 if (lpfc_readl(phba->HCregaddr, &hc_copy))
12081 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12082 HC_LAINT_ENA | HC_ERINT_ENA),
12084 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12086 writel(hc_copy, phba->HCregaddr);
12087 readl(phba->HAregaddr); /* flush */
12088 spin_unlock_irqrestore(&phba->hbalock, iflag);
12090 ha_copy = phba->ha_copy;
12092 work_ha_copy = ha_copy & phba->work_ha_mask;
12094 if (work_ha_copy) {
12095 if (work_ha_copy & HA_LATT) {
12096 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
12098 * Turn off Link Attention interrupts
12099 * until CLEAR_LA done
12101 spin_lock_irqsave(&phba->hbalock, iflag);
12102 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
12103 if (lpfc_readl(phba->HCregaddr, &control))
12105 control &= ~HC_LAINT_ENA;
12106 writel(control, phba->HCregaddr);
12107 readl(phba->HCregaddr); /* flush */
12108 spin_unlock_irqrestore(&phba->hbalock, iflag);
12111 work_ha_copy &= ~HA_LATT;
12114 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
12116 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
12117 * the only slow ring.
12119 status = (work_ha_copy &
12120 (HA_RXMASK << (4*LPFC_ELS_RING)));
12121 status >>= (4*LPFC_ELS_RING);
12122 if (status & HA_RXMASK) {
12123 spin_lock_irqsave(&phba->hbalock, iflag);
12124 if (lpfc_readl(phba->HCregaddr, &control))
12127 lpfc_debugfs_slow_ring_trc(phba,
12128 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
12130 (uint32_t)phba->sli.slistat.sli_intr);
12132 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
12133 lpfc_debugfs_slow_ring_trc(phba,
12134 "ISR Disable ring:"
12135 "pwork:x%x hawork:x%x wait:x%x",
12136 phba->work_ha, work_ha_copy,
12137 (uint32_t)((unsigned long)
12138 &phba->work_waitq));
12141 ~(HC_R0INT_ENA << LPFC_ELS_RING);
12142 writel(control, phba->HCregaddr);
12143 readl(phba->HCregaddr); /* flush */
12146 lpfc_debugfs_slow_ring_trc(phba,
12147 "ISR slow ring: pwork:"
12148 "x%x hawork:x%x wait:x%x",
12149 phba->work_ha, work_ha_copy,
12150 (uint32_t)((unsigned long)
12151 &phba->work_waitq));
12153 spin_unlock_irqrestore(&phba->hbalock, iflag);
12156 spin_lock_irqsave(&phba->hbalock, iflag);
12157 if (work_ha_copy & HA_ERATT) {
12158 if (lpfc_sli_read_hs(phba))
12161 * Check if there is a deferred error condition
12164 if ((HS_FFER1 & phba->work_hs) &&
12165 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12166 HS_FFER6 | HS_FFER7 | HS_FFER8) &
12168 phba->hba_flag |= DEFER_ERATT;
12169 /* Clear all interrupt enable conditions */
12170 writel(0, phba->HCregaddr);
12171 readl(phba->HCregaddr);
12175 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
12176 pmb = phba->sli.mbox_active;
12177 pmbox = &pmb->u.mb;
12179 vport = pmb->vport;
12181 /* First check out the status word */
12182 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12183 if (pmbox->mbxOwner != OWN_HOST) {
12184 spin_unlock_irqrestore(&phba->hbalock, iflag);
12186 * Stray Mailbox Interrupt, mbxCommand <cmd>
12187 * mbxStatus <status>
12189 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12191 "(%d):0304 Stray Mailbox "
12192 "Interrupt mbxCommand x%x "
12194 (vport ? vport->vpi : 0),
12197 /* clear mailbox attention bit */
12198 work_ha_copy &= ~HA_MBATT;
12200 phba->sli.mbox_active = NULL;
12201 spin_unlock_irqrestore(&phba->hbalock, iflag);
12202 phba->last_completion_time = jiffies;
12203 del_timer(&phba->sli.mbox_tmo);
12204 if (pmb->mbox_cmpl) {
12205 lpfc_sli_pcimem_bcopy(mbox, pmbox,
12207 if (pmb->out_ext_byte_len &&
12209 lpfc_sli_pcimem_bcopy(
12212 pmb->out_ext_byte_len);
12214 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12215 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12217 lpfc_debugfs_disc_trc(vport,
12218 LPFC_DISC_TRC_MBOX_VPORT,
12219 "MBOX dflt rpi: : "
12220 "status:x%x rpi:x%x",
12221 (uint32_t)pmbox->mbxStatus,
12222 pmbox->un.varWords[0], 0);
12224 if (!pmbox->mbxStatus) {
12225 mp = (struct lpfc_dmabuf *)
12227 ndlp = (struct lpfc_nodelist *)
12230 /* Reg_LOGIN of dflt RPI was
12231 * successful. Now let's get
12232 * rid of the RPI using the
12233 * same mbox buffer.
12235 lpfc_unreg_login(phba,
12237 pmbox->un.varWords[0],
12240 lpfc_mbx_cmpl_dflt_rpi;
12241 pmb->context1 = mp;
12242 pmb->context2 = ndlp;
12243 pmb->vport = vport;
12244 rc = lpfc_sli_issue_mbox(phba,
12247 if (rc != MBX_BUSY)
12248 lpfc_printf_log(phba,
12250 LOG_MBOX | LOG_SLI,
12251 "0350 rc should have"
12252 "been MBX_BUSY\n");
12253 if (rc != MBX_NOT_FINISHED)
12254 goto send_current_mbox;
12258 &phba->pport->work_port_lock,
12260 phba->pport->work_port_events &=
12262 spin_unlock_irqrestore(
12263 &phba->pport->work_port_lock,
12265 lpfc_mbox_cmpl_put(phba, pmb);
12268 spin_unlock_irqrestore(&phba->hbalock, iflag);
12270 if ((work_ha_copy & HA_MBATT) &&
12271 (phba->sli.mbox_active == NULL)) {
12273 /* Process next mailbox command if there is one */
12275 rc = lpfc_sli_issue_mbox(phba, NULL,
12277 } while (rc == MBX_NOT_FINISHED);
12278 if (rc != MBX_SUCCESS)
12279 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12280 LOG_SLI, "0349 rc should be "
12284 spin_lock_irqsave(&phba->hbalock, iflag);
12285 phba->work_ha |= work_ha_copy;
12286 spin_unlock_irqrestore(&phba->hbalock, iflag);
12287 lpfc_worker_wake_up(phba);
12289 return IRQ_HANDLED;
12291 spin_unlock_irqrestore(&phba->hbalock, iflag);
12292 return IRQ_HANDLED;
12294 } /* lpfc_sli_sp_intr_handler */
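
/*
 * Editor's sketch: how the two SLI-3 MSI-X vectors described above could be
 * wired up. The msix_entries indices and the name strings are illustrative
 * assumptions; the real setup lives in the driver's init path.
 */
static int example_sli3_request_msix(struct lpfc_hba *phba)
{
	int rc;

	rc = request_irq(phba->msix_entries[0].vector,
			 lpfc_sli_sp_intr_handler, 0, "lpfc-sp", phba);
	if (rc)
		return rc;
	rc = request_irq(phba->msix_entries[1].vector,
			 lpfc_sli_fp_intr_handler, 0, "lpfc-fp", phba);
	if (rc)
		free_irq(phba->msix_entries[0].vector, phba);
	return rc;
}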
12297 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
12298 * @irq: Interrupt number.
12299 * @dev_id: The device context pointer.
12301 * This function is directly called from the PCI layer as an interrupt
12302 * service routine when device with SLI-3 interface spec is enabled with
12303 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
12304 * ring event in the HBA. However, when the device is enabled with either
12305 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12306 * device-level interrupt handler. When the PCI slot is in error recovery
12307 * or the HBA is undergoing initialization, the interrupt handler will not
12308 * process the interrupt. The SCSI FCP fast-path ring events are handled in
12309 * the interrupt context. This function is called without any lock held.
12310 * It gets the hbalock to access and update SLI data structures.
12312 * This function returns IRQ_HANDLED when interrupt is handled else it
12313 * returns IRQ_NONE.
12316 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
12318 struct lpfc_hba *phba;
12320 unsigned long status;
12321 unsigned long iflag;
12322 struct lpfc_sli_ring *pring;
12324 /* Get the driver's phba structure from the dev_id and
12325 * assume the HBA is not interrupting.
12327 phba = (struct lpfc_hba *) dev_id;
12329 if (unlikely(!phba))
12333 * Stuff needs to be attended to when this function is invoked as an
12334 * individual interrupt handler in MSI-X multi-message interrupt mode
12336 if (phba->intr_type == MSIX) {
12337 /* Check device state for handling interrupt */
12338 if (lpfc_intr_state_check(phba))
12339 return IRQ_NONE;
12340 /* Need to read HA REG for FCP ring and other ring events */
12341 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12342 return IRQ_HANDLED;
12343 /* Clear up only attention source related to fast-path */
12344 spin_lock_irqsave(&phba->hbalock, iflag);
12346 * If there is deferred error attention, do not check for
12349 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12350 spin_unlock_irqrestore(&phba->hbalock, iflag);
12353 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
12355 readl(phba->HAregaddr); /* flush */
12356 spin_unlock_irqrestore(&phba->hbalock, iflag);
12358 ha_copy = phba->ha_copy;
12361 * Process all events on FCP ring. Take the optimized path for FCP IO.
12363 ha_copy &= ~(phba->work_ha_mask);
12365 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12366 status >>= (4*LPFC_FCP_RING);
12367 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12368 if (status & HA_RXMASK)
12369 lpfc_sli_handle_fast_ring_event(phba, pring, status);
12371 if (phba->cfg_multi_ring_support == 2) {
12373 * Process all events on extra ring. Take the optimized path
12374 * for extra ring IO.
12376 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12377 status >>= (4*LPFC_EXTRA_RING);
12378 if (status & HA_RXMASK) {
12379 lpfc_sli_handle_fast_ring_event(phba,
12380 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
12384 return IRQ_HANDLED;
12385 } /* lpfc_sli_fp_intr_handler */
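
/*
 * Editor's note in code form: the HA register allocates a nibble (four
 * attention bits) per ring, which is why the handlers above compute
 * (ha_copy & (HA_RXMASK << (4 * ring))) >> (4 * ring). A self-contained
 * illustration of extracting ring N's nibble:
 */
static inline uint32_t example_ring_attention(uint32_t ha_copy, int ring)
{
	return (ha_copy >> (4 * ring)) & 0xf;	/* 4 bits per ring */
}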
12388 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
12389 * @irq: Interrupt number.
12390 * @dev_id: The device context pointer.
12392 * This function is the HBA device-level interrupt handler to device with
12393 * SLI-3 interface spec, called from the PCI layer when either MSI or
12394 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
12395 * requires driver attention. This function invokes the slow-path interrupt
12396 * attention handling function and fast-path interrupt attention handling
12397 * function in turn to process the relevant HBA attention events. This
12398 * function is called without any lock held. It gets the hbalock to access
12399 * and update SLI data structures.
12401 * This function returns IRQ_HANDLED when interrupt is handled, else it
12402 * returns IRQ_NONE.
12405 lpfc_sli_intr_handler(int irq, void *dev_id)
12407 struct lpfc_hba *phba;
12408 irqreturn_t sp_irq_rc, fp_irq_rc;
12409 unsigned long status1, status2;
12413 * Get the driver's phba structure from the dev_id and
12414 * assume the HBA is not interrupting.
12416 phba = (struct lpfc_hba *) dev_id;
12418 if (unlikely(!phba))
12421 /* Check device state for handling interrupt */
12422 if (lpfc_intr_state_check(phba))
12423 return IRQ_NONE;
12425 spin_lock(&phba->hbalock);
12426 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
12427 spin_unlock(&phba->hbalock);
12428 return IRQ_HANDLED;
12431 if (unlikely(!phba->ha_copy)) {
12432 spin_unlock(&phba->hbalock);
12434 } else if (phba->ha_copy & HA_ERATT) {
12435 if (phba->hba_flag & HBA_ERATT_HANDLED)
12436 /* ERATT polling has handled ERATT */
12437 phba->ha_copy &= ~HA_ERATT;
12438 else
12439 /* Indicate interrupt handler handles ERATT */
12440 phba->hba_flag |= HBA_ERATT_HANDLED;
12444 * If there is deferred error attention, do not check for any interrupt.
12446 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12447 spin_unlock(&phba->hbalock);
12451 /* Clear attention sources except link and error attentions */
12452 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
12453 spin_unlock(&phba->hbalock);
12454 return IRQ_HANDLED;
12456 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
12457 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
12459 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
12460 writel(hc_copy, phba->HCregaddr);
12461 readl(phba->HAregaddr); /* flush */
12462 spin_unlock(&phba->hbalock);
12465 * Invokes slow-path host attention interrupt handling as appropriate.
12468 /* status of events with mailbox and link attention */
12469 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
12471 /* status of events with ELS ring */
12472 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
12473 status2 >>= (4*LPFC_ELS_RING);
12475 if (status1 || (status2 & HA_RXMASK))
12476 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
12478 sp_irq_rc = IRQ_NONE;
12481 * Invoke fast-path host attention interrupt handling as appropriate.
12484 /* status of events with FCP ring */
12485 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12486 status1 >>= (4*LPFC_FCP_RING);
12488 /* status of events with extra ring */
12489 if (phba->cfg_multi_ring_support == 2) {
12490 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12491 status2 >>= (4*LPFC_EXTRA_RING);
12495 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
12496 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
12498 fp_irq_rc = IRQ_NONE;
12500 /* Return device-level interrupt handling status */
12501 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
12502 } /* lpfc_sli_intr_handler */
12505 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
12506 * @phba: pointer to lpfc hba data structure.
12508 * This routine is invoked by the worker thread to process all the pending
12509 * SLI4 FCP abort XRI events.
12511 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
12513 struct lpfc_cq_event *cq_event;
12515 /* First, declare the fcp xri abort event has been handled */
12516 spin_lock_irq(&phba->hbalock);
12517 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
12518 spin_unlock_irq(&phba->hbalock);
12519 /* Now, handle all the fcp xri abort events */
12520 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
12521 /* Get the first event from the head of the event queue */
12522 spin_lock_irq(&phba->hbalock);
12523 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
12524 cq_event, struct lpfc_cq_event, list);
12525 spin_unlock_irq(&phba->hbalock);
12526 /* Notify aborted XRI for FCP work queue */
12527 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12528 /* Free the event processed back to the free pool */
12529 lpfc_sli4_cq_event_release(phba, cq_event);
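
/*
 * Editor's sketch of the drain pattern shared by the two *_event_proc
 * routines here: pop one event under the lock, process it with the lock
 * dropped, then release it. list_first_entry/list_del_init stand in for
 * the driver's list_remove_head macro.
 */
static void example_drain_cq_events(struct lpfc_hba *phba,
				    struct list_head *q,
				    void (*handle)(struct lpfc_hba *,
						   struct lpfc_cq_event *))
{
	struct lpfc_cq_event *ev;

	while (!list_empty(q)) {
		spin_lock_irq(&phba->hbalock);
		ev = list_first_entry(q, struct lpfc_cq_event, list);
		list_del_init(&ev->list);
		spin_unlock_irq(&phba->hbalock);
		handle(phba, ev);	/* lock dropped; may take locks */
		lpfc_sli4_cq_event_release(phba, ev);
	}
}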
12534 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
12535 * @phba: pointer to lpfc hba data structure.
12537 * This routine is invoked by the worker thread to process all the pending
12538 * SLI4 els abort xri events.
12540 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
12542 struct lpfc_cq_event *cq_event;
12544 /* First, declare the els xri abort event has been handled */
12545 spin_lock_irq(&phba->hbalock);
12546 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
12547 spin_unlock_irq(&phba->hbalock);
12548 /* Now, handle all the els xri abort events */
12549 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
12550 /* Get the first event from the head of the event queue */
12551 spin_lock_irq(&phba->hbalock);
12552 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
12553 cq_event, struct lpfc_cq_event, list);
12554 spin_unlock_irq(&phba->hbalock);
12555 /* Notify aborted XRI for ELS work queue */
12556 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12557 /* Free the event processed back to the free pool */
12558 lpfc_sli4_cq_event_release(phba, cq_event);
12563 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
12564 * @phba: pointer to lpfc hba data structure
12565 * @pIocbIn: pointer to the rspiocbq
12566 * @pIocbOut: pointer to the cmdiocbq
12567 * @wcqe: pointer to the complete wcqe
12569 * This routine transfers the fields of a command iocbq to a response iocbq
12570 * by copying all the IOCB fields from command iocbq and transferring the
12571 * completion status information from the complete wcqe.
12574 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
12575 struct lpfc_iocbq *pIocbIn,
12576 struct lpfc_iocbq *pIocbOut,
12577 struct lpfc_wcqe_complete *wcqe)
12580 unsigned long iflags;
12581 uint32_t status, max_response;
12582 struct lpfc_dmabuf *dmabuf;
12583 struct ulp_bde64 *bpl, bde;
12584 size_t offset = offsetof(struct lpfc_iocbq, iocb);
12586 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
12587 sizeof(struct lpfc_iocbq) - offset);
12588 /* Map WCQE parameters into irspiocb parameters */
12589 status = bf_get(lpfc_wcqe_c_status, wcqe);
12590 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
12591 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
12592 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
12593 pIocbIn->iocb.un.fcpi.fcpi_parm =
12594 pIocbOut->iocb.un.fcpi.fcpi_parm -
12595 wcqe->total_data_placed;
12597 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12599 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12600 switch (pIocbOut->iocb.ulpCommand) {
12601 case CMD_ELS_REQUEST64_CR:
12602 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12603 bpl = (struct ulp_bde64 *)dmabuf->virt;
12604 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
12605 max_response = bde.tus.f.bdeSize;
12607 case CMD_GEN_REQUEST64_CR:
12609 if (!pIocbOut->context3)
12611 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
12612 sizeof(struct ulp_bde64);
12613 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12614 bpl = (struct ulp_bde64 *)dmabuf->virt;
12615 for (i = 0; i < numBdes; i++) {
12616 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
12617 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
12618 max_response += bde.tus.f.bdeSize;
12622 max_response = wcqe->total_data_placed;
12625 if (max_response < wcqe->total_data_placed)
12626 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
12628 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
12629 wcqe->total_data_placed;
12632 /* Convert BG errors for completion status */
12633 if (status == CQE_STATUS_DI_ERROR) {
12634 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
12636 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
12637 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
12639 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
12641 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
12642 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
12643 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12644 BGS_GUARD_ERR_MASK;
12645 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
12646 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12647 BGS_APPTAG_ERR_MASK;
12648 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
12649 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12650 BGS_REFTAG_ERR_MASK;
12652 /* Check to see if there was any good data before the error */
12653 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
12654 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12655 BGS_HI_WATER_MARK_PRESENT_MASK;
12656 pIocbIn->iocb.unsli3.sli3_bg.bghm =
12657 wcqe->total_data_placed;
12661 * Set ALL the error bits to indicate we don't know what
12662 * type of error it is.
12664 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
12665 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12666 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
12667 BGS_GUARD_ERR_MASK);
12670 /* Pick up HBA exchange busy condition */
12671 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
12672 spin_lock_irqsave(&phba->hbalock, iflags);
12673 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
12674 spin_unlock_irqrestore(&phba->hbalock, iflags);
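
/*
 * Editor's note in code form: only the low bits of the WCQE status map
 * onto IOSTAT_* codes, hence the LPFC_IOCB_STATUS_MASK applied above.
 * Minimal standalone form of that mapping:
 */
static inline uint8_t example_wcqe_to_ulpstatus(uint32_t wcqe_status)
{
	return wcqe_status & LPFC_IOCB_STATUS_MASK; /* keep IOSTAT bits */
}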
12679 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
12680 * @phba: Pointer to HBA context object.
12681 * @wcqe: Pointer to work-queue completion queue entry.
12683 * This routine handles an ELS work-queue completion event and constructs
12684 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
12685 * discovery engine to handle.
12687 * Return: Pointer to the receive IOCBQ, NULL otherwise.
12689 static struct lpfc_iocbq *
12690 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
12691 struct lpfc_iocbq *irspiocbq)
12693 struct lpfc_sli_ring *pring;
12694 struct lpfc_iocbq *cmdiocbq;
12695 struct lpfc_wcqe_complete *wcqe;
12696 unsigned long iflags;
12698 pring = lpfc_phba_elsring(phba);
12699 if (unlikely(!pring))
12702 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
12703 spin_lock_irqsave(&pring->ring_lock, iflags);
12704 pring->stats.iocb_event++;
12705 /* Look up the ELS command IOCB and create pseudo response IOCB */
12706 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
12707 bf_get(lpfc_wcqe_c_request_tag, wcqe));
12708 if (unlikely(!cmdiocbq)) {
12709 spin_unlock_irqrestore(&pring->ring_lock, iflags);
12710 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12711 "0386 ELS complete with no corresponding "
12712 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
12713 wcqe->word0, wcqe->total_data_placed,
12714 wcqe->parameter, wcqe->word3);
12715 lpfc_sli_release_iocbq(phba, irspiocbq);
12719 /* Put the iocb back on the txcmplq */
12720 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
12721 spin_unlock_irqrestore(&pring->ring_lock, iflags);
12723 /* Fake the irspiocbq and copy necessary response information */
12724 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
12729 inline struct lpfc_cq_event *
12730 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
12732 struct lpfc_cq_event *cq_event;
12734 /* Allocate a new internal CQ_EVENT entry */
12735 cq_event = lpfc_sli4_cq_event_alloc(phba);
12737 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12738 "0602 Failed to alloc CQ_EVENT entry\n");
12742 /* Move the CQE into the event */
12743 memcpy(&cq_event->cqe, entry, size);
12748 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
12749 * @phba: Pointer to HBA context object.
12750 * @mcqe: Pointer to mailbox completion queue entry.
12752 * This routine processes a mailbox completion queue entry with an asynchronous
12755 * Return: true if work posted to worker thread, otherwise false.
12758 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
12760 struct lpfc_cq_event *cq_event;
12761 unsigned long iflags;
12763 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12764 "0392 Async Event: word0:x%x, word1:x%x, "
12765 "word2:x%x, word3:x%x\n", mcqe->word0,
12766 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
12768 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
12771 spin_lock_irqsave(&phba->hbalock, iflags);
12772 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
12773 /* Set the async event flag */
12774 phba->hba_flag |= ASYNC_EVENT;
12775 spin_unlock_irqrestore(&phba->hbalock, iflags);
12781 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
12782 * @phba: Pointer to HBA context object.
12783 * @mcqe: Pointer to mailbox completion queue entry.
12785 * This routine process a mailbox completion queue entry with mailbox
12786 * completion event.
12788 * Return: true if work posted to worker thread, otherwise false.
12791 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
12793 uint32_t mcqe_status;
12794 MAILBOX_t *mbox, *pmbox;
12795 struct lpfc_mqe *mqe;
12796 struct lpfc_vport *vport;
12797 struct lpfc_nodelist *ndlp;
12798 struct lpfc_dmabuf *mp;
12799 unsigned long iflags;
12801 bool workposted = false;
12804 /* If not a mailbox complete MCQE, bail out after checking mailbox consume */
12805 if (!bf_get(lpfc_trailer_completed, mcqe))
12806 goto out_no_mqe_complete;
12808 /* Get the reference to the active mbox command */
12809 spin_lock_irqsave(&phba->hbalock, iflags);
12810 pmb = phba->sli.mbox_active;
12811 if (unlikely(!pmb)) {
12812 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
12813 "1832 No pending MBOX command to handle\n");
12814 spin_unlock_irqrestore(&phba->hbalock, iflags);
12815 goto out_no_mqe_complete;
12817 spin_unlock_irqrestore(&phba->hbalock, iflags);
12819 pmbox = (MAILBOX_t *)&pmb->u.mqe;
12821 vport = pmb->vport;
12823 /* Reset heartbeat timer */
12824 phba->last_completion_time = jiffies;
12825 del_timer(&phba->sli.mbox_tmo);
12827 /* Move mbox data to caller's mailbox region, do endian swapping */
12828 if (pmb->mbox_cmpl && mbox)
12829 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
12832 * For mcqe errors, conditionally move a modified error code to
12833 * the mbox so that the error will not be missed.
12835 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
12836 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
12837 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
12838 bf_set(lpfc_mqe_status, mqe,
12839 (LPFC_MBX_ERROR_RANGE | mcqe_status));
12841 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12842 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12843 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
12844 "MBOX dflt rpi: status:x%x rpi:x%x",
12846 pmbox->un.varWords[0], 0);
12847 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
12848 mp = (struct lpfc_dmabuf *)(pmb->context1);
12849 ndlp = (struct lpfc_nodelist *)pmb->context2;
12850 /* Reg_LOGIN of dflt RPI was successful. Now let's get
12851 * rid of the RPI using the same mbox buffer.
12853 lpfc_unreg_login(phba, vport->vpi,
12854 pmbox->un.varWords[0], pmb);
12855 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
12856 pmb->context1 = mp;
12857 pmb->context2 = ndlp;
12858 pmb->vport = vport;
12859 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
12860 if (rc != MBX_BUSY)
12861 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12862 LOG_SLI, "0385 rc should "
12863 "have been MBX_BUSY\n");
12864 if (rc != MBX_NOT_FINISHED)
12865 goto send_current_mbox;
12868 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
12869 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
12870 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
12872 /* There is mailbox completion work to do */
12873 spin_lock_irqsave(&phba->hbalock, iflags);
12874 __lpfc_mbox_cmpl_put(phba, pmb);
12875 phba->work_ha |= HA_MBATT;
12876 spin_unlock_irqrestore(&phba->hbalock, iflags);
12880 spin_lock_irqsave(&phba->hbalock, iflags);
12881 /* Release the mailbox command posting token */
12882 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
12883 /* Setting the active mailbox pointer needs to be in sync with the flag clear */
12884 phba->sli.mbox_active = NULL;
12885 spin_unlock_irqrestore(&phba->hbalock, iflags);
12886 /* Wake up worker thread to post the next pending mailbox command */
12887 lpfc_worker_wake_up(phba);
12888 out_no_mqe_complete:
12889 if (bf_get(lpfc_trailer_consumed, mcqe))
12890 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
12895 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
12896 * @phba: Pointer to HBA context object.
12897 * @cqe: Pointer to mailbox completion queue entry.
12899 * This routine processes a mailbox completion queue entry; it invokes the
12900 * proper mailbox completion handling or asynchronous event handling routine
12901 * according to the MCQE's async bit.
12903 * Return: true if work posted to worker thread, otherwise false.
12906 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
12908 struct lpfc_mcqe mcqe;
12911 /* Copy the mailbox MCQE and convert endian order as needed */
12912 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
12914 /* Invoke the proper event handling routine */
12915 if (!bf_get(lpfc_trailer_async, &mcqe))
12916 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
12918 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
12923 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
12924 * @phba: Pointer to HBA context object.
12925 * @cq: Pointer to associated CQ
12926 * @wcqe: Pointer to work-queue completion queue entry.
12928 * This routine handles an ELS work-queue completion event.
12930 * Return: true if work posted to worker thread, otherwise false.
12933 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12934 struct lpfc_wcqe_complete *wcqe)
12936 struct lpfc_iocbq *irspiocbq;
12937 unsigned long iflags;
12938 struct lpfc_sli_ring *pring = cq->pring;
12940 int txcmplq_cnt = 0;
12941 int fcp_txcmplq_cnt = 0;
12943 /* Get an irspiocbq for later ELS response processing use */
12944 irspiocbq = lpfc_sli_get_iocbq(phba);
12945 if (!irspiocbq) {
12946 if (!list_empty(&pring->txq))
12947 txq_cnt++;
12948 if (!list_empty(&pring->txcmplq))
12949 txcmplq_cnt++;
12950 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12951 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
12952 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
12953 txq_cnt, phba->iocb_cnt,
12959 /* Save off the slow-path queue event for work thread to process */
12960 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
12961 spin_lock_irqsave(&phba->hbalock, iflags);
12962 list_add_tail(&irspiocbq->cq_event.list,
12963 &phba->sli4_hba.sp_queue_event);
12964 phba->hba_flag |= HBA_SP_QUEUE_EVT;
12965 spin_unlock_irqrestore(&phba->hbalock, iflags);
12971 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
12972 * @phba: Pointer to HBA context object.
12973 * @wcqe: Pointer to work-queue completion queue entry.
12975 * This routine handles a slow-path WQ entry consumed event by invoking the
12976 * proper WQ release routine to the slow-path WQ.
12979 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
12980 struct lpfc_wcqe_release *wcqe)
12982 /* sanity check on queue memory */
12983 if (unlikely(!phba->sli4_hba.els_wq))
12985 /* Check for the slow-path ELS work queue */
12986 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
12987 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
12988 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
12990 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12991 "2579 Slow-path wqe consume event carries "
12992 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
12993 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
12994 phba->sli4_hba.els_wq->queue_id);
12998 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
12999 * @phba: Pointer to HBA context object.
13000 * @cq: Pointer to a WQ completion queue.
13001 * @wcqe: Pointer to work-queue completion queue entry.
13003 * This routine handles an XRI abort event.
13005 * Return: true if work posted to worker thread, otherwise false.
13008 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13009 struct lpfc_queue *cq,
13010 struct sli4_wcqe_xri_aborted *wcqe)
13012 bool workposted = false;
13013 struct lpfc_cq_event *cq_event;
13014 unsigned long iflags;
13016 switch (cq->subtype) {
13018 cq_event = lpfc_cq_event_setup(
13019 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
13022 spin_lock_irqsave(&phba->hbalock, iflags);
13023 list_add_tail(&cq_event->list,
13024 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
13025 /* Set the fcp xri abort event flag */
13026 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
13027 spin_unlock_irqrestore(&phba->hbalock, iflags);
13030 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
13032 cq_event = lpfc_cq_event_setup(
13033 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
13036 spin_lock_irqsave(&phba->hbalock, iflags);
13037 list_add_tail(&cq_event->list,
13038 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13039 /* Set the els xri abort event flag */
13040 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13041 spin_unlock_irqrestore(&phba->hbalock, iflags);
13045 /* Notify aborted XRI for NVME work queue */
13046 if (phba->nvmet_support)
13047 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13049 lpfc_sli4_nvme_xri_aborted(phba, wcqe);
13051 workposted = false;
13054 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13055 "0603 Invalid CQ subtype %d: "
13056 "%08x %08x %08x %08x\n",
13057 cq->subtype, wcqe->word0, wcqe->parameter,
13058 wcqe->word2, wcqe->word3);
13059 workposted = false;
13066 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
13067 * @phba: Pointer to HBA context object.
13068 * @rcqe: Pointer to receive-queue completion queue entry.
13070 * This routine processes a receive-queue completion queue entry.
13072 * Return: true if work posted to worker thread, otherwise false.
13075 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13077 bool workposted = false;
13078 struct fc_frame_header *fc_hdr;
13079 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13080 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
13081 struct lpfc_nvmet_tgtport *tgtp;
13082 struct hbq_dmabuf *dma_buf;
13083 uint32_t status, rq_id;
13084 unsigned long iflags;
13086 /* sanity check on queue memory */
13087 if (unlikely(!hrq) || unlikely(!drq))
13090 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13091 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13093 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13094 if (rq_id != hrq->queue_id)
13097 status = bf_get(lpfc_rcqe_status, rcqe);
13099 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13100 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13101 "2537 Receive Frame Truncated!!\n");
13102 case FC_STATUS_RQ_SUCCESS:
13103 spin_lock_irqsave(&phba->hbalock, iflags);
13104 lpfc_sli4_rq_release(hrq, drq);
13105 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13107 hrq->RQ_no_buf_found++;
13108 spin_unlock_irqrestore(&phba->hbalock, iflags);
13112 hrq->RQ_buf_posted--;
13113 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
13115 /* If an NVME LS event (type 0x28), treat it as Fast path */
13116 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13118 /* save off the frame for the worker thread to process */
13119 list_add_tail(&dma_buf->cq_event.list,
13120 &phba->sli4_hba.sp_queue_event);
13121 /* Frame received */
13122 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13123 spin_unlock_irqrestore(&phba->hbalock, iflags);
13126 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13127 if (phba->nvmet_support) {
13128 tgtp = phba->targetport->private;
13129 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13130 "6402 RQE Error x%x, posted %d err_cnt "
13132 status, hrq->RQ_buf_posted,
13133 hrq->RQ_no_posted_buf,
13134 atomic_read(&tgtp->rcv_fcp_cmd_in),
13135 atomic_read(&tgtp->rcv_fcp_cmd_out),
13136 atomic_read(&tgtp->xmt_fcp_release));
13140 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13141 hrq->RQ_no_posted_buf++;
13142 /* Post more buffers if possible */
13143 spin_lock_irqsave(&phba->hbalock, iflags);
13144 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13145 spin_unlock_irqrestore(&phba->hbalock, iflags);
13154 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
13155 * @phba: Pointer to HBA context object.
13156 * @cq: Pointer to the completion queue.
13157 * @cqe: Pointer to a completion queue entry.
13159 * This routine processes a slow-path work-queue or receive-queue completion queue
13162 * Return: true if work posted to worker thread, otherwise false.
13165 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13166 struct lpfc_cqe *cqe)
13168 struct lpfc_cqe cqevt;
13169 bool workposted = false;
13171 /* Copy the work queue CQE and convert endian order if needed */
13172 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
13174 /* Check and process for different type of WCQE and dispatch */
13175 switch (bf_get(lpfc_cqe_code, &cqevt)) {
13176 case CQE_CODE_COMPL_WQE:
13177 /* Process the WQ/RQ complete event */
13178 phba->last_completion_time = jiffies;
13179 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
13180 (struct lpfc_wcqe_complete *)&cqevt);
13182 case CQE_CODE_RELEASE_WQE:
13183 /* Process the WQ release event */
13184 lpfc_sli4_sp_handle_rel_wcqe(phba,
13185 (struct lpfc_wcqe_release *)&cqevt);
13187 case CQE_CODE_XRI_ABORTED:
13188 /* Process the WQ XRI abort event */
13189 phba->last_completion_time = jiffies;
13190 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13191 (struct sli4_wcqe_xri_aborted *)&cqevt);
13193 case CQE_CODE_RECEIVE:
13194 case CQE_CODE_RECEIVE_V1:
13195 /* Process the RQ event */
13196 phba->last_completion_time = jiffies;
13197 workposted = lpfc_sli4_sp_handle_rcqe(phba,
13198 (struct lpfc_rcqe *)&cqevt);
13201 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13202 "0388 Not a valid WCQE code: x%x\n",
13203 bf_get(lpfc_cqe_code, &cqevt));
13210 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
13211 * @phba: Pointer to HBA context object.
13212 * @eqe: Pointer to the slow-path event queue entry.
13214 * This routine processes an event queue entry from the slow-path event queue.
13215 * It will check the MajorCode and MinorCode to determine whether this is for a
13216 * completion event on a completion queue; if not, an error shall be logged
13217 * and just return. Otherwise, it will get to the corresponding completion
13218 * queue and process all the entries on that completion queue, rearm the
13219 * completion queue, and then return.
13223 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13224 struct lpfc_queue *speq)
13226 struct lpfc_queue *cq = NULL, *childq;
13229 /* Get the reference to the corresponding CQ */
13230 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13232 list_for_each_entry(childq, &speq->child_list, list) {
13233 if (childq->queue_id == cqid) {
13238 if (unlikely(!cq)) {
13239 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13240 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13241 "0365 Slow-path CQ identifier "
13242 "(%d) does not exist\n", cqid);
13246 /* Save EQ associated with this CQ */
13247 cq->assoc_qp = speq;
13249 if (!queue_work(phba->wq, &cq->spwork))
13250 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13251 "0390 Cannot schedule soft IRQ "
13252 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13253 cqid, cq->queue_id, smp_processor_id());
13257 * lpfc_sli4_sp_process_cq - Process slow-path completion queue entries
13258 * @work: Pointer to the work_struct embedded in the completion queue.
13260 * This routine is the work handler scheduled by lpfc_sli4_sp_handle_eqe().
13261 * It checks the type of the completion queue to determine how each entry
13262 * is to be handled, processes all the entries on that completion queue,
13264 * rearms the completion queue, and then returns.
13269 lpfc_sli4_sp_process_cq(struct work_struct *work)
13271 struct lpfc_queue *cq =
13272 container_of(work, struct lpfc_queue, spwork);
13273 struct lpfc_hba *phba = cq->phba;
13274 struct lpfc_cqe *cqe;
13275 bool workposted = false;
13278 /* Process all the entries to the CQ */
13279 switch (cq->type) {
13281 while ((cqe = lpfc_sli4_cq_get(cq))) {
13282 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
13283 if (!(++ccount % cq->entry_repost))
13289 while ((cqe = lpfc_sli4_cq_get(cq))) {
13290 if (cq->subtype == LPFC_FCP ||
13291 cq->subtype == LPFC_NVME) {
13292 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13293 if (phba->ktime_on)
13294 cq->isr_timestamp = ktime_get_ns();
13296 cq->isr_timestamp = 0;
13298 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq,
13301 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
13304 if (!(++ccount % cq->entry_repost))
13308 /* Track the max number of CQEs processed in 1 EQ */
13309 if (ccount > cq->CQ_max_cqe)
13310 cq->CQ_max_cqe = ccount;
13313 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13314 "0370 Invalid completion queue type (%d)\n",
13319 /* Catch the no cq entry condition, log an error */
13320 if (unlikely(ccount == 0))
13321 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13322 "0371 No entry from the CQ: identifier "
13323 "(x%x), type (%d)\n", cq->queue_id, cq->type);
13325 /* In any case, flush and re-arm the CQ */
13326 phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);
13328 /* wake up worker thread if there are works to be done */
13330 lpfc_worker_wake_up(phba);
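
/*
 * Editor's sketch: the handler above runs from phba->wq; the work item is
 * bound to the CQ when the queue object is allocated, roughly as below
 * (shown only to make the queue_work() call in lpfc_sli4_sp_handle_eqe()
 * concrete).
 */
static void example_bind_sp_work(struct lpfc_queue *cq)
{
	INIT_WORK(&cq->spwork, lpfc_sli4_sp_process_cq);
}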
13334 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
13335 * @phba: Pointer to HBA context object.
13336 * @cq: Pointer to associated CQ
13337 * @wcqe: Pointer to work-queue completion queue entry.
13339 * This routine processes a fast-path work queue completion entry from the
13340 * fast-path event queue for FCP command response completion.
13343 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13344 struct lpfc_wcqe_complete *wcqe)
13346 struct lpfc_sli_ring *pring = cq->pring;
13347 struct lpfc_iocbq *cmdiocbq;
13348 struct lpfc_iocbq irspiocbq;
13349 unsigned long iflags;
13351 /* Check for response status */
13352 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13353 /* If resource errors reported from HBA, reduce queue
13354 * depth of the SCSI device.
13356 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
13357 IOSTAT_LOCAL_REJECT)) &&
13358 ((wcqe->parameter & IOERR_PARAM_MASK) ==
13359 IOERR_NO_RESOURCES))
13360 phba->lpfc_rampdown_queue_depth(phba);
13362 /* Log the error status */
13363 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13364 "0373 FCP complete error: status=x%x, "
13365 "hw_status=x%x, total_data_specified=%d, "
13366 "parameter=x%x, word3=x%x\n",
13367 bf_get(lpfc_wcqe_c_status, wcqe),
13368 bf_get(lpfc_wcqe_c_hw_status, wcqe),
13369 wcqe->total_data_placed, wcqe->parameter,
13373 /* Look up the FCP command IOCB and create pseudo response IOCB */
13374 spin_lock_irqsave(&pring->ring_lock, iflags);
13375 pring->stats.iocb_event++;
13376 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13377 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13378 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13379 if (unlikely(!cmdiocbq)) {
13380 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13381 "0374 FCP complete with no corresponding "
13382 "cmdiocb: iotag (%d)\n",
13383 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13386 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13387 cmdiocbq->isr_timestamp = cq->isr_timestamp;
13389 if (cmdiocbq->iocb_cmpl == NULL) {
13390 if (cmdiocbq->wqe_cmpl) {
13391 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13392 spin_lock_irqsave(&phba->hbalock, iflags);
13393 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13394 spin_unlock_irqrestore(&phba->hbalock, iflags);
13397 /* Pass the cmd_iocb and the wcqe to the upper layer */
13398 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
13401 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13402 "0375 FCP cmdiocb not callback function "
13404 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13408 /* Fake the irspiocb and copy necessary response information */
13409 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
13411 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13412 spin_lock_irqsave(&phba->hbalock, iflags);
13413 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13414 spin_unlock_irqrestore(&phba->hbalock, iflags);
13417 /* Pass the cmd_iocb and the rsp state to the upper layer */
13418 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
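
/*
 * Editor's sketch of the two completion shapes dispatched above: NVME I/Os
 * complete through wqe_cmpl with the raw WCQE, while SCSI I/Os complete
 * through iocb_cmpl with the faked rspiocbq. Both function names below are
 * hypothetical placeholders for the real lpfc_nvme/lpfc_scsi handlers.
 */
static void example_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			     struct lpfc_wcqe_complete *wcqe)
{
	/* consume bf_get(lpfc_wcqe_c_status, wcqe) directly */
}

static void example_iocb_cmpl(struct lpfc_hba *phba,
			      struct lpfc_iocbq *cmdiocbq,
			      struct lpfc_iocbq *rspiocbq)
{
	/* consume rspiocbq->iocb.ulpStatus built by param_transfer */
}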
13422 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
13423 * @phba: Pointer to HBA context object.
13424 * @cq: Pointer to completion queue.
13425 * @wcqe: Pointer to work-queue completion queue entry.
13427 * This routine handles a fast-path WQ entry consumed event by invoking the
13428 * proper WQ release routine on the fast-path WQ.
13431 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13432 struct lpfc_wcqe_release *wcqe)
13434 struct lpfc_queue *childwq;
13435 bool wqid_matched = false;
13438 /* Check for fast-path FCP work queue release */
13439 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
13440 list_for_each_entry(childwq, &cq->child_list, list) {
13441 if (childwq->queue_id == hba_wqid) {
13442 lpfc_sli4_wq_release(childwq,
13443 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13444 if (childwq->q_flag & HBA_NVMET_WQFULL)
13445 lpfc_nvmet_wqfull_process(phba, childwq);
13446 wqid_matched = true;
13450 /* Report warning log message if no match found */
13451 if (!wqid_matched)
13452 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13453 "2580 Fast-path wqe consume event carries "
13454 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
13458 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
13459 * @phba: Pointer to HBA context object.
13460 * @rcqe: Pointer to receive-queue completion queue entry.
13462 * This routine processes a receive-queue completion queue entry.
13464 * Return: true if work posted to worker thread, otherwise false.
13467 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13468 struct lpfc_rcqe *rcqe)
13470 bool workposted = false;
13471 struct lpfc_queue *hrq;
13472 struct lpfc_queue *drq;
13473 struct rqb_dmabuf *dma_buf;
13474 struct fc_frame_header *fc_hdr;
13475 struct lpfc_nvmet_tgtport *tgtp;
13476 uint32_t status, rq_id;
13477 unsigned long iflags;
13478 uint32_t fctl, idx;
13480 if ((phba->nvmet_support == 0) ||
13481 (phba->sli4_hba.nvmet_cqset == NULL))
13484 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
13485 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
13486 drq = phba->sli4_hba.nvmet_mrq_data[idx];
13488 /* sanity check on queue memory */
13489 if (unlikely(!hrq) || unlikely(!drq))
13490 return workposted;
13492 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13493 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13494 else
13495 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13497 if ((phba->nvmet_support == 0) ||
13498 (rq_id != hrq->queue_id))
13499 return workposted;
13501 status = bf_get(lpfc_rcqe_status, rcqe);
13502 switch (status) {
13503 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13504 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13505 "6126 Receive Frame Truncated!!\n");
13507 case FC_STATUS_RQ_SUCCESS:
13508 spin_lock_irqsave(&phba->hbalock, iflags);
13509 lpfc_sli4_rq_release(hrq, drq);
13510 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
13511 if (!dma_buf) {
13512 hrq->RQ_no_buf_found++;
13513 spin_unlock_irqrestore(&phba->hbalock, iflags);
13514 goto out;
13515 }
13516 spin_unlock_irqrestore(&phba->hbalock, iflags);
13518 hrq->RQ_buf_posted--;
13519 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13521 /* Just some basic sanity checks on FCP Command frame */
13522 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
13523 fc_hdr->fh_f_ctl[1] << 8 |
13524 fc_hdr->fh_f_ctl[2]);
13525 if (((fctl &
13526 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
13527 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
13528 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
13529 goto drop;
13531 if (fc_hdr->fh_type == FC_TYPE_FCP) {
13532 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
13533 lpfc_nvmet_unsol_fcp_event(
13534 phba, idx, dma_buf,
13535 cq->isr_timestamp);
13536 return false;
13537 }
13538 drop:
13539 lpfc_in_buf_free(phba, &dma_buf->dbuf);
13540 break;
13541 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13542 if (phba->nvmet_support) {
13543 tgtp = phba->targetport->private;
13544 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13545 "6401 RQE Error x%x, posted %d err_cnt "
13547 status, hrq->RQ_buf_posted,
13548 hrq->RQ_no_posted_buf,
13549 atomic_read(&tgtp->rcv_fcp_cmd_in),
13550 atomic_read(&tgtp->rcv_fcp_cmd_out),
13551 atomic_read(&tgtp->xmt_fcp_release));
13552 }
13553 /* fall through */
13555 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13556 hrq->RQ_no_posted_buf++;
13557 /* Post more buffers if possible */
13558 break;
13559 }
13560 out:
13561 return workposted;
13562 }
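/*
 * A minimal, self-contained sketch of the F_CTL sanity check above: the
 * 24-bit FC F_CTL field is carried as three bytes in the frame header,
 * and a received FCP command must be a complete single-frame sequence
 * that transfers sequence initiative. The SK_* bit values mirror the
 * fc_fs.h definitions but are restated here as assumptions; the sk_*
 * names are hypothetical, not part of the lpfc driver.
 */
#include <stdint.h>

#define SK_FC_FIRST_SEQ  0x200000u	/* assumption: first sequence of exchange */
#define SK_FC_END_SEQ    0x100000u	/* assumption: last frame of sequence */
#define SK_FC_SEQ_INIT   0x010000u	/* assumption: transfer of seq initiative */

static int sk_fcp_cmd_frame_ok(const uint8_t f_ctl[3], uint16_t seq_cnt)
{
	uint32_t fctl = (uint32_t)f_ctl[0] << 16 |
			(uint32_t)f_ctl[1] << 8 |
			(uint32_t)f_ctl[2];
	uint32_t need = SK_FC_FIRST_SEQ | SK_FC_END_SEQ | SK_FC_SEQ_INIT;

	/* all three bits must be set and SEQ_CNT must be zero */
	return ((fctl & need) == need) && (seq_cnt == 0);
}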
13565 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
13566 * @phba: Pointer to HBA context object.
13567 * @cq: Pointer to the completion queue.
13568 * @cqe: Pointer to fast-path completion queue entry.
13569 * This routine processes a fast-path work queue completion entry from a
13570 * fast-path event queue for FCP command response completion.
13573 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13574 struct lpfc_cqe *cqe)
13576 struct lpfc_wcqe_release wcqe;
13577 bool workposted = false;
13579 /* Copy the work queue CQE and convert endian order if needed */
13580 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
13582 /* Check and process for different type of WCQE and dispatch */
13583 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
13584 case CQE_CODE_COMPL_WQE:
13585 case CQE_CODE_NVME_ERSP:
13587 /* Process the WQ complete event */
13588 phba->last_completion_time = jiffies;
13589 if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
13590 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
13591 (struct lpfc_wcqe_complete *)&wcqe);
13592 if (cq->subtype == LPFC_NVME_LS)
13593 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
13594 (struct lpfc_wcqe_complete *)&wcqe);
13595 break;
13596 case CQE_CODE_RELEASE_WQE:
13597 cq->CQ_release_wqe++;
13598 /* Process the WQ release event */
13599 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
13600 (struct lpfc_wcqe_release *)&wcqe);
13601 break;
13602 case CQE_CODE_XRI_ABORTED:
13603 cq->CQ_xri_aborted++;
13604 /* Process the WQ XRI abort event */
13605 phba->last_completion_time = jiffies;
13606 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13607 (struct sli4_wcqe_xri_aborted *)&wcqe);
13608 break;
13609 case CQE_CODE_RECEIVE_V1:
13610 case CQE_CODE_RECEIVE:
13611 phba->last_completion_time = jiffies;
13612 if (cq->subtype == LPFC_NVMET) {
13613 workposted = lpfc_sli4_nvmet_handle_rcqe(
13614 phba, cq, (struct lpfc_rcqe *)&wcqe);
13615 break;
13616 }
13617 default:
13618 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13619 "0144 Not a valid CQE code: x%x\n",
13620 bf_get(lpfc_wcqe_c_code, &wcqe));
13621 break;
13622 }
13623 return workposted;
13624 }
13627 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
13628 * @phba: Pointer to HBA context object.
13629 * @eqe: Pointer to fast-path event queue entry.
13631 * This routine processes an event queue entry from the fast-path event queue.
13632 * It checks the MajorCode and MinorCode to determine whether this is a
13633 * completion event on a completion queue; if not, an error is logged and
13634 * the routine returns. Otherwise, it looks up the corresponding completion
13635 * queue and schedules work to process all the entries on that completion
13636 * queue and rearm it.
13639 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13642 struct lpfc_queue *cq = NULL;
13645 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
13646 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13647 "0366 Not a valid completion "
13648 "event: majorcode=x%x, minorcode=x%x\n",
13649 bf_get_le32(lpfc_eqe_major_code, eqe),
13650 bf_get_le32(lpfc_eqe_minor_code, eqe));
13651 return;
13652 }
13654 /* Get the reference to the corresponding CQ */
13655 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13657 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
13658 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
13659 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
13660 /* Process NVMET unsol rcv */
13661 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
13666 if (phba->sli4_hba.nvme_cq_map &&
13667 (cqid == phba->sli4_hba.nvme_cq_map[qidx])) {
13668 /* Process NVME / NVMET command completion */
13669 cq = phba->sli4_hba.nvme_cq[qidx];
13673 if (phba->sli4_hba.fcp_cq_map &&
13674 (cqid == phba->sli4_hba.fcp_cq_map[qidx])) {
13675 /* Process FCP command completion */
13676 cq = phba->sli4_hba.fcp_cq[qidx];
13680 if (phba->sli4_hba.nvmels_cq &&
13681 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
13682 /* Process NVME unsol rcv */
13683 cq = phba->sli4_hba.nvmels_cq;
13686 /* Otherwise this is a Slow path event */
13688 lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
13693 if (unlikely(cqid != cq->queue_id)) {
13694 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13695 "0368 Mismatched fast-path completion "
13696 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
13697 cqid, cq->queue_id);
13698 return;
13699 }
13701 /* Save EQ associated with this CQ */
13702 cq->assoc_qp = phba->sli4_hba.hba_eq[qidx];
13704 if (!queue_work(phba->wq, &cq->irqwork))
13705 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13706 "0363 Cannot schedule soft IRQ "
13707 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13708 cqid, cq->queue_id, smp_processor_id());
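/*
 * A minimal, self-contained sketch of the CQID lookup above: the EQE
 * carries only a completion-queue identifier, so the handler maps the
 * CQID back to a driver queue by comparing it against a contiguous block
 * of per-type queue IDs. The sk_* names and simplified types are
 * hypothetical, not part of the lpfc driver.
 */
#include <stddef.h>
#include <stdint.h>

struct sk_queue { uint16_t queue_id; };

static struct sk_queue *sk_cqid_to_cq(uint16_t cqid,
				      struct sk_queue **nvmet_cqs,
				      uint16_t nvmet_cnt)
{
	uint16_t base;

	if (nvmet_cqs && nvmet_cnt) {
		base = nvmet_cqs[0]->queue_id;
		/* NVMET CQ IDs are allocated as one contiguous block */
		if (cqid >= base && cqid < (uint16_t)(base + nvmet_cnt))
			return nvmet_cqs[cqid - base];
	}
	return NULL;	/* fall back to per-index FCP/NVME map lookups */
}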
13712 * lpfc_sli4_hba_process_cq - Process entries on a fast-path completion queue
13713 * @work: Pointer to the work_struct embedded in the completion queue.
13714 *
13715 * This routine walks a fast-path completion queue, handing each valid CQE
13716 * to lpfc_sli4_fp_handle_cqe() and releasing consumed entries in batches.
13717 * Once the queue is drained it is rearmed, and the worker thread is woken
13718 * if any of the handlers posted work for it.
13724 lpfc_sli4_hba_process_cq(struct work_struct *work)
13726 struct lpfc_queue *cq =
13727 container_of(work, struct lpfc_queue, irqwork);
13728 struct lpfc_hba *phba = cq->phba;
13729 struct lpfc_cqe *cqe;
13730 bool workposted = false;
13733 /* Process all the entries to the CQ */
13734 while ((cqe = lpfc_sli4_cq_get(cq))) {
13735 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13736 if (phba->ktime_on)
13737 cq->isr_timestamp = ktime_get_ns();
13738 else
13739 cq->isr_timestamp = 0;
13740 #endif
13741 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
13742 if (!(++ccount % cq->entry_repost))
13746 /* Track the max number of CQEs processed in 1 EQ */
13747 if (ccount > cq->CQ_max_cqe)
13748 cq->CQ_max_cqe = ccount;
13749 cq->assoc_qp->EQ_cqe_cnt += ccount;
13751 /* Catch the no cq entry condition */
13752 if (unlikely(ccount == 0))
13753 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13754 "0369 No entry from fast-path completion "
13755 "queue fcpcqid=%d\n", cq->queue_id);
13757 /* In any case, flush and re-arm the CQ */
13758 phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);
13760 /* wake up worker thread if there are works to be done */
13762 lpfc_worker_wake_up(phba);
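/*
 * A minimal, self-contained sketch of the CQE batching pattern used
 * above: consumed entries are released back to the hardware every
 * entry_repost CQEs rather than one at a time, bounding doorbell writes,
 * and the final release rearms the queue. The sk_* names and callback
 * types are hypothetical, not part of the lpfc driver.
 */
#include <stdint.h>

struct sk_cq {
	uint32_t entry_repost;		/* batch size for releases */
	void *(*get)(struct sk_cq *cq);	/* next valid CQE or NULL */
	void (*release)(struct sk_cq *cq, int rearm);
};

static uint32_t sk_process_cq(struct sk_cq *cq)
{
	uint32_t count = 0;

	while (cq->get(cq)) {
		/* ... handle the CQE here ... */
		if (!(++count % cq->entry_repost))
			cq->release(cq, 0);	/* give entries back, no rearm */
	}
	cq->release(cq, 1);	/* final release always rearms the CQ */
	return count;
}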
13766 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
13768 struct lpfc_eqe *eqe;
13770 /* walk all the EQ entries and drop on the floor */
13771 while ((eqe = lpfc_sli4_eq_get(eq)))
13774 /* Clear and re-arm the EQ */
13775 phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM);
13780 * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
13782 * @phba: Pointer to HBA context object.
13783 * @eqe: Pointer to fast-path event queue entry.
13785 * This routine processes an event queue entry from the Flash Optimized Fabric
13786 * event queue. It checks the MajorCode and MinorCode to determine whether this
13787 * is a completion event on a completion queue; if not, an error is logged and
13788 * the routine returns. Otherwise, it looks up the corresponding
13789 * completion queue and schedules work to process all the entries on that
13790 * completion queue and rearm it.
13793 lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
13795 struct lpfc_queue *cq;
13798 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
13799 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13800 "9147 Not a valid completion "
13801 "event: majorcode=x%x, minorcode=x%x\n",
13802 bf_get_le32(lpfc_eqe_major_code, eqe),
13803 bf_get_le32(lpfc_eqe_minor_code, eqe));
13804 return;
13805 }
13807 /* Get the reference to the corresponding CQ */
13808 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13810 /* Next check for OAS */
13811 cq = phba->sli4_hba.oas_cq;
13812 if (unlikely(!cq)) {
13813 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13814 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13815 "9148 OAS completion queue "
13816 "does not exist\n");
13817 return;
13818 }
13820 if (unlikely(cqid != cq->queue_id)) {
13821 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13822 "9149 Mismatched fast-path compl "
13823 "queue id: eqcqid=%d, fcpcqid=%d\n",
13824 cqid, cq->queue_id);
13825 return;
13826 }
13828 /* Save EQ associated with this CQ */
13829 cq->assoc_qp = phba->sli4_hba.fof_eq;
13831 /* CQ work will be processed on CPU affinitized to this IRQ */
13832 if (!queue_work(phba->wq, &cq->irqwork))
13833 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13834 "0367 Cannot schedule soft IRQ "
13835 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13836 cqid, cq->queue_id, smp_processor_id());
13840 * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
13841 * @irq: Interrupt number.
13842 * @dev_id: The device context pointer.
13844 * This function is directly called from the PCI layer as an interrupt
13845 * service routine when device with SLI-4 interface spec is enabled with
13846 * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
13847 * IOCB ring event in the HBA. However, when the device is enabled with either
13848 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13849 * device-level interrupt handler. When the PCI slot is in error recovery
13850 * or the HBA is undergoing initialization, the interrupt handler will not
13851 * process the interrupt. The Flash Optimized Fabric ring events are handled
13852 * in the interrupt context. This function is called without any lock held.
13853 * It gets the hbalock to access and update SLI data structures. Note that
13854 * the EQ and CQ are mapped one-to-one, so the EQ index is
13855 * equal to the CQ index.
13857 * This function returns IRQ_HANDLED when interrupt is handled else it
13858 * returns IRQ_NONE.
13861 lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
13863 struct lpfc_hba *phba;
13864 struct lpfc_hba_eq_hdl *hba_eq_hdl;
13865 struct lpfc_queue *eq;
13866 struct lpfc_eqe *eqe;
13867 unsigned long iflag;
13870 /* Get the driver's phba structure from the dev_id */
13871 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
13872 phba = hba_eq_hdl->phba;
13874 if (unlikely(!phba))
13877 /* Get to the EQ struct associated with this vector */
13878 eq = phba->sli4_hba.fof_eq;
13882 /* Check device state for handling interrupt */
13883 if (unlikely(lpfc_intr_state_check(phba))) {
13884 /* Check again for link_state with lock held */
13885 spin_lock_irqsave(&phba->hbalock, iflag);
13886 if (phba->link_state < LPFC_LINK_DOWN)
13887 /* Flush, clear interrupt, and rearm the EQ */
13888 lpfc_sli4_eq_flush(phba, eq);
13889 spin_unlock_irqrestore(&phba->hbalock, iflag);
13894 * Process all the events on the FCP fast-path EQ
13896 while ((eqe = lpfc_sli4_eq_get(eq))) {
13897 lpfc_sli4_fof_handle_eqe(phba, eqe);
13898 if (!(++ecount % eq->entry_repost))
13900 eq->EQ_processed++;
13903 /* Track the max number of EQEs processed in 1 intr */
13904 if (ecount > eq->EQ_max_eqe)
13905 eq->EQ_max_eqe = ecount;
13908 if (unlikely(ecount == 0)) {
13911 if (phba->intr_type == MSIX)
13912 /* An MSI-X vector is dedicated to this EQ, so an empty EQ is unexpected */
13913 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13914 "9145 MSI-X interrupt with no EQE\n");
13916 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13917 "9146 ISR interrupt with no EQE\n");
13918 /* With MSI or INTx the interrupt line may be shared with another EQ */
13922 /* Always clear and re-arm the fast-path EQ */
13923 phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM);
13924 return IRQ_HANDLED;
13928 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
13929 * @irq: Interrupt number.
13930 * @dev_id: The device context pointer.
13932 * This function is directly called from the PCI layer as an interrupt
13933 * service routine when device with SLI-4 interface spec is enabled with
13934 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
13935 * ring event in the HBA. However, when the device is enabled with either
13936 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13937 * device-level interrupt handler. When the PCI slot is in error recovery
13938 * or the HBA is undergoing initialization, the interrupt handler will not
13939 * process the interrupt. The SCSI FCP fast-path ring events are handled in
13940 * the interrupt context. This function is called without any lock held.
13941 * It gets the hbalock to access and update SLI data structures. Note that
13942 * the FCP EQ and FCP CQ are mapped one-to-one, so the FCP EQ index is
13943 * equal to the FCP CQ index.
13945 * The link attention and ELS ring attention events are handled
13946 * by the worker thread. The interrupt handler signals the worker thread
13947 * and returns for these events. This function is called without any lock
13948 * held. It gets the hbalock to access and update SLI data structures.
13950 * This function returns IRQ_HANDLED when interrupt is handled else it
13951 * returns IRQ_NONE.
13954 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
13956 struct lpfc_hba *phba;
13957 struct lpfc_hba_eq_hdl *hba_eq_hdl;
13958 struct lpfc_queue *fpeq;
13959 struct lpfc_eqe *eqe;
13960 unsigned long iflag;
13964 /* Get the driver's phba structure from the dev_id */
13965 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
13966 phba = hba_eq_hdl->phba;
13967 hba_eqidx = hba_eq_hdl->idx;
13969 if (unlikely(!phba))
13971 if (unlikely(!phba->sli4_hba.hba_eq))
13974 /* Get to the EQ struct associated with this vector */
13975 fpeq = phba->sli4_hba.hba_eq[hba_eqidx];
13976 if (unlikely(!fpeq))
13979 if (lpfc_fcp_look_ahead) {
13980 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
13981 phba->sli4_hba.sli4_eq_clr_intr(fpeq);
13983 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
13988 /* Check device state for handling interrupt */
13989 if (unlikely(lpfc_intr_state_check(phba))) {
13990 /* Check again for link_state with lock held */
13991 spin_lock_irqsave(&phba->hbalock, iflag);
13992 if (phba->link_state < LPFC_LINK_DOWN)
13993 /* Flush, clear interrupt, and rearm the EQ */
13994 lpfc_sli4_eq_flush(phba, fpeq);
13995 spin_unlock_irqrestore(&phba->hbalock, iflag);
13996 if (lpfc_fcp_look_ahead)
13997 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
14002 * Process all the events on the FCP fast-path EQ
14004 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
14005 lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
14006 if (!(++ecount % fpeq->entry_repost))
14008 fpeq->EQ_processed++;
14011 /* Track the max number of EQEs processed in 1 intr */
14012 if (ecount > fpeq->EQ_max_eqe)
14013 fpeq->EQ_max_eqe = ecount;
14015 /* Always clear and re-arm the fast-path EQ */
14016 phba->sli4_hba.sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
14018 if (unlikely(ecount == 0)) {
14019 fpeq->EQ_no_entry++;
14021 if (lpfc_fcp_look_ahead) {
14022 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
14026 if (phba->intr_type == MSIX)
14027 /* An MSI-X vector is dedicated to this EQ, so an empty EQ is unexpected */
14028 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14029 "0358 MSI-X interrupt with no EQE\n");
14031 /* With MSI or INTx the interrupt line may be shared with another EQ */
14035 if (lpfc_fcp_look_ahead)
14036 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
14038 return IRQ_HANDLED;
14039 } /* lpfc_sli4_hba_intr_handler */
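/*
 * A minimal, self-contained sketch of the fcp_look_ahead gate used in the
 * handler above: an atomic counter per EQ ensures that only one context
 * at a time masks the vector and polls the EQ; a loser of the race backs
 * the counter off and reports the interrupt as not handled. Modeled with
 * C11 atomics; the sk_* names are hypothetical, not part of the driver.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct sk_eq_hdl {
	atomic_int in_use;	/* 1 = free, <= 0 = someone is polling */
};

static bool sk_try_claim_eq(struct sk_eq_hdl *hdl)
{
	/* mirrors atomic_dec_and_test(): claim succeeds when we hit zero */
	if (atomic_fetch_sub(&hdl->in_use, 1) - 1 == 0)
		return true;	/* we own the EQ; mask its interrupt */
	atomic_fetch_add(&hdl->in_use, 1);	/* lost the race; back off */
	return false;
}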
14042 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
14043 * @irq: Interrupt number.
14044 * @dev_id: The device context pointer.
14046 * This function is the device-level interrupt handler to device with SLI-4
14047 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
14048 * interrupt mode is enabled and there is an event in the HBA which requires
14049 * driver attention. This function invokes the slow-path interrupt attention
14050 * handling function and fast-path interrupt attention handling function in
14051 * turn to process the relevant HBA attention events. This function is called
14052 * without any lock held. It gets the hbalock to access and update SLI data
14053 * structures.
14055 * This function returns IRQ_HANDLED when interrupt is handled, else it
14056 * returns IRQ_NONE.
14059 lpfc_sli4_intr_handler(int irq, void *dev_id)
14061 struct lpfc_hba *phba;
14062 irqreturn_t hba_irq_rc;
14063 bool hba_handled = false;
14066 /* Get the driver's phba structure from the dev_id */
14067 phba = (struct lpfc_hba *)dev_id;
14069 if (unlikely(!phba))
14073 * Invoke fast-path host attention interrupt handling as appropriate.
14075 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
14076 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
14077 &phba->sli4_hba.hba_eq_hdl[qidx]);
14078 if (hba_irq_rc == IRQ_HANDLED)
14079 hba_handled = true;
14082 if (phba->cfg_fof) {
14083 hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
14084 &phba->sli4_hba.hba_eq_hdl[qidx]);
14085 if (hba_irq_rc == IRQ_HANDLED)
14086 hba_handled = true;
14089 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
14090 } /* lpfc_sli4_intr_handler */
14093 * lpfc_sli4_queue_free - free a queue structure and associated memory
14094 * @queue: The queue structure to free.
14096 * This function frees a queue structure and the DMAable memory used for
14097 * the host resident queue. This function must be called after destroying the
14098 * queue on the HBA.
14101 lpfc_sli4_queue_free(struct lpfc_queue *queue)
14103 struct lpfc_dmabuf *dmabuf;
14108 while (!list_empty(&queue->page_list)) {
14109 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
14111 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
14112 dmabuf->virt, dmabuf->phys);
14116 lpfc_free_rq_buffer(queue->phba, queue);
14117 kfree(queue->rqbp);
14120 if (!list_empty(&queue->wq_list))
14121 list_del(&queue->wq_list);
14128 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
14129 * @phba: The HBA that this queue is being created on.
14130 * @page_size: The size of a queue page
14131 * @entry_size: The size of each queue entry for this queue.
14132 * @entry_count: The number of entries that this queue will handle.
14134 * This function allocates a queue structure and the DMAable memory used for
14135 * the host resident queue. This function must be called before creating the
14136 * queue on the HBA.
14138 struct lpfc_queue *
14139 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
14140 uint32_t entry_size, uint32_t entry_count)
14142 struct lpfc_queue *queue;
14143 struct lpfc_dmabuf *dmabuf;
14144 int x, total_qe_count;
14146 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14148 if (!phba->sli4_hba.pc_sli4_params.supported)
14149 hw_page_size = page_size;
14151 queue = kzalloc(sizeof(struct lpfc_queue) +
14152 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
14155 queue->page_count = (ALIGN(entry_size * entry_count,
14156 hw_page_size))/hw_page_size;
14158 /* If needed, adjust page count to match the max the adapter supports */
14159 if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)
14160 queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
14162 INIT_LIST_HEAD(&queue->list);
14163 INIT_LIST_HEAD(&queue->wq_list);
14164 INIT_LIST_HEAD(&queue->wqfull_list);
14165 INIT_LIST_HEAD(&queue->page_list);
14166 INIT_LIST_HEAD(&queue->child_list);
14168 /* Set queue parameters now. If the system cannot provide memory
14169 * resources, the free routine needs to know what was allocated.
14171 queue->entry_size = entry_size;
14172 queue->entry_count = entry_count;
14173 queue->page_size = hw_page_size;
14174 queue->phba = phba;
14176 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
14177 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
14180 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
14181 hw_page_size, &dmabuf->phys,
14183 if (!dmabuf->virt) {
14187 dmabuf->buffer_tag = x;
14188 list_add_tail(&dmabuf->list, &queue->page_list);
14189 /* initialize queue's entry array */
14190 dma_pointer = dmabuf->virt;
14191 for (; total_qe_count < entry_count &&
14192 dma_pointer < (hw_page_size + dmabuf->virt);
14193 total_qe_count++, dma_pointer += entry_size) {
14194 queue->qe[total_qe_count].address = dma_pointer;
14197 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
14198 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
14200 /* entry_repost will be set during q creation */
14204 lpfc_sli4_queue_free(queue);
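/*
 * A minimal, self-contained check of the page-count arithmetic used in
 * lpfc_sli4_queue_alloc() above: the number of DMA pages backing a queue
 * is the total entry footprint rounded up to whole hardware pages. The
 * SK_ALIGN macro and sk_* names are hypothetical stand-ins for the
 * kernel's ALIGN(), not part of the lpfc driver.
 */
#include <assert.h>
#include <stdint.h>

#define SK_ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))

static uint32_t sk_page_count(uint32_t entry_size, uint32_t entry_count,
			      uint32_t hw_page_size)
{
	return SK_ALIGN(entry_size * entry_count, hw_page_size) /
	       hw_page_size;
}

static void sk_page_count_examples(void)
{
	/* 1024 EQEs of 4 bytes fit in a single 4KB page */
	assert(sk_page_count(4, 1024, 4096) == 1);
	/* 512 WQEs of 64 bytes need eight 4KB pages */
	assert(sk_page_count(64, 512, 4096) == 8);
}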
14209 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
14210 * @phba: HBA structure that indicates port to create a queue on.
14211 * @pci_barset: PCI BAR set flag.
14213 * This function shall perform iomap of the specified PCI BAR address to host
14214 * memory address if not already done so and return it. The returned host
14215 * memory address can be NULL.
14217 static void __iomem *
14218 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14223 switch (pci_barset) {
14224 case WQ_PCI_BAR_0_AND_1:
14225 return phba->pci_bar0_memmap_p;
14226 case WQ_PCI_BAR_2_AND_3:
14227 return phba->pci_bar2_memmap_p;
14228 case WQ_PCI_BAR_4_AND_5:
14229 return phba->pci_bar4_memmap_p;
14237 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs
14238 * @phba: HBA structure that indicates port to create a queue on.
14239 * @startq: The starting FCP EQ to modify
14241 * This function sends a MODIFY_EQ_DELAY mailbox command to the HBA.
14242 * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be
14243 * updated in one mailbox command.
14245 * The @phba struct is used to send mailbox command to HBA. The @startq
14246 * is used to get the starting FCP EQ to change.
14247 * This function is synchronous and will wait for the mailbox
14248 * command to finish before continuing.
14250 * On success this function will return zero. If unable to allocate enough
14251 * memory this function will return -ENOMEM. If the mailbox command fails
14252 * this function will return -ENXIO.
14255 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
14256 uint32_t numq, uint32_t imax)
14258 struct lpfc_mbx_modify_eq_delay *eq_delay;
14259 LPFC_MBOXQ_t *mbox;
14260 struct lpfc_queue *eq;
14261 int cnt, rc, length, status = 0;
14262 uint32_t shdr_status, shdr_add_status;
14263 uint32_t result, val;
14265 union lpfc_sli4_cfg_shdr *shdr;
14268 if (startq >= phba->io_channel_irqs)
14271 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14274 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
14275 sizeof(struct lpfc_sli4_cfg_mhdr));
14276 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14277 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
14278 length, LPFC_SLI4_MBX_EMBED);
14279 eq_delay = &mbox->u.mqe.un.eq_delay;
14281 /* Calculate delay multiplier from maximum interrupts per second */
14282 result = imax / phba->io_channel_irqs;
14283 if (result > LPFC_DMULT_CONST || result == 0)
14286 dmult = LPFC_DMULT_CONST/result - 1;
14287 if (dmult > LPFC_DMULT_MAX)
14288 dmult = LPFC_DMULT_MAX;
14291 for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) {
14292 eq = phba->sli4_hba.hba_eq[qidx];
14296 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
14297 eq_delay->u.request.eq[cnt].phase = 0;
14298 eq_delay->u.request.eq[cnt].delay_multi = dmult;
14301 /* q_mode is only used for auto_imax */
14302 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
14303 /* Use EQ Delay Register method for q_mode */
14305 /* Convert for EQ Delay register */
14306 val = phba->cfg_fcp_imax;
14308 /* First, interrupts per sec per EQ */
14309 val = phba->cfg_fcp_imax /
14310 phba->io_channel_irqs;
14312 /* us delay between each interrupt */
14313 val = LPFC_SEC_TO_USEC / val;
14323 eq_delay->u.request.num_eq = cnt;
14325 mbox->vport = phba->pport;
14326 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14327 mbox->context1 = NULL;
14328 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14329 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
14330 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14331 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14332 if (shdr_status || shdr_add_status || rc) {
14333 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14334 "2512 MODIFY_EQ_DELAY mailbox failed with "
14335 "status x%x add_status x%x, mbx status x%x\n",
14336 shdr_status, shdr_add_status, rc);
14339 mempool_free(mbox, phba->mbox_mem_pool);
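/*
 * A minimal, self-contained sketch of the delay-multiplier arithmetic in
 * lpfc_modify_hba_eq_delay() above: the target interrupts per second are
 * spread across all IRQ channels, converted to a multiplier, and capped.
 * The SK_DMULT_* constants are assumptions mirroring LPFC_DMULT_CONST and
 * LPFC_DMULT_MAX; the sk_* names are hypothetical, not driver code.
 */
#include <stdint.h>

#define SK_DMULT_CONST 651042u	/* assumption: mirrors LPFC_DMULT_CONST */
#define SK_DMULT_MAX   63u	/* assumption: mirrors LPFC_DMULT_MAX */

static uint32_t sk_eq_delay_multiplier(uint32_t imax, uint32_t nr_irqs)
{
	uint32_t per_eq = nr_irqs ? imax / nr_irqs : 0;
	uint32_t dmult;

	if (per_eq == 0 || per_eq > SK_DMULT_CONST)
		return 0;	/* out of range: no added delay */
	dmult = SK_DMULT_CONST / per_eq - 1;
	return dmult > SK_DMULT_MAX ? SK_DMULT_MAX : dmult;
}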
14344 * lpfc_eq_create - Create an Event Queue on the HBA
14345 * @phba: HBA structure that indicates port to create a queue on.
14346 * @eq: The queue structure to use to create the event queue.
14347 * @imax: The maximum interrupt per second limit.
14349 * This function creates an event queue, as detailed in @eq, on a port,
14350 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
14352 * The @phba struct is used to send mailbox command to HBA. The @eq struct
14353 * is used to get the entry count and entry size that are necessary to
14354 * determine the number of pages to allocate and use for this queue. This
14355 * function will send the EQ_CREATE mailbox command to the HBA to setup the
14356 * event queue. This function is synchronous and will wait for the mailbox
14357 * command to finish before continuing.
14359 * On success this function will return zero. If unable to allocate enough
14360 * memory this function will return -ENOMEM. If the queue create mailbox command
14361 * fails this function will return -ENXIO.
14364 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
14366 struct lpfc_mbx_eq_create *eq_create;
14367 LPFC_MBOXQ_t *mbox;
14368 int rc, length, status = 0;
14369 struct lpfc_dmabuf *dmabuf;
14370 uint32_t shdr_status, shdr_add_status;
14371 union lpfc_sli4_cfg_shdr *shdr;
14373 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14375 /* sanity check on queue memory */
14378 if (!phba->sli4_hba.pc_sli4_params.supported)
14379 hw_page_size = SLI4_PAGE_SIZE;
14381 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14384 length = (sizeof(struct lpfc_mbx_eq_create) -
14385 sizeof(struct lpfc_sli4_cfg_mhdr));
14386 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14387 LPFC_MBOX_OPCODE_EQ_CREATE,
14388 length, LPFC_SLI4_MBX_EMBED);
14389 eq_create = &mbox->u.mqe.un.eq_create;
14390 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
14391 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
14393 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
14395 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
14397 /* Use version 2 of CREATE_EQ if eqav is set */
14398 if (phba->sli4_hba.pc_sli4_params.eqav) {
14399 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14400 LPFC_Q_CREATE_VERSION_2);
14401 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
14402 phba->sli4_hba.pc_sli4_params.eqav);
14405 /* don't setup delay multiplier using EQ_CREATE */
14407 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
14409 switch (eq->entry_count) {
14410 default:
14411 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14412 "0360 Unsupported EQ count. (%d)\n",
14414 if (eq->entry_count < 256)
14415 return -EINVAL;
14416 /* otherwise default to smallest count (drop through) */
14418 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14422 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14426 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14430 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14434 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14438 list_for_each_entry(dmabuf, &eq->page_list, list) {
14439 memset(dmabuf->virt, 0, hw_page_size);
14440 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14441 putPaddrLow(dmabuf->phys);
14442 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14443 putPaddrHigh(dmabuf->phys);
14445 mbox->vport = phba->pport;
14446 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14447 mbox->context1 = NULL;
14448 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14449 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14450 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14451 if (shdr_status || shdr_add_status || rc) {
14452 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14453 "2500 EQ_CREATE mailbox failed with "
14454 "status x%x add_status x%x, mbx status x%x\n",
14455 shdr_status, shdr_add_status, rc);
14458 eq->type = LPFC_EQ;
14459 eq->subtype = LPFC_NONE;
14460 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
14461 if (eq->queue_id == 0xFFFF)
14463 eq->host_index = 0;
14465 eq->entry_repost = LPFC_EQ_REPOST;
14467 mempool_free(mbox, phba->mbox_mem_pool);
14472 * lpfc_cq_create - Create a Completion Queue on the HBA
14473 * @phba: HBA structure that indicates port to create a queue on.
14474 * @cq: The queue structure to use to create the completion queue.
14475 * @eq: The event queue to bind this completion queue to.
14477 * This function creates a completion queue, as detailed in @cq, on a port,
14478 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
14480 * The @phba struct is used to send mailbox command to HBA. The @cq struct
14481 * is used to get the entry count and entry size that are necessary to
14482 * determine the number of pages to allocate and use for this queue. The @eq
14483 * is used to indicate which event queue to bind this completion queue to. This
14484 * function will send the CQ_CREATE mailbox command to the HBA to setup the
14485 * completion queue. This function is synchronous and will wait for the mailbox
14486 * command to finish before continuing.
14488 * On success this function will return zero. If unable to allocate enough
14489 * memory this function will return -ENOMEM. If the queue create mailbox command
14490 * fails this function will return -ENXIO.
14493 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14494 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
14496 struct lpfc_mbx_cq_create *cq_create;
14497 struct lpfc_dmabuf *dmabuf;
14498 LPFC_MBOXQ_t *mbox;
14499 int rc, length, status = 0;
14500 uint32_t shdr_status, shdr_add_status;
14501 union lpfc_sli4_cfg_shdr *shdr;
14502 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14504 /* sanity check on queue memory */
14507 if (!phba->sli4_hba.pc_sli4_params.supported)
14508 hw_page_size = cq->page_size;
14510 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14513 length = (sizeof(struct lpfc_mbx_cq_create) -
14514 sizeof(struct lpfc_sli4_cfg_mhdr));
14515 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14516 LPFC_MBOX_OPCODE_CQ_CREATE,
14517 length, LPFC_SLI4_MBX_EMBED);
14518 cq_create = &mbox->u.mqe.un.cq_create;
14519 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
14520 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
14522 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
14523 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
14524 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14525 phba->sli4_hba.pc_sli4_params.cqv);
14526 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
14527 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
14528 (cq->page_size / SLI4_PAGE_SIZE));
14529 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
14531 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
14532 phba->sli4_hba.pc_sli4_params.cqav);
14534 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
14537 switch (cq->entry_count) {
14540 if (phba->sli4_hba.pc_sli4_params.cqv ==
14541 LPFC_Q_CREATE_VERSION_2) {
14542 cq_create->u.request.context.lpfc_cq_context_count =
14544 bf_set(lpfc_cq_context_count,
14545 &cq_create->u.request.context,
14546 LPFC_CQ_CNT_WORD7);
14551 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14552 "0361 Unsupported CQ count: "
14553 "entry cnt %d sz %d pg cnt %d\n",
14554 cq->entry_count, cq->entry_size,
14556 if (cq->entry_count < 256) {
14560 /* otherwise default to smallest count (drop through) */
14562 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14566 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14570 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14574 list_for_each_entry(dmabuf, &cq->page_list, list) {
14575 memset(dmabuf->virt, 0, cq->page_size);
14576 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14577 putPaddrLow(dmabuf->phys);
14578 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14579 putPaddrHigh(dmabuf->phys);
14581 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14583 /* The IOCTL status is embedded in the mailbox subheader. */
14584 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14585 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14586 if (shdr_status || shdr_add_status || rc) {
14587 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14588 "2501 CQ_CREATE mailbox failed with "
14589 "status x%x add_status x%x, mbx status x%x\n",
14590 shdr_status, shdr_add_status, rc);
14594 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14595 if (cq->queue_id == 0xFFFF) {
14599 /* link the cq onto the parent eq child list */
14600 list_add_tail(&cq->list, &eq->child_list);
14601 /* Set up completion queue's type and subtype */
14603 cq->subtype = subtype;
14604 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14605 cq->assoc_qid = eq->queue_id;
14606 cq->host_index = 0;
14608 cq->entry_repost = LPFC_CQ_REPOST;
14611 mempool_free(mbox, phba->mbox_mem_pool);
14616 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
14617 * @phba: HBA structure that indicates port to create a queue on.
14618 * @cqp: The queue structure array to use to create the completion queues.
14619 * @eqp: The event queue array to bind these completion queues to.
14621 * This function creates a set of completion queues to support MRQ,
14622 * as detailed in @cqp, on a port,
14623 * described by @phba, by sending a CREATE_CQ_SET mailbox command to the HBA.
14625 * The @phba struct is used to send mailbox command to HBA. The @cq struct
14626 * is used to get the entry count and entry size that are necessary to
14627 * determine the number of pages to allocate and use for this queue. The @eq
14628 * is used to indicate which event queue to bind this completion queue to. This
14629 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the
14630 * completion queues. This function is synchronous and will wait for the mailbox
14631 * command to finish before continuing.
14633 * On success this function will return zero. If unable to allocate enough
14634 * memory this function will return -ENOMEM. If the queue create mailbox command
14635 * fails this function will return -ENXIO.
14638 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
14639 struct lpfc_queue **eqp, uint32_t type, uint32_t subtype)
14641 struct lpfc_queue *cq;
14642 struct lpfc_queue *eq;
14643 struct lpfc_mbx_cq_create_set *cq_set;
14644 struct lpfc_dmabuf *dmabuf;
14645 LPFC_MBOXQ_t *mbox;
14646 int rc, length, alloclen, status = 0;
14647 int cnt, idx, numcq, page_idx = 0;
14648 uint32_t shdr_status, shdr_add_status;
14649 union lpfc_sli4_cfg_shdr *shdr;
14650 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14652 /* sanity check on queue memory */
14653 numcq = phba->cfg_nvmet_mrq;
14654 if (!cqp || !eqp || !numcq)
14657 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14661 length = sizeof(struct lpfc_mbx_cq_create_set);
14662 length += ((numcq * cqp[0]->page_count) *
14663 sizeof(struct dma_address));
14664 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14665 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
14666 LPFC_SLI4_MBX_NEMBED);
14667 if (alloclen < length) {
14668 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14669 "3098 Allocated DMA memory size (%d) is "
14670 "less than the requested DMA memory size "
14671 "(%d)\n", alloclen, length);
14675 cq_set = mbox->sge_array->addr[0];
14676 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
14677 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
14679 for (idx = 0; idx < numcq; idx++) {
14686 if (!phba->sli4_hba.pc_sli4_params.supported)
14687 hw_page_size = cq->page_size;
14691 bf_set(lpfc_mbx_cq_create_set_page_size,
14692 &cq_set->u.request,
14693 (hw_page_size / SLI4_PAGE_SIZE));
14694 bf_set(lpfc_mbx_cq_create_set_num_pages,
14695 &cq_set->u.request, cq->page_count);
14696 bf_set(lpfc_mbx_cq_create_set_evt,
14697 &cq_set->u.request, 1);
14698 bf_set(lpfc_mbx_cq_create_set_valid,
14699 &cq_set->u.request, 1);
14700 bf_set(lpfc_mbx_cq_create_set_cqe_size,
14701 &cq_set->u.request, 0);
14702 bf_set(lpfc_mbx_cq_create_set_num_cq,
14703 &cq_set->u.request, numcq);
14704 bf_set(lpfc_mbx_cq_create_set_autovalid,
14705 &cq_set->u.request,
14706 phba->sli4_hba.pc_sli4_params.cqav);
14707 switch (cq->entry_count) {
14710 if (phba->sli4_hba.pc_sli4_params.cqv ==
14711 LPFC_Q_CREATE_VERSION_2) {
14712 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14713 &cq_set->u.request,
14715 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14716 &cq_set->u.request,
14717 LPFC_CQ_CNT_WORD7);
14722 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14723 "3118 Bad CQ count. (%d)\n",
14725 if (cq->entry_count < 256) {
14729 /* otherwise default to smallest (drop thru) */
14731 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14732 &cq_set->u.request, LPFC_CQ_CNT_256);
14735 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14736 &cq_set->u.request, LPFC_CQ_CNT_512);
14739 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14740 &cq_set->u.request, LPFC_CQ_CNT_1024);
14743 bf_set(lpfc_mbx_cq_create_set_eq_id0,
14744 &cq_set->u.request, eq->queue_id);
14747 bf_set(lpfc_mbx_cq_create_set_eq_id1,
14748 &cq_set->u.request, eq->queue_id);
14751 bf_set(lpfc_mbx_cq_create_set_eq_id2,
14752 &cq_set->u.request, eq->queue_id);
14755 bf_set(lpfc_mbx_cq_create_set_eq_id3,
14756 &cq_set->u.request, eq->queue_id);
14759 bf_set(lpfc_mbx_cq_create_set_eq_id4,
14760 &cq_set->u.request, eq->queue_id);
14763 bf_set(lpfc_mbx_cq_create_set_eq_id5,
14764 &cq_set->u.request, eq->queue_id);
14767 bf_set(lpfc_mbx_cq_create_set_eq_id6,
14768 &cq_set->u.request, eq->queue_id);
14771 bf_set(lpfc_mbx_cq_create_set_eq_id7,
14772 &cq_set->u.request, eq->queue_id);
14775 bf_set(lpfc_mbx_cq_create_set_eq_id8,
14776 &cq_set->u.request, eq->queue_id);
14779 bf_set(lpfc_mbx_cq_create_set_eq_id9,
14780 &cq_set->u.request, eq->queue_id);
14783 bf_set(lpfc_mbx_cq_create_set_eq_id10,
14784 &cq_set->u.request, eq->queue_id);
14787 bf_set(lpfc_mbx_cq_create_set_eq_id11,
14788 &cq_set->u.request, eq->queue_id);
14791 bf_set(lpfc_mbx_cq_create_set_eq_id12,
14792 &cq_set->u.request, eq->queue_id);
14795 bf_set(lpfc_mbx_cq_create_set_eq_id13,
14796 &cq_set->u.request, eq->queue_id);
14799 bf_set(lpfc_mbx_cq_create_set_eq_id14,
14800 &cq_set->u.request, eq->queue_id);
14803 bf_set(lpfc_mbx_cq_create_set_eq_id15,
14804 &cq_set->u.request, eq->queue_id);
14808 /* link the cq onto the parent eq child list */
14809 list_add_tail(&cq->list, &eq->child_list);
14810 /* Set up completion queue's type and subtype */
14812 cq->subtype = subtype;
14813 cq->assoc_qid = eq->queue_id;
14814 cq->host_index = 0;
14816 cq->entry_repost = LPFC_CQ_REPOST;
14820 list_for_each_entry(dmabuf, &cq->page_list, list) {
14821 memset(dmabuf->virt, 0, hw_page_size);
14822 cnt = page_idx + dmabuf->buffer_tag;
14823 cq_set->u.request.page[cnt].addr_lo =
14824 putPaddrLow(dmabuf->phys);
14825 cq_set->u.request.page[cnt].addr_hi =
14826 putPaddrHigh(dmabuf->phys);
14832 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14834 /* The IOCTL status is embedded in the mailbox subheader. */
14835 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14836 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14837 if (shdr_status || shdr_add_status || rc) {
14838 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14839 "3119 CQ_CREATE_SET mailbox failed with "
14840 "status x%x add_status x%x, mbx status x%x\n",
14841 shdr_status, shdr_add_status, rc);
14845 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
14846 if (rc == 0xFFFF) {
14851 for (idx = 0; idx < numcq; idx++) {
14853 cq->queue_id = rc + idx;
14857 lpfc_sli4_mbox_cmd_free(phba, mbox);
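/*
 * A minimal, self-contained sketch of the queue-ID assignment at the end
 * of lpfc_cq_create_set() above: CREATE_CQ_SET returns a single base
 * queue ID, and the created CQs are numbered consecutively from it. The
 * sk_* names and simplified types are hypothetical, not driver code.
 */
#include <stdint.h>

struct sk_cq_entry { uint16_t queue_id; };

static void sk_assign_cq_set_ids(struct sk_cq_entry **cqs, int numcq,
				 uint16_t base_id)
{
	int idx;

	for (idx = 0; idx < numcq; idx++)
		cqs[idx]->queue_id = base_id + idx;	/* consecutive IDs */
}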
14862 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
14863 * @phba: HBA structure that indicates port to create a queue on.
14864 * @mq: The queue structure to use to create the mailbox queue.
14865 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
14866 * @cq: The completion queue to associate with this mailbox queue.
14868 * This function provides fallback (fb) functionality when the
14869 * mq_create_ext fails on older FW generations. Its purpose is identical
14870 * to mq_create_ext otherwise.
14872 * This routine cannot fail as all attributes were previously accessed and
14873 * initialized in mq_create_ext.
14876 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
14877 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
14879 struct lpfc_mbx_mq_create *mq_create;
14880 struct lpfc_dmabuf *dmabuf;
14883 length = (sizeof(struct lpfc_mbx_mq_create) -
14884 sizeof(struct lpfc_sli4_cfg_mhdr));
14885 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14886 LPFC_MBOX_OPCODE_MQ_CREATE,
14887 length, LPFC_SLI4_MBX_EMBED);
14888 mq_create = &mbox->u.mqe.un.mq_create;
14889 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
14891 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
14893 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
14894 switch (mq->entry_count) {
14896 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14897 LPFC_MQ_RING_SIZE_16);
14900 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14901 LPFC_MQ_RING_SIZE_32);
14904 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14905 LPFC_MQ_RING_SIZE_64);
14908 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14909 LPFC_MQ_RING_SIZE_128);
14912 list_for_each_entry(dmabuf, &mq->page_list, list) {
14913 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14914 putPaddrLow(dmabuf->phys);
14915 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14916 putPaddrHigh(dmabuf->phys);
14921 * lpfc_mq_create - Create a mailbox Queue on the HBA
14922 * @phba: HBA structure that indicates port to create a queue on.
14923 * @mq: The queue structure to use to create the mailbox queue.
14924 * @cq: The completion queue to associate with this mailbox queue.
14925 * @subtype: The queue's subtype.
14927 * This function creates a mailbox queue, as detailed in @mq, on a port,
14928 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
14930 * The @phba struct is used to send mailbox command to HBA. The @cq struct
14931 * is used to get the entry count and entry size that are necessary to
14932 * determine the number of pages to allocate and use for this queue. This
14933 * function will send the MQ_CREATE mailbox command to the HBA to setup the
14934 * mailbox queue. This function is synchronous and will wait for the mailbox
14935 * command to finish before continuing.
14937 * On success this function will return zero. If unable to allocate enough
14938 * memory this function will return -ENOMEM. If the queue create mailbox command
14939 * fails this function will return -ENXIO.
14942 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
14943 struct lpfc_queue *cq, uint32_t subtype)
14945 struct lpfc_mbx_mq_create *mq_create;
14946 struct lpfc_mbx_mq_create_ext *mq_create_ext;
14947 struct lpfc_dmabuf *dmabuf;
14948 LPFC_MBOXQ_t *mbox;
14949 int rc, length, status = 0;
14950 uint32_t shdr_status, shdr_add_status;
14951 union lpfc_sli4_cfg_shdr *shdr;
14952 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14954 /* sanity check on queue memory */
14957 if (!phba->sli4_hba.pc_sli4_params.supported)
14958 hw_page_size = SLI4_PAGE_SIZE;
14960 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14963 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
14964 sizeof(struct lpfc_sli4_cfg_mhdr));
14965 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14966 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
14967 length, LPFC_SLI4_MBX_EMBED);
14969 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
14970 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
14971 bf_set(lpfc_mbx_mq_create_ext_num_pages,
14972 &mq_create_ext->u.request, mq->page_count);
14973 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
14974 &mq_create_ext->u.request, 1);
14975 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
14976 &mq_create_ext->u.request, 1);
14977 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
14978 &mq_create_ext->u.request, 1);
14979 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
14980 &mq_create_ext->u.request, 1);
14981 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
14982 &mq_create_ext->u.request, 1);
14983 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
14984 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14985 phba->sli4_hba.pc_sli4_params.mqv);
14986 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
14987 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
14990 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
14992 switch (mq->entry_count) {
14994 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14995 "0362 Unsupported MQ count. (%d)\n",
14997 if (mq->entry_count < 16) {
15001 /* otherwise default to smallest count (drop through) */
15003 bf_set(lpfc_mq_context_ring_size,
15004 &mq_create_ext->u.request.context,
15005 LPFC_MQ_RING_SIZE_16);
15008 bf_set(lpfc_mq_context_ring_size,
15009 &mq_create_ext->u.request.context,
15010 LPFC_MQ_RING_SIZE_32);
15013 bf_set(lpfc_mq_context_ring_size,
15014 &mq_create_ext->u.request.context,
15015 LPFC_MQ_RING_SIZE_64);
15018 bf_set(lpfc_mq_context_ring_size,
15019 &mq_create_ext->u.request.context,
15020 LPFC_MQ_RING_SIZE_128);
15023 list_for_each_entry(dmabuf, &mq->page_list, list) {
15024 memset(dmabuf->virt, 0, hw_page_size);
15025 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
15026 putPaddrLow(dmabuf->phys);
15027 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
15028 putPaddrHigh(dmabuf->phys);
15030 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15031 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15032 &mq_create_ext->u.response);
15033 if (rc != MBX_SUCCESS) {
15034 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15035 "2795 MQ_CREATE_EXT failed with "
15036 "status x%x. Failback to MQ_CREATE.\n",
15038 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
15039 mq_create = &mbox->u.mqe.un.mq_create;
15040 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15041 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
15042 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15043 &mq_create->u.response);
15046 /* The IOCTL status is embedded in the mailbox subheader. */
15047 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15048 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15049 if (shdr_status || shdr_add_status || rc) {
15050 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15051 "2502 MQ_CREATE mailbox failed with "
15052 "status x%x add_status x%x, mbx status x%x\n",
15053 shdr_status, shdr_add_status, rc);
15057 if (mq->queue_id == 0xFFFF) {
15061 mq->type = LPFC_MQ;
15062 mq->assoc_qid = cq->queue_id;
15063 mq->subtype = subtype;
15064 mq->host_index = 0;
15066 mq->entry_repost = LPFC_MQ_REPOST;
15068 /* link the mq onto the parent cq child list */
15069 list_add_tail(&mq->list, &cq->child_list);
15071 mempool_free(mbox, phba->mbox_mem_pool);
15076 * lpfc_wq_create - Create a Work Queue on the HBA
15077 * @phba: HBA structure that indicates port to create a queue on.
15078 * @wq: The queue structure to use to create the work queue.
15079 * @cq: The completion queue to bind this work queue to.
15080 * @subtype: The subtype of the work queue indicating its functionality.
15082 * This function creates a work queue, as detailed in @wq, on a port, described
15083 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
15085 * The @phba struct is used to send mailbox command to HBA. The @wq struct
15086 * is used to get the entry count and entry size that are necessary to
15087 * determine the number of pages to allocate and use for this queue. The @cq
15088 * is used to indicate which completion queue to bind this work queue to. This
15089 * function will send the WQ_CREATE mailbox command to the HBA to setup the
15090 * work queue. This function is synchronous and will wait for the mailbox
15091 * command to finish before continuing.
15093 * On success this function will return zero. If unable to allocate enough
15094 * memory this function will return -ENOMEM. If the queue create mailbox command
15095 * fails this function will return -ENXIO.
15098 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
15099 struct lpfc_queue *cq, uint32_t subtype)
15101 struct lpfc_mbx_wq_create *wq_create;
15102 struct lpfc_dmabuf *dmabuf;
15103 LPFC_MBOXQ_t *mbox;
15104 int rc, length, status = 0;
15105 uint32_t shdr_status, shdr_add_status;
15106 union lpfc_sli4_cfg_shdr *shdr;
15107 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15108 struct dma_address *page;
15109 void __iomem *bar_memmap_p;
15110 uint32_t db_offset;
15111 uint16_t pci_barset;
15112 uint8_t dpp_barset;
15113 uint32_t dpp_offset;
15114 unsigned long pg_addr;
15115 uint8_t wq_create_version;
15117 /* sanity check on queue memory */
15120 if (!phba->sli4_hba.pc_sli4_params.supported)
15121 hw_page_size = wq->page_size;
15123 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15126 length = (sizeof(struct lpfc_mbx_wq_create) -
15127 sizeof(struct lpfc_sli4_cfg_mhdr));
15128 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15129 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
15130 length, LPFC_SLI4_MBX_EMBED);
15131 wq_create = &mbox->u.mqe.un.wq_create;
15132 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
15133 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
15135 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
15138 /* wqv is the earliest version supported, NOT the latest */
15139 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15140 phba->sli4_hba.pc_sli4_params.wqv);
15142 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
15143 (wq->page_size > SLI4_PAGE_SIZE))
15144 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15145 else
15146 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15154 switch (wq_create_version) {
15155 case LPFC_Q_CREATE_VERSION_1:
15156 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
15158 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15159 LPFC_Q_CREATE_VERSION_1);
15161 switch (wq->entry_size) {
15164 bf_set(lpfc_mbx_wq_create_wqe_size,
15165 &wq_create->u.request_1,
15166 LPFC_WQ_WQE_SIZE_64);
15169 bf_set(lpfc_mbx_wq_create_wqe_size,
15170 &wq_create->u.request_1,
15171 LPFC_WQ_WQE_SIZE_128);
15174 /* Request DPP by default */
15175 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
15176 bf_set(lpfc_mbx_wq_create_page_size,
15177 &wq_create->u.request_1,
15178 (wq->page_size / SLI4_PAGE_SIZE));
15179 page = wq_create->u.request_1.page;
15182 page = wq_create->u.request.page;
15186 list_for_each_entry(dmabuf, &wq->page_list, list) {
15187 memset(dmabuf->virt, 0, hw_page_size);
15188 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
15189 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
15192 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15193 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
15195 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15196 /* The IOCTL status is embedded in the mailbox subheader. */
15197 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15198 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15199 if (shdr_status || shdr_add_status || rc) {
15200 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15201 "2503 WQ_CREATE mailbox failed with "
15202 "status x%x add_status x%x, mbx status x%x\n",
15203 shdr_status, shdr_add_status, rc);
15208 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
15209 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
15210 &wq_create->u.response);
15212 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
15213 &wq_create->u.response_1);
15215 if (wq->queue_id == 0xFFFF) {
15220 wq->db_format = LPFC_DB_LIST_FORMAT;
15221 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
15222 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15223 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
15224 &wq_create->u.response);
15225 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
15226 (wq->db_format != LPFC_DB_RING_FORMAT)) {
15227 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15228 "3265 WQ[%d] doorbell format "
15229 "not supported: x%x\n",
15230 wq->queue_id, wq->db_format);
15234 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
15235 &wq_create->u.response);
15236 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15238 if (!bar_memmap_p) {
15239 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15240 "3263 WQ[%d] failed to memmap "
15241 "pci barset:x%x\n",
15242 wq->queue_id, pci_barset);
15246 db_offset = wq_create->u.response.doorbell_offset;
15247 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
15248 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
15249 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15250 "3252 WQ[%d] doorbell offset "
15251 "not supported: x%x\n",
15252 wq->queue_id, db_offset);
15256 wq->db_regaddr = bar_memmap_p + db_offset;
15257 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15258 "3264 WQ[%d]: barset:x%x, offset:x%x, "
15259 "format:x%x\n", wq->queue_id,
15260 pci_barset, db_offset, wq->db_format);
15262 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15264 /* Check if DPP was honored by the firmware */
15265 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
15266 &wq_create->u.response_1);
15267 if (wq->dpp_enable) {
15268 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
15269 &wq_create->u.response_1);
15270 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15272 if (!bar_memmap_p) {
15273 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15274 "3267 WQ[%d] failed to memmap "
15275 "pci barset:x%x\n",
15276 wq->queue_id, pci_barset);
15280 db_offset = wq_create->u.response_1.doorbell_offset;
15281 wq->db_regaddr = bar_memmap_p + db_offset;
15282 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
15283 &wq_create->u.response_1);
15284 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
15285 &wq_create->u.response_1);
15286 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15288 if (!bar_memmap_p) {
15289 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15290 "3268 WQ[%d] failed to memmap "
15291 "pci barset:x%x\n",
15292 wq->queue_id, dpp_barset);
15296 dpp_offset = wq_create->u.response_1.dpp_offset;
15297 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
15298 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15299 "3271 WQ[%d]: barset:x%x, offset:x%x, "
15300 "dpp_id:x%x dpp_barset:x%x "
15301 "dpp_offset:x%x\n",
15302 wq->queue_id, pci_barset, db_offset,
15303 wq->dpp_id, dpp_barset, dpp_offset);
15305 /* Enable combined writes for DPP aperture */
15306 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
15308 rc = set_memory_wc(pg_addr, 1);
15310 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15311 "3272 Cannot setup Combined "
15312 "Write on WQ[%d] - disable DPP\n",
15314 phba->cfg_enable_dpp = 0;
15317 phba->cfg_enable_dpp = 0;
15320 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15322 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
15323 if (wq->pring == NULL) {
15327 wq->type = LPFC_WQ;
15328 wq->assoc_qid = cq->queue_id;
15329 wq->subtype = subtype;
15330 wq->host_index = 0;
15332 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
15334 /* link the wq onto the parent cq child list */
15335 list_add_tail(&wq->list, &cq->child_list);
15337 mempool_free(mbox, phba->mbox_mem_pool);
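/*
 * Usage sketch (illustrative only, not driver code): creating the ELS work
 * queue bound to the ELS completion queue, roughly as lpfc_sli4_queue_setup()
 * drives this routine. The queue pointers assume the default SLI4 setup;
 * error handling is elided.
 *
 *	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
 *			    phba->sli4_hba.els_cq, LPFC_ELS);
 *	if (rc)
 *		goto out_destroy;	(hypothetical cleanup label)
 */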
15342 * lpfc_rq_create - Create a Receive Queue on the HBA
15343 * @phba: HBA structure that indicates port to create a queue on.
15344 * @hrq: The queue structure to use to create the header receive queue.
15345 * @drq: The queue structure to use to create the data receive queue.
15346 * @cq: The completion queue to bind this work queue to.
15348 * This function creates a receive buffer queue pair, as detailed in @hrq and
15349 * @drq, on a port described by @phba, by sending a RQ_CREATE mailbox command
15352 * The @phba struct is used to send the mailbox command to the HBA. The @drq
15353 * and @hrq structs are used to get the entry counts needed to determine the
15354 * number of pages to use for each queue. The @cq indicates the completion
15355 * queue to which received buffers posted to these queues are bound.
15356 * This function sends the RQ_CREATE mailbox command to the HBA to set up the
15357 * receive queue pair. The command is issued in polled mode, so this function
15358 * waits for it to finish before continuing.
15360 * On success this function returns zero. If unable to allocate enough
15361 * memory this function returns -ENOMEM. If the queue create mailbox command
15362 * fails this function returns -ENXIO.
15365 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15366 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
15368 struct lpfc_mbx_rq_create *rq_create;
15369 struct lpfc_dmabuf *dmabuf;
15370 LPFC_MBOXQ_t *mbox;
15371 int rc, length, status = 0;
15372 uint32_t shdr_status, shdr_add_status;
15373 union lpfc_sli4_cfg_shdr *shdr;
15374 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15375 void __iomem *bar_memmap_p;
15376 uint32_t db_offset;
15377 uint16_t pci_barset;
15379 /* sanity check on queue memory */
15380 if (!hrq || !drq || !cq)
15382 if (!phba->sli4_hba.pc_sli4_params.supported)
15383 hw_page_size = SLI4_PAGE_SIZE;
15385 if (hrq->entry_count != drq->entry_count)
15387 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15390 length = (sizeof(struct lpfc_mbx_rq_create) -
15391 sizeof(struct lpfc_sli4_cfg_mhdr));
15392 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15393 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15394 length, LPFC_SLI4_MBX_EMBED);
15395 rq_create = &mbox->u.mqe.un.rq_create;
15396 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15397 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15398 phba->sli4_hba.pc_sli4_params.rqv);
15399 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15400 bf_set(lpfc_rq_context_rqe_count_1,
15401 &rq_create->u.request.context,
15403 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
15404 bf_set(lpfc_rq_context_rqe_size,
15405 &rq_create->u.request.context,
15407 bf_set(lpfc_rq_context_page_size,
15408 &rq_create->u.request.context,
15409 LPFC_RQ_PAGE_SIZE_4096);
15411 switch (hrq->entry_count) {
15413 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15414 "2535 Unsupported RQ count. (%d)\n",
15416 if (hrq->entry_count < 512) {
15420 /* otherwise default to smallest count (fall through) */
15422 bf_set(lpfc_rq_context_rqe_count,
15423 &rq_create->u.request.context,
15424 LPFC_RQ_RING_SIZE_512);
15427 bf_set(lpfc_rq_context_rqe_count,
15428 &rq_create->u.request.context,
15429 LPFC_RQ_RING_SIZE_1024);
15432 bf_set(lpfc_rq_context_rqe_count,
15433 &rq_create->u.request.context,
15434 LPFC_RQ_RING_SIZE_2048);
15437 bf_set(lpfc_rq_context_rqe_count,
15438 &rq_create->u.request.context,
15439 LPFC_RQ_RING_SIZE_4096);
15442 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
15443 LPFC_HDR_BUF_SIZE);
15445 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15447 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15449 list_for_each_entry(dmabuf, &hrq->page_list, list) {
15450 memset(dmabuf->virt, 0, hw_page_size);
15451 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15452 putPaddrLow(dmabuf->phys);
15453 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15454 putPaddrHigh(dmabuf->phys);
15456 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15457 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15459 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15460 /* The IOCTL status is embedded in the mailbox subheader. */
15461 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15462 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15463 if (shdr_status || shdr_add_status || rc) {
15464 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15465 "2504 RQ_CREATE mailbox failed with "
15466 "status x%x add_status x%x, mbx status x%x\n",
15467 shdr_status, shdr_add_status, rc);
15471 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15472 if (hrq->queue_id == 0xFFFF) {
15477 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15478 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
15479 &rq_create->u.response);
15480 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
15481 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
15482 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15483 "3262 RQ [%d] doorbell format not "
15484 "supported: x%x\n", hrq->queue_id,
15490 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
15491 &rq_create->u.response);
15492 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
15493 if (!bar_memmap_p) {
15494 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15495 "3269 RQ[%d] failed to memmap pci "
15496 "barset:x%x\n", hrq->queue_id,
15502 db_offset = rq_create->u.response.doorbell_offset;
15503 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
15504 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
15505 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15506 "3270 RQ[%d] doorbell offset not "
15507 "supported: x%x\n", hrq->queue_id,
15512 hrq->db_regaddr = bar_memmap_p + db_offset;
15513 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15514 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
15515 "format:x%x\n", hrq->queue_id, pci_barset,
15516 db_offset, hrq->db_format);
15518 hrq->db_format = LPFC_DB_RING_FORMAT;
15519 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15521 hrq->type = LPFC_HRQ;
15522 hrq->assoc_qid = cq->queue_id;
15523 hrq->subtype = subtype;
15524 hrq->host_index = 0;
15525 hrq->hba_index = 0;
15526 hrq->entry_repost = LPFC_RQ_REPOST;
15528 /* now create the data queue */
15529 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15530 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15531 length, LPFC_SLI4_MBX_EMBED);
15532 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15533 phba->sli4_hba.pc_sli4_params.rqv);
15534 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15535 bf_set(lpfc_rq_context_rqe_count_1,
15536 &rq_create->u.request.context, hrq->entry_count);
15537 if (subtype == LPFC_NVMET)
15538 rq_create->u.request.context.buffer_size =
15539 LPFC_NVMET_DATA_BUF_SIZE;
15541 rq_create->u.request.context.buffer_size =
15542 LPFC_DATA_BUF_SIZE;
15543 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
15545 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
15546 (PAGE_SIZE/SLI4_PAGE_SIZE));
15548 switch (drq->entry_count) {
15550 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15551 "2536 Unsupported RQ count. (%d)\n",
15553 if (drq->entry_count < 512) {
15557 /* otherwise default to smallest count (fall through) */
15559 bf_set(lpfc_rq_context_rqe_count,
15560 &rq_create->u.request.context,
15561 LPFC_RQ_RING_SIZE_512);
15564 bf_set(lpfc_rq_context_rqe_count,
15565 &rq_create->u.request.context,
15566 LPFC_RQ_RING_SIZE_1024);
15569 bf_set(lpfc_rq_context_rqe_count,
15570 &rq_create->u.request.context,
15571 LPFC_RQ_RING_SIZE_2048);
15574 bf_set(lpfc_rq_context_rqe_count,
15575 &rq_create->u.request.context,
15576 LPFC_RQ_RING_SIZE_4096);
15579 if (subtype == LPFC_NVMET)
15580 bf_set(lpfc_rq_context_buf_size,
15581 &rq_create->u.request.context,
15582 LPFC_NVMET_DATA_BUF_SIZE);
15584 bf_set(lpfc_rq_context_buf_size,
15585 &rq_create->u.request.context,
15586 LPFC_DATA_BUF_SIZE);
15588 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15590 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15592 list_for_each_entry(dmabuf, &drq->page_list, list) {
15593 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15594 putPaddrLow(dmabuf->phys);
15595 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15596 putPaddrHigh(dmabuf->phys);
15598 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15599 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15600 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15601 /* The IOCTL status is embedded in the mailbox subheader. */
15602 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15603 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15604 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15605 if (shdr_status || shdr_add_status || rc) {
15609 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15610 if (drq->queue_id == 0xFFFF) {
15614 drq->type = LPFC_DRQ;
15615 drq->assoc_qid = cq->queue_id;
15616 drq->subtype = subtype;
15617 drq->host_index = 0;
15618 drq->hba_index = 0;
15619 drq->entry_repost = LPFC_RQ_REPOST;
15621 /* link the header and data RQs onto the parent cq child list */
15622 list_add_tail(&hrq->list, &cq->child_list);
15623 list_add_tail(&drq->list, &cq->child_list);
15626 mempool_free(mbox, phba->mbox_mem_pool);
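/*
 * Usage sketch (illustrative only): creating the unsolicited header/data
 * receive queue pair bound to the ELS completion queue, in the spirit of
 * lpfc_sli4_queue_setup(). Queue pointers assume the default SLI4 setup.
 *
 *	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq,
 *			    phba->sli4_hba.dat_rq,
 *			    phba->sli4_hba.els_cq, LPFC_USOL);
 */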
15631 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
15632 * @phba: HBA structure that indicates port to create a queue on.
15633 * @hrqp: The queue structure array to use to create the header receive queues.
15634 * @drqp: The queue structure array to use to create the data receive queues.
15635 * @cqp: The completion queue array to bind these receive queues to.
15637 * This function creates receive buffer queue pairs, as detailed in the @hrqp
15638 * and @drqp arrays, on a port described by @phba, by sending a single
15641 * RQ_CREATE (version 2) mailbox command to the HBA. The @drqp and @hrqp
15642 * entries are used to get the entry counts needed to determine the number
15643 * of pages to use for each queue. Each @cqp entry indicates the completion
15644 * queue to which received buffers posted to the corresponding queue pair
15645 * are bound. The command is issued in polled mode, so this function waits
15646 * for it to finish before continuing.
15649 * On success this function returns zero. If unable to allocate enough
15650 * memory this function returns -ENOMEM. If the queue create mailbox command
15651 * fails this function returns -ENXIO.
15654 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
15655 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
15658 struct lpfc_queue *hrq, *drq, *cq;
15659 struct lpfc_mbx_rq_create_v2 *rq_create;
15660 struct lpfc_dmabuf *dmabuf;
15661 LPFC_MBOXQ_t *mbox;
15662 int rc, length, alloclen, status = 0;
15663 int cnt, idx, numrq, page_idx = 0;
15664 uint32_t shdr_status, shdr_add_status;
15665 union lpfc_sli4_cfg_shdr *shdr;
15666 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15668 numrq = phba->cfg_nvmet_mrq;
15669 /* sanity check on array memory */
15670 if (!hrqp || !drqp || !cqp || !numrq)
15672 if (!phba->sli4_hba.pc_sli4_params.supported)
15673 hw_page_size = SLI4_PAGE_SIZE;
15675 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15679 length = sizeof(struct lpfc_mbx_rq_create_v2);
15680 length += ((2 * numrq * hrqp[0]->page_count) *
15681 sizeof(struct dma_address));
15683 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15684 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
15685 LPFC_SLI4_MBX_NEMBED);
15686 if (alloclen < length) {
15687 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15688 "3099 Allocated DMA memory size (%d) is "
15689 "less than the requested DMA memory size "
15690 "(%d)\n", alloclen, length);
15697 rq_create = mbox->sge_array->addr[0];
15698 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
15700 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
15703 for (idx = 0; idx < numrq; idx++) {
15708 /* sanity check on queue memory */
15709 if (!hrq || !drq || !cq) {
15714 if (hrq->entry_count != drq->entry_count) {
15720 bf_set(lpfc_mbx_rq_create_num_pages,
15721 &rq_create->u.request,
15723 bf_set(lpfc_mbx_rq_create_rq_cnt,
15724 &rq_create->u.request, (numrq * 2));
15725 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
15727 bf_set(lpfc_rq_context_base_cq,
15728 &rq_create->u.request.context,
15730 bf_set(lpfc_rq_context_data_size,
15731 &rq_create->u.request.context,
15732 LPFC_NVMET_DATA_BUF_SIZE);
15733 bf_set(lpfc_rq_context_hdr_size,
15734 &rq_create->u.request.context,
15735 LPFC_HDR_BUF_SIZE);
15736 bf_set(lpfc_rq_context_rqe_count_1,
15737 &rq_create->u.request.context,
15739 bf_set(lpfc_rq_context_rqe_size,
15740 &rq_create->u.request.context,
15742 bf_set(lpfc_rq_context_page_size,
15743 &rq_create->u.request.context,
15744 (PAGE_SIZE/SLI4_PAGE_SIZE));
15747 list_for_each_entry(dmabuf, &hrq->page_list, list) {
15748 memset(dmabuf->virt, 0, hw_page_size);
15749 cnt = page_idx + dmabuf->buffer_tag;
15750 rq_create->u.request.page[cnt].addr_lo =
15751 putPaddrLow(dmabuf->phys);
15752 rq_create->u.request.page[cnt].addr_hi =
15753 putPaddrHigh(dmabuf->phys);
15759 list_for_each_entry(dmabuf, &drq->page_list, list) {
15760 memset(dmabuf->virt, 0, hw_page_size);
15761 cnt = page_idx + dmabuf->buffer_tag;
15762 rq_create->u.request.page[cnt].addr_lo =
15763 putPaddrLow(dmabuf->phys);
15764 rq_create->u.request.page[cnt].addr_hi =
15765 putPaddrHigh(dmabuf->phys);
15770 hrq->db_format = LPFC_DB_RING_FORMAT;
15771 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15772 hrq->type = LPFC_HRQ;
15773 hrq->assoc_qid = cq->queue_id;
15774 hrq->subtype = subtype;
15775 hrq->host_index = 0;
15776 hrq->hba_index = 0;
15777 hrq->entry_repost = LPFC_RQ_REPOST;
15779 drq->db_format = LPFC_DB_RING_FORMAT;
15780 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15781 drq->type = LPFC_DRQ;
15782 drq->assoc_qid = cq->queue_id;
15783 drq->subtype = subtype;
15784 drq->host_index = 0;
15785 drq->hba_index = 0;
15786 drq->entry_repost = LPFC_RQ_REPOST;
15788 list_add_tail(&hrq->list, &cq->child_list);
15789 list_add_tail(&drq->list, &cq->child_list);
15792 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15793 /* The IOCTL status is embedded in the mailbox subheader. */
15794 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15795 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15796 if (shdr_status || shdr_add_status || rc) {
15797 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15798 "3120 RQ_CREATE mailbox failed with "
15799 "status x%x add_status x%x, mbx status x%x\n",
15800 shdr_status, shdr_add_status, rc);
15804 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15805 if (rc == 0xFFFF) {
15810 /* Initialize all RQs with associated queue id */
15811 for (idx = 0; idx < numrq; idx++) {
15813 hrq->queue_id = rc + (2 * idx);
15815 drq->queue_id = rc + (2 * idx) + 1;
15819 lpfc_sli4_mbox_cmd_free(phba, mbox);
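/*
 * Usage sketch (illustrative only): creating the NVMET MRQ header/data
 * queue arrays against their CQ set. The array fields assume an
 * NVMET-enabled SLI4 configuration with cfg_nvmet_mrq queues per array.
 *
 *	rc = lpfc_mrq_create(phba, phba->sli4_hba.nvmet_mrq_hdr,
 *			     phba->sli4_hba.nvmet_mrq_data,
 *			     phba->sli4_hba.nvmet_cqset, LPFC_NVMET);
 */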
15824 * lpfc_eq_destroy - Destroy an Event Queue on the HBA
15825 * @eq: The queue structure associated with the queue to destroy.
15827 * This function destroys a queue, as detailed in @eq, by sending a mailbox
15828 * command, specific to the type of queue, to the HBA.
15830 * The @eq struct is used to get the queue ID of the queue to destroy.
15832 * On success this function returns zero. If the queue destroy mailbox
15833 * command fails this function returns -ENXIO.
15836 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
15838 LPFC_MBOXQ_t *mbox;
15839 int rc, length, status = 0;
15840 uint32_t shdr_status, shdr_add_status;
15841 union lpfc_sli4_cfg_shdr *shdr;
15843 /* sanity check on queue memory */
15846 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
15849 length = (sizeof(struct lpfc_mbx_eq_destroy) -
15850 sizeof(struct lpfc_sli4_cfg_mhdr));
15851 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15852 LPFC_MBOX_OPCODE_EQ_DESTROY,
15853 length, LPFC_SLI4_MBX_EMBED);
15854 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
15856 mbox->vport = eq->phba->pport;
15857 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15859 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
15860 /* The IOCTL status is embedded in the mailbox subheader. */
15861 shdr = (union lpfc_sli4_cfg_shdr *)
15862 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
15863 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15864 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15865 if (shdr_status || shdr_add_status || rc) {
15866 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15867 "2505 EQ_DESTROY mailbox failed with "
15868 "status x%x add_status x%x, mbx status x%x\n",
15869 shdr_status, shdr_add_status, rc);
15873 /* Remove eq from any list */
15874 list_del_init(&eq->list);
15875 mempool_free(mbox, eq->phba->mbox_mem_pool);
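/*
 * Usage sketch (illustrative only): destroying the per-vector event queues
 * during queue unset. The hba_eq array and io_channel_irqs count assume the
 * default SLI4 setup of this driver generation.
 *
 *	for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
 *		lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]);
 */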
15880 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
15881 * @cq: The queue structure associated with the queue to destroy.
15883 * This function destroys a queue, as detailed in @cq, by sending a mailbox
15884 * command, specific to the type of queue, to the HBA.
15886 * The @cq struct is used to get the queue ID of the queue to destroy.
15888 * On success this function returns zero. If the queue destroy mailbox
15889 * command fails this function returns -ENXIO.
15892 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
15894 LPFC_MBOXQ_t *mbox;
15895 int rc, length, status = 0;
15896 uint32_t shdr_status, shdr_add_status;
15897 union lpfc_sli4_cfg_shdr *shdr;
15899 /* sanity check on queue memory */
15902 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
15905 length = (sizeof(struct lpfc_mbx_cq_destroy) -
15906 sizeof(struct lpfc_sli4_cfg_mhdr));
15907 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15908 LPFC_MBOX_OPCODE_CQ_DESTROY,
15909 length, LPFC_SLI4_MBX_EMBED);
15910 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
15912 mbox->vport = cq->phba->pport;
15913 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15914 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
15915 /* The IOCTL status is embedded in the mailbox subheader. */
15916 shdr = (union lpfc_sli4_cfg_shdr *)
15917 &mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
15918 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15919 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15920 if (shdr_status || shdr_add_status || rc) {
15921 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15922 "2506 CQ_DESTROY mailbox failed with "
15923 "status x%x add_status x%x, mbx status x%x\n",
15924 shdr_status, shdr_add_status, rc);
15927 /* Remove cq from any list */
15928 list_del_init(&cq->list);
15929 mempool_free(mbox, cq->phba->mbox_mem_pool);
15934 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
15935 * @mq: The queue structure associated with the queue to destroy.
15937 * This function destroys a queue, as detailed in @mq, by sending a mailbox
15938 * command, specific to the type of queue, to the HBA.
15940 * The @mq struct is used to get the queue ID of the queue to destroy.
15942 * On success this function returns zero. If the queue destroy mailbox
15943 * command fails this function returns -ENXIO.
15946 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
15948 LPFC_MBOXQ_t *mbox;
15949 int rc, length, status = 0;
15950 uint32_t shdr_status, shdr_add_status;
15951 union lpfc_sli4_cfg_shdr *shdr;
15953 /* sanity check on queue memory */
15956 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
15959 length = (sizeof(struct lpfc_mbx_mq_destroy) -
15960 sizeof(struct lpfc_sli4_cfg_mhdr));
15961 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15962 LPFC_MBOX_OPCODE_MQ_DESTROY,
15963 length, LPFC_SLI4_MBX_EMBED);
15964 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
15966 mbox->vport = mq->phba->pport;
15967 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15968 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
15969 /* The IOCTL status is embedded in the mailbox subheader. */
15970 shdr = (union lpfc_sli4_cfg_shdr *)
15971 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
15972 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15973 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15974 if (shdr_status || shdr_add_status || rc) {
15975 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15976 "2507 MQ_DESTROY mailbox failed with "
15977 "status x%x add_status x%x, mbx status x%x\n",
15978 shdr_status, shdr_add_status, rc);
15981 /* Remove mq from any list */
15982 list_del_init(&mq->list);
15983 mempool_free(mbox, mq->phba->mbox_mem_pool);
15988 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
15989 * @wq: The queue structure associated with the queue to destroy.
15991 * This function destroys a queue, as detailed in @wq, by sending a mailbox
15992 * command, specific to the type of queue, to the HBA.
15994 * The @wq struct is used to get the queue ID of the queue to destroy.
15996 * On success this function returns zero. If the queue destroy mailbox
15997 * command fails this function returns -ENXIO.
16000 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16002 LPFC_MBOXQ_t *mbox;
16003 int rc, length, status = 0;
16004 uint32_t shdr_status, shdr_add_status;
16005 union lpfc_sli4_cfg_shdr *shdr;
16007 /* sanity check on queue memory */
16010 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16013 length = (sizeof(struct lpfc_mbx_wq_destroy) -
16014 sizeof(struct lpfc_sli4_cfg_mhdr));
16015 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16016 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16017 length, LPFC_SLI4_MBX_EMBED);
16018 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16020 mbox->vport = wq->phba->pport;
16021 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16022 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16023 shdr = (union lpfc_sli4_cfg_shdr *)
16024 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16025 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16026 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16027 if (shdr_status || shdr_add_status || rc) {
16028 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16029 "2508 WQ_DESTROY mailbox failed with "
16030 "status x%x add_status x%x, mbx status x%x\n",
16031 shdr_status, shdr_add_status, rc);
16034 /* Remove wq from any list */
16035 list_del_init(&wq->list);
16038 mempool_free(mbox, wq->phba->mbox_mem_pool);
16043 * lpfc_rq_destroy - Destroy Receive Queues on the HBA
16044 * @hrq: The queue structure associated with the header receive queue.
16045 * @drq: The queue structure associated with the data receive queue.
16046 * This function destroys the receive queue pair detailed in @hrq and @drq by
16047 * sending a mailbox command, specific to the type of queue, to the HBA.
16049 * The @hrq and @drq structs are used to get the queue IDs of the queues to destroy.
16051 * On success this function returns zero. If the queue destroy mailbox
16052 * command fails this function returns -ENXIO.
16055 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16056 struct lpfc_queue *drq)
16058 LPFC_MBOXQ_t *mbox;
16059 int rc, length, status = 0;
16060 uint32_t shdr_status, shdr_add_status;
16061 union lpfc_sli4_cfg_shdr *shdr;
16063 /* sanity check on queue memory */
16066 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
16069 length = (sizeof(struct lpfc_mbx_rq_destroy) -
16070 sizeof(struct lpfc_sli4_cfg_mhdr));
16071 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16072 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
16073 length, LPFC_SLI4_MBX_EMBED);
16074 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16076 mbox->vport = hrq->phba->pport;
16077 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16078 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
16079 /* The IOCTL status is embedded in the mailbox subheader. */
16080 shdr = (union lpfc_sli4_cfg_shdr *)
16081 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16082 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16083 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16084 if (shdr_status || shdr_add_status || rc) {
16085 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16086 "2509 RQ_DESTROY mailbox failed with "
16087 "status x%x add_status x%x, mbx status x%x\n",
16088 shdr_status, shdr_add_status, rc);
16089 if (rc != MBX_TIMEOUT)
16090 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16093 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16095 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
16096 shdr = (union lpfc_sli4_cfg_shdr *)
16097 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16098 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16099 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16100 if (shdr_status || shdr_add_status || rc) {
16101 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16102 "2510 RQ_DESTROY mailbox failed with "
16103 "status x%x add_status x%x, mbx status x%x\n",
16104 shdr_status, shdr_add_status, rc);
16107 list_del_init(&hrq->list);
16108 list_del_init(&drq->list);
16109 mempool_free(mbox, hrq->phba->mbox_mem_pool);
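/*
 * Usage sketch (illustrative only): the destroy routines above are normally
 * driven from lpfc_sli4_queue_unset() in roughly the reverse order of
 * creation: WQs and RQs first, then the MQ, the CQs, and finally the EQs.
 * Queue pointers assume the default SLI4 setup.
 *
 *	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
 *	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
 *	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
 *	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
 */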
16114 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
16115 * @phba: pointer to lpfc hba data structure.
16116 * @pdma_phys_addr0: Physical address of the 1st SGL page.
16117 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
16118 * @xritag: the xritag that ties this io to the SGL pages.
16120 * This routine will post the sgl pages for the IO that has the xritag
16121 * that is in the iocbq structure. The xritag is assigned during iocbq
16122 * creation and persists for as long as the driver is loaded.
16123 * If the caller has fewer than 256 scatter gather segments to map then
16124 * pdma_phys_addr1 should be 0.
16125 * If the caller needs to map more than 256 scatter gather segments then
16126 * pdma_phys_addr1 should be a valid physical address.
16127 * Physical addresses for SGLs must be 64 byte aligned.
16128 * If two SGL pages are mapped then the first one must have 256 entries and
16129 * the second can have between 1 and 256 entries.
16133 * -ENXIO, -ENOMEM - Failure
16136 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16137 dma_addr_t pdma_phys_addr0,
16138 dma_addr_t pdma_phys_addr1,
16141 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16142 LPFC_MBOXQ_t *mbox;
16144 uint32_t shdr_status, shdr_add_status;
16146 union lpfc_sli4_cfg_shdr *shdr;
16148 if (xritag == NO_XRI) {
16149 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16150 "0364 Invalid param:\n");
16154 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16158 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16159 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16160 sizeof(struct lpfc_mbx_post_sgl_pages) -
16161 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16163 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16164 &mbox->u.mqe.un.post_sgl_pages;
16165 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16166 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16168 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16169 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16170 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16171 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16173 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16174 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16175 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16176 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16177 if (!phba->sli4_hba.intr_enable)
16178 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16180 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16181 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16183 /* The IOCTL status is embedded in the mailbox subheader. */
16184 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16185 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16186 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16187 if (rc != MBX_TIMEOUT)
16188 mempool_free(mbox, phba->mbox_mem_pool);
16189 if (shdr_status || shdr_add_status || rc) {
16190 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16191 "2511 POST_SGL mailbox failed with "
16192 "status x%x add_status x%x, mbx status x%x\n",
16193 shdr_status, shdr_add_status, rc);
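/*
 * Usage sketch (illustrative only): posting the SGL page of a single sglq
 * entry. Here sglq is a hypothetical struct lpfc_sglq pointer whose SGL
 * fits in one page, so the second page address is 0.
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
 */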
16199 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
16200 * @phba: pointer to lpfc hba data structure.
16202 * This routine is invoked to allocate the next available xri from the
16203 * driver's logical xri bitmask, consistent with the SLI-4 interface spec.
16204 * Because the index is logical, the search starts at 0 each time.
16208 * Returns an xri defined as 0 <= xri < max_xri if successful, or
16209 * NO_XRI if no xris are available.
16212 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16217 * Fetch the next logical xri. Because this index is logical,
16218 * the driver starts at 0 each time.
16220 spin_lock_irq(&phba->hbalock);
16221 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16222 phba->sli4_hba.max_cfg_param.max_xri, 0);
16223 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16224 spin_unlock_irq(&phba->hbalock);
16227 set_bit(xri, phba->sli4_hba.xri_bmask);
16228 phba->sli4_hba.max_cfg_param.xri_used++;
16230 spin_unlock_irq(&phba->hbalock);
16235 * __lpfc_sli4_free_xri - Release an xri for reuse.
16236 * @phba: pointer to lpfc hba data structure.
16238 * This routine is invoked to release an xri to the pool of available xris
16239 * maintained by the driver. The caller is expected to hold the hbalock.
16242 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16244 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
16245 phba->sli4_hba.max_cfg_param.xri_used--;
16250 * lpfc_sli4_free_xri - Release an xri for reuse.
16251 * @phba: pointer to lpfc hba data structure.
16253 * This routine is invoked to release an xri to the pool of
16254 * available xris maintained by the driver.
16257 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16259 spin_lock_irq(&phba->hbalock);
16260 __lpfc_sli4_free_xri(phba, xri);
16261 spin_unlock_irq(&phba->hbalock);
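/*
 * Usage sketch (illustrative only): a caller pairs lpfc_sli4_alloc_xri()
 * with lpfc_sli4_free_xri() once the exchange is done; the error handling
 * shown is hypothetical.
 *
 *	xri = lpfc_sli4_alloc_xri(phba);
 *	if (xri == NO_XRI)
 *		return NO_XRI;
 *	...
 *	lpfc_sli4_free_xri(phba, xri);
 */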
16265 * lpfc_sli4_next_xritag - Get an xritag for the io
16266 * @phba: Pointer to HBA context object.
16268 * This function gets an xritag for the iocb. If there is no unused xritag
16269 * it will return NO_XRI (0xffff).
16270 * The function returns the allocated xritag if successful, else returns
16271 * NO_XRI; NO_XRI is not a valid xritag.
16272 * The caller is not required to hold any lock.
16275 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
16277 uint16_t xri_index;
16279 xri_index = lpfc_sli4_alloc_xri(phba);
16280 if (xri_index == NO_XRI)
16281 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16282 "2004 Failed to allocate XRI.last XRITAG is %d"
16283 " Max XRI is %d, Used XRI is %d\n",
16285 phba->sli4_hba.max_cfg_param.max_xri,
16286 phba->sli4_hba.max_cfg_param.xri_used);
16291 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
16292 * @phba: pointer to lpfc hba data structure.
16293 * @post_sgl_list: pointer to els sgl entry list.
16294 * @count: number of els sgl entries on the list.
16296 * This routine is invoked to post a block of driver's sgl pages to the
16297 * HBA using a non-embedded mailbox command. No lock is held. This routine
16298 * is only called when the driver is loading and after all IO has been stopped.
16302 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
16303 struct list_head *post_sgl_list,
16306 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
16307 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16308 struct sgl_page_pairs *sgl_pg_pairs;
16310 LPFC_MBOXQ_t *mbox;
16311 uint32_t reqlen, alloclen, pg_pairs;
16313 uint16_t xritag_start = 0;
16315 uint32_t shdr_status, shdr_add_status;
16316 union lpfc_sli4_cfg_shdr *shdr;
16318 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
16319 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16320 if (reqlen > SLI4_PAGE_SIZE) {
16321 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16322 "2559 Block sgl registration required DMA "
16323 "size (%d) great than a page\n", reqlen);
16327 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16331 /* Allocate DMA memory and set up the non-embedded mailbox command */
16332 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16333 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
16334 LPFC_SLI4_MBX_NEMBED);
16336 if (alloclen < reqlen) {
16337 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16338 "0285 Allocated DMA memory size (%d) is "
16339 "less than the requested DMA memory "
16340 "size (%d)\n", alloclen, reqlen);
16341 lpfc_sli4_mbox_cmd_free(phba, mbox);
16344 /* Set up the SGL pages in the non-embedded DMA pages */
16345 viraddr = mbox->sge_array->addr[0];
16346 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16347 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16350 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
16351 /* Set up the sge entry */
16352 sgl_pg_pairs->sgl_pg0_addr_lo =
16353 cpu_to_le32(putPaddrLow(sglq_entry->phys));
16354 sgl_pg_pairs->sgl_pg0_addr_hi =
16355 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
16356 sgl_pg_pairs->sgl_pg1_addr_lo =
16357 cpu_to_le32(putPaddrLow(0));
16358 sgl_pg_pairs->sgl_pg1_addr_hi =
16359 cpu_to_le32(putPaddrHigh(0));
16361 /* Keep the first xritag on the list */
16363 xritag_start = sglq_entry->sli4_xritag;
16368 /* Complete initialization and perform endian conversion. */
16369 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16370 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
16371 sgl->word0 = cpu_to_le32(sgl->word0);
16373 if (!phba->sli4_hba.intr_enable)
16374 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16376 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16377 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16379 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16380 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16381 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16382 if (rc != MBX_TIMEOUT)
16383 lpfc_sli4_mbox_cmd_free(phba, mbox);
16384 if (shdr_status || shdr_add_status || rc) {
16385 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16386 "2513 POST_SGL_BLOCK mailbox command failed "
16387 "status x%x add_status x%x mbx status x%x\n",
16388 shdr_status, shdr_add_status, rc);
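/*
 * Usage sketch (illustrative only): posting a block of ELS sgls gathered on
 * a local list, the way lpfc_sli4_repost_sgl_list() batches entries;
 * blk_sgl_list and post_cnt are hypothetical caller state.
 *
 *	status = lpfc_sli4_post_sgl_list(phba, &blk_sgl_list, post_cnt);
 */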
16395 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
16396 * @phba: pointer to lpfc hba data structure.
16397 * @sblist: pointer to scsi buffer list.
16398 * @count: number of scsi buffers on the list.
16400 * This routine is invoked to post a block of @count scsi sgl pages from a
16401 * SCSI buffer list @sblist to the HBA using a non-embedded mailbox command.
16406 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
16407 struct list_head *sblist,
16410 struct lpfc_scsi_buf *psb;
16411 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16412 struct sgl_page_pairs *sgl_pg_pairs;
16414 LPFC_MBOXQ_t *mbox;
16415 uint32_t reqlen, alloclen, pg_pairs;
16417 uint16_t xritag_start = 0;
16419 uint32_t shdr_status, shdr_add_status;
16420 dma_addr_t pdma_phys_bpl1;
16421 union lpfc_sli4_cfg_shdr *shdr;
16423 /* Calculate the requested length of the dma memory */
16424 reqlen = count * sizeof(struct sgl_page_pairs) +
16425 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16426 if (reqlen > SLI4_PAGE_SIZE) {
16427 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
16428 "0217 Block sgl registration required DMA "
16429 "size (%d) great than a page\n", reqlen);
16432 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16434 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16435 "0283 Failed to allocate mbox cmd memory\n");
16439 /* Allocate DMA memory and set up the non-embedded mailbox command */
16440 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16441 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
16442 LPFC_SLI4_MBX_NEMBED);
16444 if (alloclen < reqlen) {
16445 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16446 "2561 Allocated DMA memory size (%d) is "
16447 "less than the requested DMA memory "
16448 "size (%d)\n", alloclen, reqlen);
16449 lpfc_sli4_mbox_cmd_free(phba, mbox);
16453 /* Get the first SGE entry from the non-embedded DMA memory */
16454 viraddr = mbox->sge_array->addr[0];
16456 /* Set up the SGL pages in the non-embedded DMA pages */
16457 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16458 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16461 list_for_each_entry(psb, sblist, list) {
16462 /* Set up the sge entry */
16463 sgl_pg_pairs->sgl_pg0_addr_lo =
16464 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
16465 sgl_pg_pairs->sgl_pg0_addr_hi =
16466 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
16467 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
16468 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
16470 pdma_phys_bpl1 = 0;
16471 sgl_pg_pairs->sgl_pg1_addr_lo =
16472 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
16473 sgl_pg_pairs->sgl_pg1_addr_hi =
16474 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
16475 /* Keep the first xritag on the list */
16477 xritag_start = psb->cur_iocbq.sli4_xritag;
16481 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16482 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
16483 /* Perform endian conversion if necessary */
16484 sgl->word0 = cpu_to_le32(sgl->word0);
16486 if (!phba->sli4_hba.intr_enable)
16487 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16489 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16490 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16492 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16493 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16494 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16495 if (rc != MBX_TIMEOUT)
16496 lpfc_sli4_mbox_cmd_free(phba, mbox);
16497 if (shdr_status || shdr_add_status || rc) {
16498 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16499 "2564 POST_SGL_BLOCK mailbox command failed "
16500 "status x%x add_status x%x mbx status x%x\n",
16501 shdr_status, shdr_add_status, rc);
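/*
 * Usage sketch (illustrative only): posting a batch of SCSI buffer sgls.
 * Here sblist is a hypothetical list of struct lpfc_scsi_buf entries and
 * count is the number of buffers on it.
 *
 *	rc = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, count);
 */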
16508 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
16509 * @phba: pointer to lpfc_hba struct that the frame was received on
16510 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16512 * This function checks the fields in the @fc_hdr to see if the FC frame is a
16513 * valid type of frame that the LPFC driver will handle. This function
16514 * returns zero if the frame is valid and a non-zero value when the
16515 * frame does not pass the check.
16518 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
16521 struct fc_vft_header *fc_vft_hdr;
16522 uint32_t *header = (uint32_t *) fc_hdr;
16524 #define FC_RCTL_MDS_DIAGS 0xF4
16526 switch (fc_hdr->fh_r_ctl) {
16527 case FC_RCTL_DD_UNCAT: /* uncategorized information */
16528 case FC_RCTL_DD_SOL_DATA: /* solicited data */
16529 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
16530 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
16531 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
16532 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
16533 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
16534 case FC_RCTL_DD_CMD_STATUS: /* command status */
16535 case FC_RCTL_ELS_REQ: /* extended link services request */
16536 case FC_RCTL_ELS_REP: /* extended link services reply */
16537 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
16538 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
16539 case FC_RCTL_BA_NOP: /* basic link service NOP */
16540 case FC_RCTL_BA_ABTS: /* basic link service abort */
16541 case FC_RCTL_BA_RMC: /* remove connection */
16542 case FC_RCTL_BA_ACC: /* basic accept */
16543 case FC_RCTL_BA_RJT: /* basic reject */
16544 case FC_RCTL_BA_PRMT:
16545 case FC_RCTL_ACK_1: /* acknowledge_1 */
16546 case FC_RCTL_ACK_0: /* acknowledge_0 */
16547 case FC_RCTL_P_RJT: /* port reject */
16548 case FC_RCTL_F_RJT: /* fabric reject */
16549 case FC_RCTL_P_BSY: /* port busy */
16550 case FC_RCTL_F_BSY: /* fabric busy to data frame */
16551 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
16552 case FC_RCTL_LCR: /* link credit reset */
16553 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
16554 case FC_RCTL_END: /* end */
16556 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
16557 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
16558 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
16559 return lpfc_fc_frame_check(phba, fc_hdr);
16564 #define FC_TYPE_VENDOR_UNIQUE 0xFF
16566 switch (fc_hdr->fh_type) {
16572 case FC_TYPE_VENDOR_UNIQUE:
16580 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
16581 "2538 Received frame rctl:x%x, type:x%x, "
16582 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
16583 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
16584 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
16585 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
16586 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
16587 be32_to_cpu(header[6]));
16590 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
16591 "2539 Dropped frame rctl:x%x type:x%x\n",
16592 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
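/*
 * Usage sketch (illustrative only): validating a received frame before
 * processing it, as the unsolicited-receive path does. hbq_buf is a
 * hypothetical struct hbq_dmabuf holding the frame.
 *
 *	fc_hdr = (struct fc_frame_header *)hbq_buf->hbuf.virt;
 *	if (lpfc_fc_frame_check(phba, fc_hdr)) {
 *		lpfc_in_buf_free(phba, &hbq_buf->dbuf);
 *		return;
 *	}
 */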
16597 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
16598 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16600 * This function processes the FC header to retrieve the VFI from the VF
16601 * header, if one exists. This function will return the VFI if one exists
16602 * or 0 if no VFT Header exists.
16605 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
16607 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
16609 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
16611 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
16615 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
16616 * @phba: Pointer to the HBA structure to search for the vport on
16617 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16618 * @fcfi: The FC Fabric ID that the frame came from
16620 * This function searches the @phba for a vport that matches the content of the
16621 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
16622 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
16623 * returns the matching vport pointer or NULL if unable to match the frame
16624 * to a vport.
16626 static struct lpfc_vport *
16627 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
16628 uint16_t fcfi, uint32_t did)
16630 struct lpfc_vport **vports;
16631 struct lpfc_vport *vport = NULL;
16634 if (did == Fabric_DID)
16635 return phba->pport;
16636 if ((phba->pport->fc_flag & FC_PT2PT) &&
16637 !(phba->link_state == LPFC_HBA_READY))
16638 return phba->pport;
16640 vports = lpfc_create_vport_work_array(phba);
16641 if (vports != NULL) {
16642 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
16643 if (phba->fcf.fcfi == fcfi &&
16644 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
16645 vports[i]->fc_myDID == did) {
16651 lpfc_destroy_vport_work_array(phba, vports);
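/*
 * Usage sketch (illustrative only): resolving the destination vport for a
 * received frame. The DID comes from sli4_did_from_fc_hdr() and fcfi is
 * assumed to have been extracted from the receive queue completion entry.
 *
 *	did = sli4_did_from_fc_hdr(fc_hdr);
 *	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
 *	if (!vport)
 *		goto drop;	(hypothetical drop label)
 */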
16656 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
16657 * @vport: The vport to work on.
16659 * This function updates the receive sequence time stamp for this vport. The
16660 * receive sequence time stamp indicates the time that the last frame of
16661 * the sequence that has been idle for the longest amount of time was received.
16662 * The driver uses this time stamp to indicate if any received sequences have
16663 * timed out.
16666 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
16668 struct lpfc_dmabuf *h_buf;
16669 struct hbq_dmabuf *dmabuf = NULL;
16671 /* get the oldest sequence on the rcv list */
16672 h_buf = list_get_first(&vport->rcv_buffer_list,
16673 struct lpfc_dmabuf, list);
16676 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16677 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
16681 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
16682 * @vport: The vport that the received sequences were sent to.
16684 * This function cleans up all outstanding received sequences. This is called
16685 * by the driver when a link event or user action invalidates all the received
16686 * sequences.
16689 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
16691 struct lpfc_dmabuf *h_buf, *hnext;
16692 struct lpfc_dmabuf *d_buf, *dnext;
16693 struct hbq_dmabuf *dmabuf = NULL;
16695 /* start with the oldest sequence on the rcv list */
16696 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
16697 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16698 list_del_init(&dmabuf->hbuf.list);
16699 list_for_each_entry_safe(d_buf, dnext,
16700 &dmabuf->dbuf.list, list) {
16701 list_del_init(&d_buf->list);
16702 lpfc_in_buf_free(vport->phba, d_buf);
16704 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
16709 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
16710 * @vport: The vport that the received sequences were sent to.
16712 * This function determines whether any received sequences have timed out by
16713 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
16714 * indicates that there is at least one timed out sequence this routine will
16715 * go through the received sequences one at a time from most inactive to most
16716 * active to determine which ones need to be cleaned up. Once it has determined
16717 * that a sequence needs to be cleaned up it will simply free up the resources
16718 * without sending an abort.
16721 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
16723 struct lpfc_dmabuf *h_buf, *hnext;
16724 struct lpfc_dmabuf *d_buf, *dnext;
16725 struct hbq_dmabuf *dmabuf = NULL;
16726 unsigned long timeout;
16727 int abort_count = 0;
16729 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
16730 vport->rcv_buffer_time_stamp);
16731 if (list_empty(&vport->rcv_buffer_list) ||
16732 time_before(jiffies, timeout))
16734 /* start with the oldest sequence on the rcv list */
16735 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
16736 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16737 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
16738 dmabuf->time_stamp);
16739 if (time_before(jiffies, timeout))
16742 list_del_init(&dmabuf->hbuf.list);
16743 list_for_each_entry_safe(d_buf, dnext,
16744 &dmabuf->dbuf.list, list) {
16745 list_del_init(&d_buf->list);
16746 lpfc_in_buf_free(vport->phba, d_buf);
16748 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
16751 lpfc_update_rcv_time_stamp(vport);
16755 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: The vport on which the frame was received.
16756 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
16758 * This function searches through the existing incomplete sequences that have
16759 * been sent to this @vport. If the frame matches one of the incomplete
16760 * sequences then the dbuf in the @dmabuf is added to the list of frames that
16761 * make up that sequence. If no sequence is found that matches this frame then
16762 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
16763 * This function returns a pointer to the first dmabuf in the sequence list that
16764 * the frame was linked to.
16766 static struct hbq_dmabuf *
16767 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
16769 struct fc_frame_header *new_hdr;
16770 struct fc_frame_header *temp_hdr;
16771 struct lpfc_dmabuf *d_buf;
16772 struct lpfc_dmabuf *h_buf;
16773 struct hbq_dmabuf *seq_dmabuf = NULL;
16774 struct hbq_dmabuf *temp_dmabuf = NULL;
16777 INIT_LIST_HEAD(&dmabuf->dbuf.list);
16778 dmabuf->time_stamp = jiffies;
16779 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
16781 /* Use the hdr_buf to find the sequence that this frame belongs to */
16782 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
16783 temp_hdr = (struct fc_frame_header *)h_buf->virt;
16784 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
16785 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
16786 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
16788 /* found a pending sequence that matches this frame */
16789 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16794 * This indicates first frame received for this sequence.
16795 * Queue the buffer on the vport's rcv_buffer_list.
16797 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
16798 lpfc_update_rcv_time_stamp(vport);
16801 temp_hdr = seq_dmabuf->hbuf.virt;
16802 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
16803 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
16804 list_del_init(&seq_dmabuf->hbuf.list);
16805 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
16806 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
16807 lpfc_update_rcv_time_stamp(vport);
16810 /* move this sequence to the tail to indicate a young sequence */
16811 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
16812 seq_dmabuf->time_stamp = jiffies;
16813 lpfc_update_rcv_time_stamp(vport);
16814 if (list_empty(&seq_dmabuf->dbuf.list)) {
16815 temp_hdr = dmabuf->hbuf.virt;
16816 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
16819 /* find the correct place in the sequence to insert this frame */
16820 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
16822 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
16823 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
16825 * If the frame's sequence count is greater than the frame on
16826 * the list then insert the frame right after this frame
16828 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
16829 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
16830 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
16835 if (&d_buf->list == &seq_dmabuf->dbuf.list)
16837 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
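/*
 * Usage sketch (illustrative only): linking a received frame into its
 * sequence and checking for completion, roughly as the unsolicited-receive
 * path does; lpfc_seq_complete() is the driver's own completeness check.
 *
 *	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
 *	if (!seq_dmabuf) {
 *		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
 *		return;
 *	}
 *	if (!lpfc_seq_complete(seq_dmabuf))
 *		return;
 */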
16846 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
16847 * @vport: pointer to a virtual port
16848 * @dmabuf: pointer to a dmabuf that describes the FC sequence
16850 * This function tries to abort the partially assembled sequence described by
16851 * the information from the basic abort @dmabuf. It checks whether such a
16852 * partially assembled sequence is held by the driver. If so, it frees up all
16853 * the frames from the partially assembled sequence.
16856 * true -- if a matching partially assembled sequence was found and all of
16857 * its frames were freed;
16858 * false -- if no matching partially assembled sequence was found, so
16859 * nothing was aborted in the lower layer driver
16862 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
16863 struct hbq_dmabuf *dmabuf)
16865 struct fc_frame_header *new_hdr;
16866 struct fc_frame_header *temp_hdr;
16867 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
16868 struct hbq_dmabuf *seq_dmabuf = NULL;
16870 /* Use the hdr_buf to find the sequence that matches this frame */
16871 INIT_LIST_HEAD(&dmabuf->dbuf.list);
16872 INIT_LIST_HEAD(&dmabuf->hbuf.list);
16873 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
16874 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
16875 temp_hdr = (struct fc_frame_header *)h_buf->virt;
16876 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
16877 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
16878 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
16880 /* found a pending sequence that matches this frame */
16881 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16885 /* Free up all the frames from the partially assembled sequence */
16887 list_for_each_entry_safe(d_buf, n_buf,
16888 &seq_dmabuf->dbuf.list, list) {
16889 list_del_init(&d_buf->list);
16890 lpfc_in_buf_free(vport->phba, d_buf);
16898 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
16899 * @vport: pointer to a virtual port
16900 * @dmabuf: pointer to a dmabuf that describes the FC sequence
16902 * This function tries to abort the assembled sequence at the upper level
16903 * protocol, described by the information from the basic abort @dmabuf. It
16904 * checks to see whether such pending context exists at upper level protocol.
16905 * If so, it shall clean up the pending context.
16908 * true -- if there is matching pending context of the sequence cleaned
16910 * false -- if there is no matching pending context of the sequence present
16914 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
16916 struct lpfc_hba *phba = vport->phba;
16919 /* Accepting abort at ulp with SLI4 only */
16920 if (phba->sli_rev < LPFC_SLI_REV4)
16923 /* Ask all concerned upper level protocols to handle the abort */
16924 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
16932 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
16933 * @phba: Pointer to HBA context object.
16934 * @cmd_iocbq: pointer to the command iocbq structure.
16935 * @rsp_iocbq: pointer to the response iocbq structure.
16937 * This function handles the sequence abort response iocb command complete
16938 * event. It properly releases the memory allocated to the sequence abort
16939 * accept iocb.
16942 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
16943 struct lpfc_iocbq *cmd_iocbq,
16944 struct lpfc_iocbq *rsp_iocbq)
16946 struct lpfc_nodelist *ndlp;
16949 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
16950 lpfc_nlp_put(ndlp);
16951 lpfc_nlp_not_used(ndlp);
16952 lpfc_sli_release_iocbq(phba, cmd_iocbq);
16955 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
16956 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
16957 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16958 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
16959 rsp_iocbq->iocb.ulpStatus,
16960 rsp_iocbq->iocb.un.ulpWord[4]);
16964 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
16965 * @phba: Pointer to HBA context object.
16966 * @xri: xri id in transaction.
16968 * This function validates that the xri maps to the known range of XRIs
16969 * allocated and used by the driver.
16972 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
16977 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
16978 if (xri == phba->sli4_hba.xri_ids[i])
16985 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
16986 * @vport: Pointer to the vport the sequence was received on.
16987 * @fc_hdr: pointer to a FC frame header.
 * @aborted: whether the partially assembled sequence was successfully aborted.
16989 * This function sends a basic response to a previous unsol sequence abort
16990 * event after aborting the sequence handling.
16993 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
16994 struct fc_frame_header *fc_hdr, bool aborted)
16996 struct lpfc_hba *phba = vport->phba;
16997 struct lpfc_iocbq *ctiocb = NULL;
16998 struct lpfc_nodelist *ndlp;
16999 uint16_t oxid, rxid, xri, lxri;
17000 uint32_t sid, fctl;
17004 if (!lpfc_is_link_up(phba))
17007 sid = sli4_sid_from_fc_hdr(fc_hdr);
17008 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
17009 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
17011 ndlp = lpfc_findnode_did(vport, sid);
17013 ndlp = lpfc_nlp_init(vport, sid);
17015 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17016 "1268 Failed to allocate ndlp for "
17017 "oxid:x%x SID:x%x\n", oxid, sid);
17020 /* Put ndlp onto pport node list */
17021 lpfc_enqueue_node(vport, ndlp);
17022 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
17023 /* re-setup ndlp without removing from node list */
17024 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
17026 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17027 "3275 Failed to active ndlp found "
17028 "for oxid:x%x SID:x%x\n", oxid, sid);
17033 /* Allocate buffer for rsp iocb */
17034 ctiocb = lpfc_sli_get_iocbq(phba);
17038 /* Extract the F_CTL field from FC_HDR */
17039 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
17041 icmd = &ctiocb->iocb;
17042 icmd->un.xseq64.bdl.bdeSize = 0;
17043 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
17044 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
17045 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
17046 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
17048 /* Fill in the rest of iocb fields */
17049 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
17050 icmd->ulpBdeCount = 0;
17052 icmd->ulpClass = CLASS3;
17053 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
17054 ctiocb->context1 = lpfc_nlp_get(ndlp);
17056 ctiocb->iocb_cmpl = NULL;
17057 ctiocb->vport = phba->pport;
17058 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
17059 ctiocb->sli4_lxritag = NO_XRI;
17060 ctiocb->sli4_xritag = NO_XRI;
17062 if (fctl & FC_FC_EX_CTX)
17063 /* Exchange responder sent the abort so we
17069 lxri = lpfc_sli4_xri_inrange(phba, xri);
17070 if (lxri != NO_XRI)
17071 lpfc_set_rrq_active(phba, ndlp, lxri,
17072 (xri == oxid) ? rxid : oxid, 0);
17073 /* For BA_ABTS from exchange responder, if the logical xri with
17074 * the oxid maps to the FCP XRI range, the port no longer has
17075 * that exchange context, send a BLS_RJT. Override the IOCB for
17078 if ((fctl & FC_FC_EX_CTX) &&
17079 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
17080 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17081 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17082 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17083 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17086 /* If BA_ABTS failed to abort a partially assembled receive sequence,
17087 * the driver no longer has that exchange, send a BLS_RJT. Override
17088 * the IOCB for a BA_RJT.
17090 if (aborted == false) {
17091 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17092 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17093 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17094 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17097 if (fctl & FC_FC_EX_CTX) {
17098 /* ABTS sent by responder to CT exchange, construction
17099 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
17100 * field and RX_ID from ABTS for RX_ID field.
17102 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
17104 /* ABTS sent by initiator to CT exchange, construction
17105 * of BA_ACC will need to allocate a new XRI as for the
17108 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
17110 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
17111 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
17113 /* Xmit CT abts response on exchange <xid> */
17114 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
17115 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
17116 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
17118 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
17119 if (rc == IOCB_ERROR) {
17120 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
17121 "2925 Failed to issue CT ABTS RSP x%x on "
17122 "xri x%x, Data x%x\n",
17123 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
17125 lpfc_nlp_put(ndlp);
17126 ctiocb->context1 = NULL;
17127 lpfc_sli_release_iocbq(phba, ctiocb);
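/*
 * Illustrative sketch (not driver code): the accept-vs-reject decision the
 * routine above implements, reduced to its two triggers. Either the
 * responder's xri no longer maps to an exchange the port owns, or the
 * partial-sequence abort failed; both downgrade the BA_ACC to a BA_RJT.
 * Names and types here are hypothetical.
 */
#include <stdbool.h>

enum sketch_bls { SKETCH_BLS_ACC, SKETCH_BLS_RJT };

static enum sketch_bls sketch_pick_bls_rsp(bool responder_ctx,
					   bool xri_known, bool aborted)
{
	if (responder_ctx && !xri_known)
		return SKETCH_BLS_RJT;	/* exchange context is gone */
	if (!aborted)
		return SKETCH_BLS_RJT;	/* nothing left to abort */
	return SKETCH_BLS_ACC;
}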
17132 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
17133 * @vport: Pointer to the vport on which this sequence was received
17134 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17136 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
17137 * receive sequence is only partially assembled by the driver, it shall abort
17138 * the partially assembled frames for the sequence. Otherwise, if the
17139 * unsolicited receive sequence has been completely assembled and passed to
17140 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to note
17141 * that the unsolicited sequence has been aborted. After that, it will issue
17142 * a basic accept to accept the abort.
17145 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
17146 struct hbq_dmabuf *dmabuf)
17148 struct lpfc_hba *phba = vport->phba;
17149 struct fc_frame_header fc_hdr;
17153 /* Make a copy of fc_hdr before the dmabuf being released */
17154 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
17155 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
17157 if (fctl & FC_FC_EX_CTX) {
17158 /* ABTS by responder to exchange, no cleanup needed */
17161 /* ABTS by initiator to exchange, need to do cleanup */
17162 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
17163 if (aborted == false)
17164 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
17166 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17168 if (phba->nvmet_support) {
17169 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
17173 /* Respond with BA_ACC or BA_RJT accordingly */
17174 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
17178 * lpfc_seq_complete - Indicates if a sequence is complete
17179 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17181 * This function checks the sequence, starting with the frame described by
17182 * @dmabuf, to see if all the frames associated with this sequence are present.
17183 * The frames associated with this sequence are linked to the @dmabuf using the
17184 * dbuf list. This function looks for three major things. 1) That the first frame
17185 * has a sequence count of zero. 2) That there is a frame with the last frame of
17186 * sequence bit set. 3) That there are no holes in the sequence count. The function will
17187 * return 1 when the sequence is complete, otherwise it will return 0.
17190 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
17192 struct fc_frame_header *hdr;
17193 struct lpfc_dmabuf *d_buf;
17194 struct hbq_dmabuf *seq_dmabuf;
17198 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17199 /* make sure the first frame of the sequence has a sequence count of zero */
17200 if (hdr->fh_seq_cnt != seq_count)
17202 fctl = (hdr->fh_f_ctl[0] << 16 |
17203 hdr->fh_f_ctl[1] << 8 |
17205 /* If last frame of sequence we can return success. */
17206 if (fctl & FC_FC_END_SEQ)
17208 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
17209 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17210 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17211 /* If there is a hole in the sequence count then fail. */
17212 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
17214 fctl = (hdr->fh_f_ctl[0] << 16 |
17215 hdr->fh_f_ctl[1] << 8 |
17217 /* If last frame of sequence we can return success. */
17218 if (fctl & FC_FC_END_SEQ)
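/*
 * Illustrative sketch (not driver code): the three completeness rules above,
 * restated over a plain array of per-frame fields. The real driver walks
 * hbq_dmabuf list entries and byte-swaps the wire-order SEQ_CNT; names and
 * the struct layout here are hypothetical. FC_FC_END_SEQ is bit 20 of F_CTL.
 */
#include <stdint.h>

struct sketch_frame {
	uint16_t seq_cnt; /* host-order sequence count */
	uint32_t f_ctl;   /* 24-bit F_CTL: f_ctl[0]<<16 | f_ctl[1]<<8 | f_ctl[2] */
};

#define SKETCH_FC_END_SEQ 0x100000u

static int sketch_seq_complete(const struct sketch_frame *f, int nframes)
{
	int i;

	/* Rule 1: the first frame of the sequence must carry SEQ_CNT 0. */
	if (nframes == 0 || f[0].seq_cnt != 0)
		return 0;
	for (i = 0; i < nframes; i++) {
		/* Rule 3: any hole in the SEQ_CNT progression fails. */
		if (f[i].seq_cnt != (uint16_t)i)
			return 0;
		/* Rule 2: a frame with END_SEQ set completes the sequence. */
		if (f[i].f_ctl & SKETCH_FC_END_SEQ)
			return 1;
	}
	return 0; /* ran out of frames without seeing END_SEQ */
}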
17225 * lpfc_prep_seq - Prep sequence for ULP processing
17226 * @vport: Pointer to the vport on which this sequence was received
17227 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17229 * This function takes a sequence, described by a list of frames, and creates
17230 * a list of iocbq structures to describe the sequence. This iocbq list will be
17231 * used to issue to the generic unsolicited sequence handler. This routine
17232 * returns a pointer to the first iocbq in the list. If the function is unable
17233 * to allocate an iocbq then it throws out the received frames that could not
17234 * be described and returns a pointer to the first iocbq. If unable to
17235 * allocate any iocbqs (including the first) this function will return NULL.
17237 static struct lpfc_iocbq *
17238 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
17240 struct hbq_dmabuf *hbq_buf;
17241 struct lpfc_dmabuf *d_buf, *n_buf;
17242 struct lpfc_iocbq *first_iocbq, *iocbq;
17243 struct fc_frame_header *fc_hdr;
17245 uint32_t len, tot_len;
17246 struct ulp_bde64 *pbde;
17248 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17249 /* remove from receive buffer list */
17250 list_del_init(&seq_dmabuf->hbuf.list);
17251 lpfc_update_rcv_time_stamp(vport);
17252 /* get the Remote Port's SID */
17253 sid = sli4_sid_from_fc_hdr(fc_hdr);
17255 /* Get an iocbq struct to fill in. */
17256 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
17258 /* Initialize the first IOCB. */
17259 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
17260 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
17261 first_iocbq->vport = vport;
17263 /* Check FC Header to see what TYPE of frame we are rcv'ing */
17264 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
17265 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
17266 first_iocbq->iocb.un.rcvels.parmRo =
17267 sli4_did_from_fc_hdr(fc_hdr);
17268 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
17270 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
17271 first_iocbq->iocb.ulpContext = NO_XRI;
17272 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
17273 be16_to_cpu(fc_hdr->fh_ox_id);
17274 /* iocbq is prepped for internal consumption. Physical vpi. */
17275 first_iocbq->iocb.unsli3.rcvsli3.vpi =
17276 vport->phba->vpi_ids[vport->vpi];
17277 /* put the first buffer into the first IOCBq */
17278 tot_len = bf_get(lpfc_rcqe_length,
17279 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
17281 first_iocbq->context2 = &seq_dmabuf->dbuf;
17282 first_iocbq->context3 = NULL;
17283 first_iocbq->iocb.ulpBdeCount = 1;
17284 if (tot_len > LPFC_DATA_BUF_SIZE)
17285 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17286 LPFC_DATA_BUF_SIZE;
17288 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
17290 first_iocbq->iocb.un.rcvels.remoteID = sid;
17292 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17294 iocbq = first_iocbq;
17296 * Each IOCBq can have two Buffers assigned, so go through the list
17297 * of buffers for this sequence and save two buffers in each IOCBq
17299 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
17301 lpfc_in_buf_free(vport->phba, d_buf);
17304 if (!iocbq->context3) {
17305 iocbq->context3 = d_buf;
17306 iocbq->iocb.ulpBdeCount++;
17307 /* We need to get the size out of the right CQE */
17308 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17309 len = bf_get(lpfc_rcqe_length,
17310 &hbq_buf->cq_event.cqe.rcqe_cmpl);
17311 pbde = (struct ulp_bde64 *)
17312 &iocbq->iocb.unsli3.sli3Words[4];
17313 if (len > LPFC_DATA_BUF_SIZE)
17314 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
17316 pbde->tus.f.bdeSize = len;
17318 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
17321 iocbq = lpfc_sli_get_iocbq(vport->phba);
17324 first_iocbq->iocb.ulpStatus =
17325 IOSTAT_FCP_RSP_ERROR;
17326 first_iocbq->iocb.un.ulpWord[4] =
17327 IOERR_NO_RESOURCES;
17329 lpfc_in_buf_free(vport->phba, d_buf);
17332 /* We need to get the size out of the right CQE */
17333 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17334 len = bf_get(lpfc_rcqe_length,
17335 &hbq_buf->cq_event.cqe.rcqe_cmpl);
17336 iocbq->context2 = d_buf;
17337 iocbq->context3 = NULL;
17338 iocbq->iocb.ulpBdeCount = 1;
17339 if (len > LPFC_DATA_BUF_SIZE)
17340 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17341 LPFC_DATA_BUF_SIZE;
17343 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
17346 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17348 iocbq->iocb.un.rcvels.remoteID = sid;
17349 list_add_tail(&iocbq->list, &first_iocbq->list);
17352 return first_iocbq;
17356 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
17357 struct hbq_dmabuf *seq_dmabuf)
17359 struct fc_frame_header *fc_hdr;
17360 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
17361 struct lpfc_hba *phba = vport->phba;
17363 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17364 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
17366 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17367 "2707 Ring %d handler: Failed to allocate "
17368 "iocb Rctl x%x Type x%x received\n",
17370 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17373 if (!lpfc_complete_unsol_iocb(phba,
17374 phba->sli4_hba.els_wq->pring,
17375 iocbq, fc_hdr->fh_r_ctl,
17377 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17378 "2540 Ring %d handler: unexpected Rctl "
17379 "x%x Type x%x received\n",
17381 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17383 /* Free iocb created in lpfc_prep_seq */
17384 list_for_each_entry_safe(curr_iocb, next_iocb,
17385 &iocbq->list, list) {
17386 list_del_init(&curr_iocb->list);
17387 lpfc_sli_release_iocbq(phba, curr_iocb);
17389 lpfc_sli_release_iocbq(phba, iocbq);
17393 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17394 struct lpfc_iocbq *rspiocb)
17396 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
17398 if (pcmd && pcmd->virt)
17399 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17401 lpfc_sli_release_iocbq(phba, cmdiocb);
17405 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
17406 struct hbq_dmabuf *dmabuf)
17408 struct fc_frame_header *fc_hdr;
17409 struct lpfc_hba *phba = vport->phba;
17410 struct lpfc_iocbq *iocbq = NULL;
17411 union lpfc_wqe *wqe;
17412 struct lpfc_dmabuf *pcmd = NULL;
17413 uint32_t frame_len;
17416 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17417 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
17419 /* Send the received frame back */
17420 iocbq = lpfc_sli_get_iocbq(phba);
17424 /* Allocate buffer for command payload */
17425 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
17427 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
17429 if (!pcmd || !pcmd->virt)
17432 INIT_LIST_HEAD(&pcmd->list);
17434 /* copyin the payload */
17435 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
17437 /* fill in BDE's for command */
17438 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
17439 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
17440 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
17441 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
17443 iocbq->context2 = pcmd;
17444 iocbq->vport = vport;
17445 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
17446 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
17449 * Setup rest of the iocb as though it were a WQE
17450 * Build the SEND_FRAME WQE
17452 wqe = (union lpfc_wqe *)&iocbq->iocb;
17454 wqe->send_frame.frame_len = frame_len;
17455 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
17456 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
17457 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
17458 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
17459 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
17460 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
17462 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
17463 iocbq->iocb.ulpLe = 1;
17464 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
17465 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
17466 if (rc == IOCB_ERROR)
17469 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17473 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17474 "2023 Unable to process MDS loopback frame\n");
17475 if (pcmd && pcmd->virt)
17476 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17479 lpfc_sli_release_iocbq(phba, iocbq);
17480 lpfc_in_buf_free(phba, &dmabuf->dbuf);
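/*
 * Illustrative sketch (not driver code): packing the 24-byte wire-order FC
 * header into six host-order words, as the SEND_FRAME WQE setup above does
 * with be32_to_cpu(). Plain-C byte assembly; names are hypothetical.
 */
#include <stdint.h>

static void sketch_fc_hdr_to_words(const uint8_t hdr[24], uint32_t wd[6])
{
	int i;

	for (i = 0; i < 6; i++)
		wd[i] = (uint32_t)hdr[4 * i] << 24 |
			(uint32_t)hdr[4 * i + 1] << 16 |
			(uint32_t)hdr[4 * i + 2] << 8 |
			(uint32_t)hdr[4 * i + 3];
}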
17484 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
17485 * @phba: Pointer to HBA context object.
17487 * This function is called with no lock held. This function processes all
17488 * the received buffers and gives them to upper layers when a received buffer
17489 * indicates that it is the final frame in the sequence. The interrupt
17490 * service routine processes received buffers in interrupt context.
17491 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
17492 * appropriate receive function when the final frame in a sequence is received.
17495 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
17496 struct hbq_dmabuf *dmabuf)
17498 struct hbq_dmabuf *seq_dmabuf;
17499 struct fc_frame_header *fc_hdr;
17500 struct lpfc_vport *vport;
17504 /* Process each received buffer */
17505 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17507 /* check to see if this is a valid type of frame */
17508 if (lpfc_fc_frame_check(phba, fc_hdr)) {
17509 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17513 if ((bf_get(lpfc_cqe_code,
17514 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
17515 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
17516 &dmabuf->cq_event.cqe.rcqe_cmpl);
17518 fcfi = bf_get(lpfc_rcqe_fcf_id,
17519 &dmabuf->cq_event.cqe.rcqe_cmpl);
17521 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
17522 vport = phba->pport;
17523 /* Handle MDS Loopback frames */
17524 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
17528 /* d_id this frame is directed to */
17529 did = sli4_did_from_fc_hdr(fc_hdr);
17531 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
17533 /* throw out the frame */
17534 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17538 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
17539 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
17540 (did != Fabric_DID)) {
17542 * Throw out the frame if we are not pt2pt.
17543 * The pt2pt protocol allows for discovery frames
17544 * to be received without a registered VPI.
17546 if (!(vport->fc_flag & FC_PT2PT) ||
17547 (phba->link_state == LPFC_HBA_READY)) {
17548 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17553 /* Handle the basic abort sequence (BA_ABTS) event */
17554 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
17555 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
17559 /* Link this frame */
17560 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
17562 /* unable to add frame to vport - throw it out */
17563 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17566 /* If not last frame in sequence continue processing frames. */
17567 if (!lpfc_seq_complete(seq_dmabuf))
17570 /* Send the complete sequence to the upper layer protocol */
17571 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
17575 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
17576 * @phba: pointer to lpfc hba data structure.
17578 * This routine is invoked to post rpi header templates to the
17579 * HBA consistent with the SLI-4 interface spec. This routine
17580 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
17581 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
17583 * This routine does not require any locks. Its usage is expected
17584 * to be driver load or reset recovery when the driver is
17589 * -EIO - The mailbox failed to complete successfully.
17590 * When this error occurs, the driver is not guaranteed
17591 * to have any rpi regions posted to the device and
17592 * must either attempt to repost the regions or take a
17596 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
17598 struct lpfc_rpi_hdr *rpi_page;
17602 /* SLI4 ports that support extents do not require RPI headers. */
17603 if (!phba->sli4_hba.rpi_hdrs_in_use)
17605 if (phba->sli4_hba.extents_in_use)
17608 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
17610 * Assign the rpi headers a physical rpi only if the driver
17611 * has not initialized those resources. A port reset only
17612 * needs the headers posted.
17614 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
17616 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
17618 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
17619 if (rc != MBX_SUCCESS) {
17620 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17621 "2008 Error %d posting all rpi "
17629 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
17630 LPFC_RPI_RSRC_RDY);
17635 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
17636 * @phba: pointer to lpfc hba data structure.
17637 * @rpi_page: pointer to the rpi memory region.
17639 * This routine is invoked to post a single rpi header to the
17640 * HBA consistent with the SLI-4 interface spec. This memory region
17641 * maps up to 64 rpi context regions.
17645 * -ENOMEM - No available memory
17646 * -EIO - The mailbox failed to complete successfully.
17649 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
17651 LPFC_MBOXQ_t *mboxq;
17652 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
17654 uint32_t shdr_status, shdr_add_status;
17655 union lpfc_sli4_cfg_shdr *shdr;
17657 /* SLI4 ports that support extents do not require RPI headers. */
17658 if (!phba->sli4_hba.rpi_hdrs_in_use)
17660 if (phba->sli4_hba.extents_in_use)
17663 /* The port is notified of the header region via a mailbox command. */
17664 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17666 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17667 "2001 Unable to allocate memory for issuing "
17668 "SLI_CONFIG_SPECIAL mailbox command\n");
17672 /* Post all rpi memory regions to the port. */
17673 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
17674 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
17675 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
17676 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
17677 sizeof(struct lpfc_sli4_cfg_mhdr),
17678 LPFC_SLI4_MBX_EMBED);
17681 /* Post the physical rpi to the port for this rpi header. */
17682 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
17683 rpi_page->start_rpi);
17684 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
17685 hdr_tmpl, rpi_page->page_count);
17687 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
17688 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
17689 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
17690 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
17691 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17692 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17693 if (rc != MBX_TIMEOUT)
17694 mempool_free(mboxq, phba->mbox_mem_pool);
17695 if (shdr_status || shdr_add_status || rc) {
17696 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17697 "2514 POST_RPI_HDR mailbox failed with "
17698 "status x%x add_status x%x, mbx status x%x\n",
17699 shdr_status, shdr_add_status, rc);
17703 * The next_rpi stores the next logical modulo-64 rpi value used
17704 * to post physical rpis in subsequent rpi postings.
17706 spin_lock_irq(&phba->hbalock);
17707 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
17708 spin_unlock_irq(&phba->hbalock);
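/*
 * Illustrative sketch (not driver code): the three-part status check that
 * follows every polled SLI4 config mailbox above. A command succeeds only
 * when the transport return code, the subheader status, and the additional
 * status are all clear; names here are hypothetical.
 */
static int sketch_cfg_mbox_ok(int rc, unsigned int shdr_status,
			      unsigned int shdr_add_status)
{
	/* any nonzero component fails the command as a whole */
	return rc == 0 && shdr_status == 0 && shdr_add_status == 0;
}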
17714 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
17715 * @phba: pointer to lpfc hba data structure.
17717 * This routine is invoked to allocate the next available logical rpi
17718 * from the driver's rpi bitmask, consistent with the SLI-4 interface
17719 * spec. If the pool of posted rpis runs low, it also grows the rpi
17720 * header region by posting another page to the port.
17723 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
17724 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
17727 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
17730 uint16_t max_rpi, rpi_limit;
17731 uint16_t rpi_remaining, lrpi = 0;
17732 struct lpfc_rpi_hdr *rpi_hdr;
17733 unsigned long iflag;
17736 * Fetch the next logical rpi. Because this index is logical,
17737 * the driver starts at 0 each time.
17739 spin_lock_irqsave(&phba->hbalock, iflag);
17740 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
17741 rpi_limit = phba->sli4_hba.next_rpi;
17743 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
17744 if (rpi >= rpi_limit)
17745 rpi = LPFC_RPI_ALLOC_ERROR;
17747 set_bit(rpi, phba->sli4_hba.rpi_bmask);
17748 phba->sli4_hba.max_cfg_param.rpi_used++;
17749 phba->sli4_hba.rpi_count++;
17751 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
17752 "0001 rpi:%x max:%x lim:%x\n",
17753 (int) rpi, max_rpi, rpi_limit);
17756 * Don't try to allocate more rpi header regions if the device limit
17757 * has been exhausted.
17759 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
17760 (phba->sli4_hba.rpi_count >= max_rpi)) {
17761 spin_unlock_irqrestore(&phba->hbalock, iflag);
17766 * RPI header postings are not required for SLI4 ports capable of
17769 if (!phba->sli4_hba.rpi_hdrs_in_use) {
17770 spin_unlock_irqrestore(&phba->hbalock, iflag);
17775 * If the driver is running low on rpi resources, allocate another
17776 * page now. Note that the next_rpi value is used because
17777 * it represents how many are actually in use whereas max_rpi notes
17778 * the maximum number supported by the device.
17780 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
17781 spin_unlock_irqrestore(&phba->hbalock, iflag);
17782 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
17783 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
17785 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17786 "2002 Error Could not grow rpi "
17789 lrpi = rpi_hdr->start_rpi;
17790 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
17791 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
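/*
 * Illustrative sketch (not driver code): the allocation pattern above --
 * find the first clear bit in a bitmask, claim it, and report exhaustion
 * with a sentinel -- expressed in plain C. The driver itself uses the
 * kernel's find_next_zero_bit()/set_bit() under hbalock; the byte-array
 * bitmap and sentinel value here are hypothetical.
 */
#include <stdint.h>

#define SKETCH_ALLOC_ERROR 0xFFFFu

static unsigned int sketch_alloc_id(uint8_t *bmask, unsigned int limit)
{
	unsigned int id;

	for (id = 0; id < limit; id++) {
		if (!(bmask[id / 8] & (1u << (id % 8)))) {
			bmask[id / 8] |= 1u << (id % 8); /* claim it */
			return id;
		}
	}
	return SKETCH_ALLOC_ERROR; /* no free ids below the limit */
}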
17799 * lpfc_sli4_free_rpi - Release an rpi for reuse.
17800 * @phba: pointer to lpfc hba data structure.
17802 * This routine is invoked to release an rpi to the pool of
17803 * available rpis maintained by the driver.
17806 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
17808 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
17809 phba->sli4_hba.rpi_count--;
17810 phba->sli4_hba.max_cfg_param.rpi_used--;
17815 * lpfc_sli4_free_rpi - Release an rpi for reuse.
17816 * @phba: pointer to lpfc hba data structure.
17818 * This routine is invoked to release an rpi to the pool of
17819 * available rpis maintained by the driver.
17822 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
17824 spin_lock_irq(&phba->hbalock);
17825 __lpfc_sli4_free_rpi(phba, rpi);
17826 spin_unlock_irq(&phba->hbalock);
17830 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
17831 * @phba: pointer to lpfc hba data structure.
17833 * This routine is invoked to remove the memory region that
17834 * provided rpi via a bitmask.
17837 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
17839 kfree(phba->sli4_hba.rpi_bmask);
17840 kfree(phba->sli4_hba.rpi_ids);
17841 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
17845 * lpfc_sli4_resume_rpi - Resume an rpi with the port
17846 * @ndlp: pointer to the lpfc nodelist data structure.
17848 * This routine is invoked to issue a RESUME_RPI mailbox command to the
17849 * port to resume traffic on the rpi associated with @ndlp.
17852 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
17853 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
17855 LPFC_MBOXQ_t *mboxq;
17856 struct lpfc_hba *phba = ndlp->phba;
17859 /* The port is notified of the header region via a mailbox command. */
17860 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17864 /* Post all rpi memory regions to the port. */
17865 lpfc_resume_rpi(mboxq, ndlp);
17867 mboxq->mbox_cmpl = cmpl;
17868 mboxq->context1 = arg;
17869 mboxq->context2 = ndlp;
17871 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17872 mboxq->vport = ndlp->vport;
17873 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
17874 if (rc == MBX_NOT_FINISHED) {
17875 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17876 "2010 Resume RPI Mailbox failed "
17877 "status %d, mbxStatus x%x\n", rc,
17878 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
17879 mempool_free(mboxq, phba->mbox_mem_pool);
17886 * lpfc_sli4_init_vpi - Initialize a vpi with the port
17887 * @vport: Pointer to the vport for which the vpi is being initialized
17889 * This routine is invoked to activate a vpi with the port.
17893 * -Evalue otherwise
17896 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
17898 LPFC_MBOXQ_t *mboxq;
17900 int retval = MBX_SUCCESS;
17902 struct lpfc_hba *phba = vport->phba;
17903 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17906 lpfc_init_vpi(phba, mboxq, vport->vpi);
17907 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
17908 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
17909 if (rc != MBX_SUCCESS) {
17910 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
17911 "2022 INIT VPI Mailbox failed "
17912 "status %d, mbxStatus x%x\n", rc,
17913 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
17916 if (rc != MBX_TIMEOUT)
17917 mempool_free(mboxq, vport->phba->mbox_mem_pool);
17923 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
17924 * @phba: pointer to lpfc hba data structure.
17925 * @mboxq: Pointer to mailbox object.
17927 * This routine is invoked to manually add a single FCF record. The caller
17928 * must pass a completely initialized FCF_Record. This routine takes
17929 * care of the nonembedded mailbox operations.
17932 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
17935 union lpfc_sli4_cfg_shdr *shdr;
17936 uint32_t shdr_status, shdr_add_status;
17938 virt_addr = mboxq->sge_array->addr[0];
17939 /* The IOCTL status is embedded in the mailbox subheader. */
17940 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
17941 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17942 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17944 if ((shdr_status || shdr_add_status) &&
17945 (shdr_status != STATUS_FCF_IN_USE))
17946 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17947 "2558 ADD_FCF_RECORD mailbox failed with "
17948 "status x%x add_status x%x\n",
17949 shdr_status, shdr_add_status);
17951 lpfc_sli4_mbox_cmd_free(phba, mboxq);
17955 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
17956 * @phba: pointer to lpfc hba data structure.
17957 * @fcf_record: pointer to the initialized fcf record to add.
17959 * This routine is invoked to manually add a single FCF record. The caller
17960 * must pass a completely initialized FCF_Record. This routine takes
17961 * care of the nonembedded mailbox operations.
17964 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
17967 LPFC_MBOXQ_t *mboxq;
17970 struct lpfc_mbx_sge sge;
17971 uint32_t alloc_len, req_len;
17974 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17976 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17977 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
17981 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
17984 /* Allocate DMA memory and set up the non-embedded mailbox command */
17985 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
17986 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
17987 req_len, LPFC_SLI4_MBX_NEMBED);
17988 if (alloc_len < req_len) {
17989 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17990 "2523 Allocated DMA memory size (x%x) is "
17991 "less than the requested DMA memory "
17992 "size (x%x)\n", alloc_len, req_len);
17993 lpfc_sli4_mbox_cmd_free(phba, mboxq);
17998 * Get the first SGE entry from the non-embedded DMA memory. This
17999 * routine only uses a single SGE.
18001 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
18002 virt_addr = mboxq->sge_array->addr[0];
18004 * Configure the FCF record for FCFI 0. This is the driver's
18005 * hardcoded default and gets used in non-FIP mode.
18007 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
18008 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
18009 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
18012 * Copy the fcf_index and the FCF Record Data. The data starts after
18013 * the FCoE header plus word10. The data copy needs to be endian
18016 bytep += sizeof(uint32_t);
18017 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
18018 mboxq->vport = phba->pport;
18019 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
18020 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18021 if (rc == MBX_NOT_FINISHED) {
18022 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18023 "2515 ADD_FCF_RECORD mailbox failed with "
18024 "status 0x%x\n", rc);
18025 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18034 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
18035 * @phba: pointer to lpfc hba data structure.
18036 * @fcf_record: pointer to the fcf record to write the default data.
18037 * @fcf_index: FCF table entry index.
18039 * This routine is invoked to build the driver's default FCF record. The
18040 * values used are hardcoded. This routine handles memory initialization.
18044 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
18045 struct fcf_record *fcf_record,
18046 uint16_t fcf_index)
18048 memset(fcf_record, 0, sizeof(struct fcf_record));
18049 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
18050 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
18051 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
18052 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
18053 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
18054 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
18055 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
18056 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
18057 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
18058 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
18059 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
18060 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
18061 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
18062 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
18063 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
18064 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
18065 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
18066 /* Set the VLAN bit map */
18067 if (phba->valid_vlan) {
18068 fcf_record->vlan_bitmap[phba->vlan_id / 8]
18069 = 1 << (phba->vlan_id % 8);
18074 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
18075 * @phba: pointer to lpfc hba data structure.
18076 * @fcf_index: FCF table entry offset.
18078 * This routine is invoked to scan the entire FCF table by reading FCF
18079 * record and processing it one at a time starting from the @fcf_index
18080 * for initial FCF discovery or fast FCF failover rediscovery.
18082 * Return 0 if the mailbox command is submitted successfully, nonzero
18086 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18089 LPFC_MBOXQ_t *mboxq;
18091 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
18092 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
18093 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18095 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18096 "2000 Failed to allocate mbox for "
18099 goto fail_fcf_scan;
18101 /* Construct the read FCF record mailbox command */
18102 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18105 goto fail_fcf_scan;
18107 /* Issue the mailbox command asynchronously */
18108 mboxq->vport = phba->pport;
18109 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
18111 spin_lock_irq(&phba->hbalock);
18112 phba->hba_flag |= FCF_TS_INPROG;
18113 spin_unlock_irq(&phba->hbalock);
18115 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18116 if (rc == MBX_NOT_FINISHED)
18119 /* Reset eligible FCF count for new scan */
18120 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
18121 phba->fcf.eligible_fcf_cnt = 0;
18127 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18128 /* FCF scan failed, clear FCF_TS_INPROG flag */
18129 spin_lock_irq(&phba->hbalock);
18130 phba->hba_flag &= ~FCF_TS_INPROG;
18131 spin_unlock_irq(&phba->hbalock);
18137 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
18138 * @phba: pointer to lpfc hba data structure.
18139 * @fcf_index: FCF table entry offset.
18141 * This routine is invoked to read an FCF record indicated by @fcf_index
18142 * and to use it for FLOGI roundrobin FCF failover.
18144 * Return 0 if the mailbox command is submitted successfully, nonzero
18148 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18151 LPFC_MBOXQ_t *mboxq;
18153 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18155 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18156 "2763 Failed to allocate mbox for "
18159 goto fail_fcf_read;
18161 /* Construct the read FCF record mailbox command */
18162 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18165 goto fail_fcf_read;
18167 /* Issue the mailbox command asynchronously */
18168 mboxq->vport = phba->pport;
18169 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
18170 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18171 if (rc == MBX_NOT_FINISHED)
18177 if (error && mboxq)
18178 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18183 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
18184 * @phba: pointer to lpfc hba data structure.
18185 * @fcf_index: FCF table entry offset.
18187 * This routine is invoked to read an FCF record indicated by @fcf_index to
18188 * determine whether it's eligible for the FLOGI roundrobin failover list.
18190 * Return 0 if the mailbox command is submitted successfully, nonzero
18194 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18197 LPFC_MBOXQ_t *mboxq;
18199 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18201 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18202 "2758 Failed to allocate mbox for "
18205 goto fail_fcf_read;
18207 /* Construct the read FCF record mailbox command */
18208 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18211 goto fail_fcf_read;
18213 /* Issue the mailbox command asynchronously */
18214 mboxq->vport = phba->pport;
18215 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
18216 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18217 if (rc == MBX_NOT_FINISHED)
18223 if (error && mboxq)
18224 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18229 * lpfc_check_next_fcf_pri_level
18230 * phba pointer to the lpfc_hba struct for this port.
18231 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
18232 * routine when the rr_bmask is empty. The FCF indices are put into the
18233 * rr_bmask based on their priority level, starting from the highest priority
18234 * down to the lowest. The most likely FCF candidate will be in the highest
18235 * priority group. When this routine is called it searches the fcf_pri list for
18236 * the next lowest priority group and repopulates the rr_bmask with only those
18239 * 1=success 0=failure
18242 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
18244 uint16_t next_fcf_pri;
18245 uint16_t last_index;
18246 struct lpfc_fcf_pri *fcf_pri;
18250 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
18251 LPFC_SLI4_FCF_TBL_INDX_MAX);
18252 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18253 "3060 Last IDX %d\n", last_index);
18255 /* Verify the priority list has 2 or more entries */
18256 spin_lock_irq(&phba->hbalock);
18257 if (list_empty(&phba->fcf.fcf_pri_list) ||
18258 list_is_singular(&phba->fcf.fcf_pri_list)) {
18259 spin_unlock_irq(&phba->hbalock);
18260 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18261 "3061 Last IDX %d\n", last_index);
18262 return 0; /* Empty rr list */
18264 spin_unlock_irq(&phba->hbalock);
18268 * Clear the rr_bmask and set all of the bits that are at this
18271 memset(phba->fcf.fcf_rr_bmask, 0,
18272 sizeof(*phba->fcf.fcf_rr_bmask));
18273 spin_lock_irq(&phba->hbalock);
18274 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18275 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
18278 * the 1st priority that has not FLOGI failed
18279 * will be the highest.
18282 next_fcf_pri = fcf_pri->fcf_rec.priority;
18283 spin_unlock_irq(&phba->hbalock);
18284 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18285 rc = lpfc_sli4_fcf_rr_index_set(phba,
18286 fcf_pri->fcf_rec.fcf_index);
18290 spin_lock_irq(&phba->hbalock);
18293 * if next_fcf_pri was not set above and the list is not empty then
18294 * we have failed flogis on all of them. So reset flogi failed
18295 * and start at the beginning.
18297 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
18298 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18299 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
18301 * the 1st priority that has not FLOGI failed
18302 * will be the highest.
18305 next_fcf_pri = fcf_pri->fcf_rec.priority;
18306 spin_unlock_irq(&phba->hbalock);
18307 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18308 rc = lpfc_sli4_fcf_rr_index_set(phba,
18309 fcf_pri->fcf_rec.fcf_index);
18313 spin_lock_irq(&phba->hbalock);
18317 spin_unlock_irq(&phba->hbalock);
18322 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
18323 * @phba: pointer to lpfc hba data structure.
18325 * This routine is to get the next eligible FCF record index in a round
18326 * robin fashion. If the next eligible FCF record index equals to the
18327 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
18328 * shall be returned, otherwise, the next eligible FCF record's index
18329 * shall be returned.
18332 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
18334 uint16_t next_fcf_index;
18337 /* Search start from next bit of currently registered FCF index */
18338 next_fcf_index = phba->fcf.current_rec.fcf_indx;
18341 /* Determine the next fcf index to check */
18342 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
18343 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18344 LPFC_SLI4_FCF_TBL_INDX_MAX,
18347 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
18348 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18350 * If we have wrapped then we need to clear the bits that
18351 * have been tested so that we can detect when we should
18352 * change the priority level.
18354 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18355 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
18359 /* Check roundrobin failover list empty condition */
18360 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
18361 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
18363 * If next fcf index is not found check if there are lower
18364 * Priority level fcf's in the fcf_priority list.
18365 * Set up the rr_bmask with all of the available fcf bits
18366 * at that level and continue the selection process.
18368 if (lpfc_check_next_fcf_pri_level(phba))
18369 goto initial_priority;
18370 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
18371 "2844 No roundrobin failover FCF available\n");
18372 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
18373 return LPFC_FCOE_FCF_NEXT_NONE;
18375 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
18376 "3063 Only FCF available idx %d, flag %x\n",
18378 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
18379 return next_fcf_index;
18383 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
18384 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
18385 LPFC_FCF_FLOGI_FAILED) {
18386 if (list_is_singular(&phba->fcf.fcf_pri_list))
18387 return LPFC_FCOE_FCF_NEXT_NONE;
18389 goto next_priority;
18392 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18393 "2845 Get next roundrobin failover FCF (x%x)\n",
18396 return next_fcf_index;
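/*
 * Illustrative sketch (not driver code): the circular "next set bit" search
 * the roundrobin selection above performs. Starting one past the current
 * index, scan to the end of the mask, wrap to bit 0, and treat arriving
 * back at the current index as "no other candidate". Plain-C stand-in for
 * find_next_bit(); names and the sentinel are hypothetical.
 */
#include <stdint.h>

#define SKETCH_NEXT_NONE 0xFFFFu

static unsigned int sketch_next_set_bit(const uint8_t *bmask,
					unsigned int nbits, unsigned int start)
{
	unsigned int i;

	for (i = start; i < nbits; i++)
		if (bmask[i / 8] & (1u << (i % 8)))
			return i;
	return nbits; /* mirror find_next_bit(): "not found" == size */
}

static unsigned int sketch_rr_next(const uint8_t *bmask, unsigned int nbits,
				   unsigned int current)
{
	unsigned int next = sketch_next_set_bit(bmask, nbits,
						(current + 1) % nbits);

	if (next >= nbits)               /* ran off the end: wrap around */
		next = sketch_next_set_bit(bmask, nbits, 0);
	if (next >= nbits || next == current)
		return SKETCH_NEXT_NONE; /* failover list is empty */
	return next;
}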
18400 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
18401 * @phba: pointer to lpfc hba data structure.
18403 * This routine sets the FCF record index in to the eligible bmask for
18404 * roundrobin failover search. It checks to make sure that the index
18405 * does not go beyond the range of the driver allocated bmask dimension
18406 * before setting the bit.
18408 * Returns 0 if the index bit successfully set, otherwise, it returns
18412 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
18414 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18415 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18416 "2610 FCF (x%x) reached driver's book "
18417 "keeping dimension:x%x\n",
18418 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18421 /* Set the eligible FCF record index bmask */
18422 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18424 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18425 "2790 Set FCF (x%x) to roundrobin FCF failover "
18426 "bmask\n", fcf_index);
18432 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
18433 * @phba: pointer to lpfc hba data structure.
18435 * This routine clears the FCF record index from the eligible bmask for
18436 * roundrobin failover search. It checks to make sure that the index
18437 * does not go beyond the range of the driver allocated bmask dimension
18438 * before clearing the bit.
18441 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
18443 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
18444 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18445 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18446 "2762 FCF (x%x) reached driver's book "
18447 "keeping dimension:x%x\n",
18448 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18451 /* Clear the eligible FCF record index bmask */
18452 spin_lock_irq(&phba->hbalock);
18453 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
18455 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
18456 list_del_init(&fcf_pri->list);
18460 spin_unlock_irq(&phba->hbalock);
18461 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18463 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18464 "2791 Clear FCF (x%x) from roundrobin failover "
18465 "bmask\n", fcf_index);
18469 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
18470 * @phba: pointer to lpfc hba data structure.
18472 * This routine is the completion routine for the rediscover FCF table mailbox
18473 * command. If the mailbox command returned failure, it will try to stop the
18474 * FCF rediscover wait timer.
18477 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
18479 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
18480 uint32_t shdr_status, shdr_add_status;
18482 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
18484 shdr_status = bf_get(lpfc_mbox_hdr_status,
18485 &redisc_fcf->header.cfg_shdr.response);
18486 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
18487 &redisc_fcf->header.cfg_shdr.response);
18488 if (shdr_status || shdr_add_status) {
18489 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18490 "2746 Requesting for FCF rediscovery failed "
18491 "status x%x add_status x%x\n",
18492 shdr_status, shdr_add_status);
18493 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
18494 spin_lock_irq(&phba->hbalock);
18495 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
18496 spin_unlock_irq(&phba->hbalock);
18498 * CVL event triggered FCF rediscover request failed,
18499 * last resort to re-try current registered FCF entry.
18501 lpfc_retry_pport_discovery(phba);
18503 spin_lock_irq(&phba->hbalock);
18504 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
18505 spin_unlock_irq(&phba->hbalock);
18507 * DEAD FCF event triggered FCF rediscover request
18508 * failed, last resort to fail over as a link down
18509 * to FCF registration.
18511 lpfc_sli4_fcf_dead_failthrough(phba);
18514 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18515 "2775 Start FCF rediscover quiescent timer\n");
18517 * Start FCF rediscovery wait timer for pending FCF
18518 * before rescan FCF record table.
18520 lpfc_fcf_redisc_wait_start_timer(phba);
18523 mempool_free(mbox, phba->mbox_mem_pool);
18527 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
18528 * @phba: pointer to lpfc hba data structure.
18530 * This routine is invoked to request rediscovery of the entire FCF table
18534 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
18536 LPFC_MBOXQ_t *mbox;
18537 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
18540 /* Cancel retry delay timers to all vports before FCF rediscover */
18541 lpfc_cancel_all_vport_retry_delay_timer(phba);
18543 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18545 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18546 "2745 Failed to allocate mbox for "
18547 "requesting FCF rediscover.\n");
18551 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
18552 sizeof(struct lpfc_sli4_cfg_mhdr));
18553 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18554 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
18555 length, LPFC_SLI4_MBX_EMBED);
18557 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
18558 /* Set count to 0 for invalidating the entire FCF database */
18559 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
18561 /* Issue the mailbox command asynchronously */
18562 mbox->vport = phba->pport;
18563 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
18564 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
18566 if (rc == MBX_NOT_FINISHED) {
18567 mempool_free(mbox, phba->mbox_mem_pool);
18574 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
18575 * @phba: pointer to lpfc hba data structure.
18577 * This function is the failover routine as a last resort to the FCF DEAD
18578 * event when the driver failed to perform fast FCF failover.
18581 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
18583 uint32_t link_state;
18586 * Last resort as FCF DEAD event failover will treat this as
18587 * a link down, but save the link state because we don't want
18588 * it to be changed to Link Down unless it is already down.
18590 link_state = phba->link_state;
18591 lpfc_linkdown(phba);
18592 phba->link_state = link_state;
18594 /* Unregister FCF if no devices connected to it */
18595 lpfc_unregister_unused_fcf(phba);
18599 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
18600 * @phba: pointer to lpfc hba data structure.
18601 * @rgn23_data: pointer to configure region 23 data.
18603 * This function gets SLI3 port configure region 23 data through the memory dump
18604 * mailbox command. When it successfully retrieves data, the size of the data
18605 * will be returned, otherwise, 0 will be returned.
18608 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
18610 LPFC_MBOXQ_t *pmb = NULL;
18612 uint32_t offset = 0;
18618 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18620 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18621 "2600 failed to allocate mailbox memory\n");
18627 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
18628 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
18630 if (rc != MBX_SUCCESS) {
18631 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
18632 "2601 failed to read config "
18633 "region 23, rc 0x%x Status 0x%x\n",
18634 rc, mb->mbxStatus);
18635 mb->un.varDmp.word_cnt = 0;
18638 * dump mem may return a zero when finished or we got a
18639 * mailbox error, either way we are done.
18641 if (mb->un.varDmp.word_cnt == 0)
18643 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
18644 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
18646 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
18647 rgn23_data + offset,
18648 mb->un.varDmp.word_cnt);
18649 offset += mb->un.varDmp.word_cnt;
18650 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
18652 mempool_free(pmb, phba->mbox_mem_pool);
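/*
 * Illustrative sketch (not driver code): the bounded chunked-dump loop
 * above. Each pass asks for the next chunk at the current offset, clamps
 * the reported count to the space left, and stops on a zero-length reply
 * or a full buffer. read_chunk() is a hypothetical stand-in for the dump
 * mailbox round-trip and is assumed to fill at most sizeof(chunk) bytes.
 */
#include <stdint.h>
#include <string.h>

#define SKETCH_RGN_SIZE 4096u

static uint32_t sketch_dump_region(uint8_t *dst,
				   uint32_t (*read_chunk)(uint32_t off,
							  uint8_t *buf))
{
	uint8_t chunk[128];
	uint32_t offset = 0, cnt;

	do {
		cnt = read_chunk(offset, chunk);
		if (cnt == 0)
			break;                          /* device says done */
		if (cnt > SKETCH_RGN_SIZE - offset)
			cnt = SKETCH_RGN_SIZE - offset; /* clamp to buffer */
		memcpy(dst + offset, chunk, cnt);
		offset += cnt;
	} while (offset < SKETCH_RGN_SIZE);
	return offset; /* bytes retrieved */
}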
18657 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
18658 * @phba: pointer to lpfc hba data structure.
18659 * @rgn23_data: pointer to configure region 23 data.
18661 * This function gets SLI4 port configure region 23 data through the memory dump
18662 * mailbox command. When it successfully retrieves data, the size of the data
18663 * will be returned, otherwise, 0 will be returned.
18666 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
18668 LPFC_MBOXQ_t *mboxq = NULL;
18669 struct lpfc_dmabuf *mp = NULL;
18670 struct lpfc_mqe *mqe;
18671 uint32_t data_length = 0;
18677 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18679 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18680 "3105 failed to allocate mailbox memory\n");
18684 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
18686 mqe = &mboxq->u.mqe;
18687 mp = (struct lpfc_dmabuf *) mboxq->context1;
18688 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18691 data_length = mqe->un.mb_words[5];
18692 if (data_length == 0)
18694 if (data_length > DMP_RGN23_SIZE) {
18698 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
18700 mempool_free(mboxq, phba->mbox_mem_pool);
18702 lpfc_mbuf_free(phba, mp->virt, mp->phys);
18705 return data_length;
18709 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
18710 * @phba: pointer to lpfc hba data structure.
18712 * This function reads region 23 and parses the TLV for port status to
18713 * decide if the user disabled the port. If the TLV indicates the
18714 * port is disabled, the hba_flag is set accordingly.
18717 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
18719 uint8_t *rgn23_data = NULL;
18720 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
18721 uint32_t offset = 0;
18723 /* Get adapter Region 23 data */
18724 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
18728 if (phba->sli_rev < LPFC_SLI_REV4)
18729 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
18731 if_type = bf_get(lpfc_sli_intf_if_type,
18732 &phba->sli4_hba.sli_intf);
18733 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
18735 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
18741 /* Check the region signature first */
18742 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
18743 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18744 "2619 Config region 23 has bad signature\n");
18749 /* Check the data structure version */
18750 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
18751 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18752 "2620 Config region 23 has bad version\n");
18757 /* Parse TLV entries in the region */
18758 while (offset < data_size) {
18759 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
18762 * If the TLV is not driver specific TLV or driver id is
18763 * not linux driver id, skip the record.
18765 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
18766 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
18767 (rgn23_data[offset + 3] != 0)) {
18768 offset += rgn23_data[offset + 1] * 4 + 4;
18772 /* Driver found a driver specific TLV in the config region */
18773 sub_tlv_len = rgn23_data[offset + 1] * 4;
18778 * Search for configured port state sub-TLV.
18780 while ((offset < data_size) &&
18781 (tlv_offset < sub_tlv_len)) {
18782 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
18787 if (rgn23_data[offset] != PORT_STE_TYPE) {
18788 offset += rgn23_data[offset + 1] * 4 + 4;
18789 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
18793 /* This HBA contains PORT_STE configured */
18794 if (!rgn23_data[offset + 2])
18795 phba->hba_flag |= LINK_DISABLED;
18807 * lpfc_wr_object - write an object to the firmware
18808 * @phba: HBA structure that indicates port to create a queue on.
18809 * @dmabuf_list: list of dmabufs to write to the port.
18810 * @size: the total byte value of the objects to write to the port.
18811 * @offset: the current offset to be used to start the transfer.
18813 * This routine will create a wr_object mailbox command to send to the port.
18814 * The mailbox command will be constructed using the dma buffers described in
18815 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
18816 * BDEs as the embedded mailbox can support. The @offset variable will be
18817 * used to indicate the starting offset of the transfer and will also return
18818 * the offset after the write object mailbox has completed. @size is used to
18819 * determine the end of the object and whether the eof bit should be set.
18821 * Return 0 if successful and offset will contain the new offset to use
18822 * for the next write.
18823 * Return negative value for error cases.
int
lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
	       uint32_t size, uint32_t *offset)
{
	struct lpfc_mbx_wr_object *wr_object;
	LPFC_MBOXQ_t *mbox;
	int rc = 0, i = 0;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_dmabuf *dmabuf;
	uint32_t written = 0;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_WRITE_OBJECT,
			sizeof(struct lpfc_mbx_wr_object) -
			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
	wr_object->u.request.write_offset = *offset;
	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
	wr_object->u.request.object_name[0] =
		cpu_to_le32(wr_object->u.request.object_name[0]);
	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
	list_for_each_entry(dmabuf, dmabuf_list, list) {
		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
			break;
		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
		wr_object->u.request.bde[i].addrHigh =
			putPaddrHigh(dmabuf->phys);
		if (written + SLI4_PAGE_SIZE >= size) {
			/* Last chunk: only the remainder is valid, so set
			 * the eof bit.
			 */
			wr_object->u.request.bde[i].tus.f.bdeSize =
				(size - written);
			written += (size - written);
			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
		} else {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				SLI4_PAGE_SIZE;
			written += SLI4_PAGE_SIZE;
		}
		i++;
	}
	wr_object->u.request.bde_count = i;
	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3025 Write Object mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
		*offset = shdr_add_status;
	} else
		*offset += wr_object->u.response.actual_write_length;
	return rc;
}
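/*
 * Usage sketch (illustrative only, not driver code): a firmware
 * download path would typically call lpfc_wr_object() repeatedly,
 * feeding the returned @offset back in until the whole image has
 * been written. "dma_buffer_list" and "fw_size" below are assumed
 * to have been set up by the caller:
 *
 *	uint32_t fw_offset = 0;
 *	int rc = 0;
 *
 *	while (fw_offset < fw_size && !rc)
 *		rc = lpfc_wr_object(phba, &dma_buffer_list,
 *				    fw_size, &fw_offset);
 *
 * Note that on failure @offset is overwritten with the additional
 * status from the mailbox subheader rather than a byte count.
 */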
/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function iterates through the mailbox queue and cleans up all
 * REG_LOGIN and REG_VPI mailbox commands associated with the vport. It
 * is called when the driver wants to restart discovery of the vport due
 * to a Clear Virtual Link event.
 **/
void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *act_mbx_ndlp = NULL;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LIST_HEAD(mbox_cmd_list);
	uint8_t restart_loop;

	/* Clean up internally queued mailbox commands with the vport */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if (mb->vport != vport)
			continue;

		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
		    (mb->u.mb.mbxCommand != MBX_REG_VPI))
			continue;

		list_del(&mb->list);
		list_add_tail(&mb->list, &mbox_cmd_list);
	}
	/* Clean up active mailbox command with the vport */
	mb = phba->sli.mbox_active;
	if (mb && (mb->vport == vport)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
		    (mb->u.mb.mbxCommand == MBX_REG_VPI))
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
			/* Take a reference count for delayed processing */
			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
			/* Unregister the RPI when the mailbox completes */
			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
		}
	}
	/* Clean up any mailbox completions which are not yet processed */
	do {
		restart_loop = 0;
		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
			/*
			 * If this mailbox is already processed or it is
			 * for another vport, ignore it.
			 */
			if ((mb->vport != vport) ||
			    (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
				continue;

			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
			    (mb->u.mb.mbxCommand != MBX_REG_VPI))
				continue;

			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
				ndlp = (struct lpfc_nodelist *)mb->context2;
				/* Unregister the RPI when the mailbox
				 * completes.
				 */
				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
				restart_loop = 1;
				spin_unlock_irq(&phba->hbalock);
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				spin_lock_irq(&phba->hbalock);
				break;
			}
		}
	} while (restart_loop);

	spin_unlock_irq(&phba->hbalock);

	/* Release the cleaned-up mailbox commands */
	while (!list_empty(&mbox_cmd_list)) {
		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			mp = (struct lpfc_dmabuf *)(mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			ndlp = (struct lpfc_nodelist *)mb->context2;
			mb->context2 = NULL;
			if (ndlp) {
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				lpfc_nlp_put(ndlp);
			}
		}
		mempool_free(mb, phba->mbox_mem_pool);
	}

	/* Release the ndlp with the cleaned-up active mailbox command */
	if (act_mbx_ndlp) {
		spin_lock(shost->host_lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(shost->host_lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}
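/*
 * Usage sketch (illustrative): a Clear Virtual Link handler would drop
 * the stale registrations before kicking discovery off again. The
 * restart step shown here is only an example of what a caller might do
 * next, not the driver's actual CVL path:
 *
 *	lpfc_cleanup_pending_mbox(vport);
 *	lpfc_initial_fdisc(vport);
 */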
/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no SGLs. This congestion can occur with large
 * vport counts during node discovery.
 **/
uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *piocbq = NULL;
	unsigned long iflags = 0;
	char *fail_msg = NULL;
	struct lpfc_sglq *sglq;
	union lpfc_wqe128 wqe128;
	union lpfc_wqe *wqe = (union lpfc_wqe *) &wqe128;
	uint32_t txq_cnt = 0;

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return 0;

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry(piocbq, &pring->txq, list) {
		txq_cnt++;
	}

	if (txq_cnt > pring->txq_max)
		pring->txq_max = txq_cnt;

	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	while (!list_empty(&pring->txq)) {
		spin_lock_irqsave(&pring->ring_lock, iflags);

		piocbq = lpfc_sli_ringtx_get(phba, pring);
		if (!piocbq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2823 txq empty and txq_cnt is %d\n",
				txq_cnt);
			break;
		}
		sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
		if (!sglq) {
			__lpfc_sli_ringtx_put(phba, pring, piocbq);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			break;
		}
		txq_cnt--;

		/* The xri and iocb resources are secured,
		 * attempt to issue the request.
		 */
		piocbq->sli4_lxritag = sglq->sli4_lxritag;
		piocbq->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
			fail_msg = "to convert bpl to sgl";
		else if (lpfc_sli4_iocb2wqe(phba, piocbq, wqe))
			fail_msg = "to convert iocb to wqe";
		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe))
			fail_msg = " - Wq is full";
		else
			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

		if (fail_msg) {
			/* Failed means we can't issue and need to cancel */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2822 IOCB failed %s iotag 0x%x "
					"xri 0x%x\n",
					fail_msg,
					piocbq->iotag, piocbq->sli4_xritag);
			list_add_tail(&piocbq->list, &completions);
		}
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	}

	/* Cancel all the IOCBs that cannot be issued */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return txq_cnt;
}
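/*
 * Usage sketch (illustrative): callers that have just freed ELS
 * XRI/sglq resources can re-run the drain so that deferred IOCBs on
 * the txq get another chance to be issued, e.g.:
 *
 *	if (!list_empty(&pring->txq))
 *		lpfc_drain_txq(phba);
 */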
/**
 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @pwqeq: Pointer to command WQE.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the WQE
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the WQE contains a BPL then the list of BDEs is
 * converted to sli4_sges. If the WQE contains a single
 * BDE then it is converted to a single sli4_sge.
 * The WQE is still in cpu endianness so the contents of
 * the bpl can be used without byte swapping.
 *
 * Returns a valid XRI on success, NO_XRI on failure.
 **/
static uint16_t
lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
		 struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl = NULL;
	struct lpfc_dmabuf *dmabuf;
	union lpfc_wqe *wqe;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request list */
	int inbound = 0; /* number of sg reply entries inbound from firmware */
	uint32_t cmd;

	if (!pwqeq || !sglq)
		return xritag;

	sgl = (struct sli4_sge *)sglq->sgl;
	wqe = &pwqeq->wqe;
	pwqeq->iocb.ulpIoTag = pwqeq->iotag;

	cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
	if (cmd == CMD_XMIT_BLS_RSP64_WQE)
		return sglq->sli4_xritag;
	numBdes = pwqeq->rsvd2;
	if (numBdes) {
		/* The addrHigh and addrLow fields within the WQE
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (pwqeq->context3)
			dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
		else
			return xritag;

		bpl = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);

			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			switch (cmd) {
			case CMD_GEN_REQUEST64_WQE:
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
				break;
			case CMD_FCP_TRSP64_WQE:
				bf_set(lpfc_sli4_sge_offset, sgl, 0);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				break;
			case CMD_FCP_TSEND64_WQE:
			case CMD_FCP_TRECEIVE64_WQE:
				bf_set(lpfc_sli4_sge_type, sgl,
					bpl->tus.f.bdeFlags);
				if (i < 3)
					offset = 0;
				else
					offset += bde.tus.f.bdeSize;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				break;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
		sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
	}
	return sglq->sli4_xritag;
}
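/*
 * Worked example for the CMD_GEN_REQUEST64_WQE case above
 * (illustrative sizes): a BPL with two 64-byte request BDEs followed
 * by two BUFF_TYPE_BDE_64I reply BDEs yields request SGE offsets of
 * 0 and 64; the first inbound entry resets the accumulator, so the
 * reply SGEs also get offsets 0 and 64. Request and reply offsets
 * are therefore accumulated in separate spaces, as the comment in
 * the loop requires.
 */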
/**
 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
 * @phba: Pointer to HBA context object.
 * @ring_number: Base sli ring number
 * @pwqe: Pointer to command WQE.
 **/
int
lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *pwqe)
{
	union lpfc_wqe *wqe = &pwqe->wqe;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_queue *wq;
	struct lpfc_sglq *sglq;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	int ret;

	/* NVME_LS and NVME_LS ABTS requests. */
	if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
		pring = phba->sli4_hba.nvmels_wq->pring;
		spin_lock_irqsave(&pring->ring_lock, iflags);
		sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
		if (!sglq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_BUSY;
		}
		pwqe->sli4_lxritag = sglq->sli4_lxritag;
		pwqe->sli4_xritag = sglq->sli4_xritag;
		if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_ERROR;
		}
		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
		       pwqe->sli4_xritag);
		ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}

	/* NVME_FCREQ and NVME_ABTS requests */
	if (pwqe->iocb_flag & LPFC_IO_NVME) {
		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
		pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;

		spin_lock_irqsave(&pring->ring_lock, iflags);
		wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
		bf_set(wqe_cqid, &wqe->generic.wqe_com,
		       phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
		ret = lpfc_sli4_wq_put(wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}

	/* NVMET requests */
	if (pwqe->iocb_flag & LPFC_IO_NVMET) {
		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
		pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;

		spin_lock_irqsave(&pring->ring_lock, iflags);
		ctxp = pwqe->context2;
		sglq = ctxp->ctxbuf->sglq;
		if (pwqe->sli4_xritag == NO_XRI) {
			pwqe->sli4_lxritag = sglq->sli4_lxritag;
			pwqe->sli4_xritag = sglq->sli4_xritag;
		}
		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
		       pwqe->sli4_xritag);
		wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
		bf_set(wqe_cqid, &wqe->generic.wqe_com,
		       phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
		ret = lpfc_sli4_wq_put(wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);