/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;

68 /* Provide function prototypes local to this module. */
69 static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
71 static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
72 uint8_t *, uint32_t *);
73 static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
75 static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
77 static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
78 struct hbq_dmabuf *dmabuf);
79 static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
80 struct lpfc_queue *cq, struct lpfc_cqe *cqe);
81 static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
83 static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
84 struct lpfc_queue *eq,
85 struct lpfc_eqe *eqe);
86 static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
87 static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
88 static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
89 static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
90 struct lpfc_queue *cq,
91 struct lpfc_cqe *cqe);
union lpfc_wqe128 lpfc_iread_cmd_template;
union lpfc_wqe128 lpfc_iwrite_cmd_template;
union lpfc_wqe128 lpfc_icmnd_cmd_template;

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

/* Setup WQE templates for IOs */
void lpfc_wqe_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* IREAD template */
	wqe = &lpfc_iread_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* IWRITE template */
	wqe = &lpfc_iwrite_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - initial_xfer_len is variable */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
	bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* ICMND template */
	wqe = &lpfc_icmnd_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

	/* Word 11 */
	bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}

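/*
 * The templates above are consumed by the I/O build paths (e.g. the SCSI
 * and NVMe prep routines), which start from a whole-template copy and then
 * fill in only the fields marked "variable" in the comments above.  A
 * minimal sketch of the pattern, with placeholder per-I/O values:
 *
 *	memcpy(wqe, &lpfc_iread_cmd_template, sizeof(union lpfc_wqe128));
 *	bf_set(wqe_xri_tag, &wqe->fcp_iread.wqe_com, xri);      (Word 6)
 *	bf_set(wqe_reqtag, &wqe->fcp_iread.wqe_com, iotag);     (Word 9)
 */
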
#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of words required to be copied.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif

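/*
 * Note: on 64-bit little-endian builds the routine above copies eight
 * bytes per iteration with no byte swapping, which is safe because SLI4
 * queue entries are little-endian.  Every other configuration falls back
 * to lpfc_sli_pcimem_bcopy(), which copies one 32-bit word at a time and
 * swaps each word as needed.
 */
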
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}

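/*
 * Worked example of the full-queue test above: with entry_count = 256,
 * host_index = 255 and hba_index = 0, idx becomes (255 + 1) % 256 = 0,
 * which equals hba_index, so the WQ is reported full (-EBUSY).  One slot
 * is always left unused to distinguish a full queue from an empty one.
 */
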
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers.
 **/
static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return;

	q->hba_index = index;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			struct lpfc_eqe *eqe)
{
	if (!phba->sli4_hba.pc_sli4_params.eqav)
		bf_set_le32(lpfc_eqe_valid, eqe, 0);

	eq->host_index = ((eq->host_index + 1) % eq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}

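/*
 * Two EQE completion schemes are handled above: without autovalid (eqav
 * clear) the valid bit of each consumed EQE is cleared so a later pass can
 * recognize new entries; with autovalid (eqav set, e.g. if_type 6) entries
 * are left untouched and the expected sense of the valid bit is toggled
 * each time host_index wraps past entry_count.
 */
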
static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe = NULL;
	u32 eq_count = 0, cq_count = 0;
	struct lpfc_cqe *cqe = NULL;
	struct lpfc_queue *cq = NULL, *childq = NULL;
	int cqid;

	/* walk all the EQ entries and drop on the floor */
	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		/* Get the reference to the corresponding CQ */
		cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
		cq = NULL;

		list_for_each_entry(childq, &eq->child_list, list) {
			if (childq->queue_id == cqid) {
				cq = childq;
				break;
			}
		}
		/* If CQ is valid, iterate through it and drop all the CQEs */
		if (cq) {
			cqe = lpfc_sli4_cq_get(cq);
			while (cqe) {
				__lpfc_sli4_consume_cqe(phba, cq, cqe);
				cq_count++;
				cqe = lpfc_sli4_cq_get(cq);
			}
			/* Clear and re-arm the CQ */
			phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
							LPFC_QUEUE_REARM);
			cq_count = 0;
		}
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		eq_count++;
		eqe = lpfc_sli4_eq_get(eq);
	}

	/* Clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}

static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		     uint8_t rearm)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		consumed++;
		if (!(++count % eq->max_proc_limit))
			break;
		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}
		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	/* Track the max number of EQEs processed in 1 intr */
	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	xchg(&eq->queue_claimed, 0);

rearm_and_exit:
	/* Always clear the EQ. */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

	return count;
}

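/*
 * Two limits bound the loop above: eq->max_proc_limit caps how many EQEs a
 * single call may consume before breaking out, and eq->notify_interval
 * batches doorbell writes so consumed entries are released back to the HBA
 * in groups instead of one EQE at a time.
 */
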
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	cqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_eq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return cqe;
}

static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	if (!phba->sli4_hba.pc_sli4_params.cqav)
		bf_set_le32(lpfc_cqe_valid, cqe, 0);

	cq->host_index = ((cq->host_index + 1) % cq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}

/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The content of the Header Receive Queue Entry.
 * @drqe: The content of the Data Receive Queue Entry.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entry. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on @hq then this function will
 * return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}

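/*
 * The header and data RQs above operate as a pair: each received frame
 * consumes one entry from both queues, which is why mismatched put indexes
 * are rejected with -EINVAL and both RQEs are copied under the same lock
 * before the doorbell (rung once every notify_interval postings)
 * advertises the new buffers to the HBA.
 */
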
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	/* Lookup did to verify if did is still active on this vport */
	if (rrq->vport)
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

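/*
 * Background for the RRQ helpers in this file: after an exchange is
 * aborted, the same XRI must not be reused with the same remote port until
 * R_A_TOV expires or an RRQ ELS clears the Recovery Qualifier, so such
 * XRIs are tracked in the per-node active_rrqs_xri_bitmap and on the
 * phba->active_rrq_list until they can be recycled.
 */
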
/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If ndlp is NULL remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport != vport)
			continue;

		if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
			list_move(&rrq->list, &rrq_list);

	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 if the rrq was activated for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
			     msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}

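/*
 * Example of the timing math above: with the default R_A_TOV of 10
 * seconds, rrq_stop_time lands 11 seconds in the future, keeping the XRI
 * quarantined for this DID slightly longer than the recovery window
 * before lpfc_handle_rrq_active() reclaims it.
 */
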
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not empty
 * then it is successful, it returns pointer to the newly allocated sglq
 * object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring = NULL;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
		pring = phba->sli4_hba.nvmels_wq->pring;
	else
		pring = lpfc_phba_elsring(phba);

	lockdep_assert_held(&pring->ring_lock);

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}

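/*
 * The loop above rotates through the free list at most once: an sglq
 * whose XRI still has an RRQ pending for this DID is appended back to the
 * tail and the next head entry is tried, so seeing start_sglq again means
 * every free XRI is currently quarantined and NULL is returned.
 */
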
/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object
 * to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sqlq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
 * asserted held in the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);

			/* Check if we can get a reference on ndlp */
			if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
				sglq->ndlp = NULL;

			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			pring = lpfc_phba_elsring(phba);
			/* Check if TXQ queue needs to be serviced */
			if (pring && (!list_empty(&pring->txq)))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object to the
 * iocb pool. The iotag in the iocb object does not change for each
 * use of the iocb object. This function clears all other fields of
 * the iocb object when it is freed. The hbalock is asserted held in
 * the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command and
 * status fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (piocb->wqe_cmpl) {
			if (piocb->iocb_flag & LPFC_IO_NVME)
				lpfc_nvme_cancel_iocb(phba, piocb,
						      ulpstatus, ulpWord4);
			else
				lpfc_sli_release_iocbq(phba, piocb);

		} else if (piocb->iocb_cmpl) {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		} else {
			lpfc_sli_release_iocbq(phba, piocb);
		}
	}
}

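/*
 * The dispatch above mirrors the two command formats: SLI4 WQE-based
 * requests carry a wqe_cmpl callback (NVMe ones are completed through
 * lpfc_nvme_cancel_iocb()), while legacy requests complete through
 * iocb_cmpl with the simulated ulpStatus/ulpWord[4] values, and anything
 * without a callback is simply returned to the iocb pool.
 */
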
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
	case CMD_SEND_FRAME:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}

/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * The driver calls this function with the hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		lockdep_assert_held(&pring->ring_lock);
	else
		lockdep_assert_held(&phba->hbalock);

	BUG_ON(!piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
	pring->txcmplq_cnt++;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		     pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}

/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}

/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which need to be
 *            posted to firmware.
 *
 * This function is called to post a new iocb to the firmware. This
 * function copies the new iocb to ring iocb slot and updates the
 * ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object. The hbalock is asserted held in the code path calling
 * this routine.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;


	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}

1964 * lpfc_sli_update_full_ring - Update the chip attention register
1965 * @phba: Pointer to HBA context object.
1966 * @pring: Pointer to driver SLI ring object.
1968 * The caller is not required to hold any lock for calling this function.
1969 * This function updates the chip attention bits for the ring to inform firmware
1970 * that there is pending work to be done for this ring and requests an
1971 * interrupt when there is space available in the ring. This function is
1972 * called when the driver is unable to post more iocbs to the ring due
1973 * to unavailability of space in the ring.
1976 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1978 int ringno = pring->ringno;
1980 pring->flag |= LPFC_CALL_RING_AVAILABLE;
1985 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1986 * The HBA will tell us when an IOCB entry is available.
1988 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1989 readl(phba->CAregaddr); /* flush */
1991 pring->stats.iocb_cmd_full++;
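/*
 * Illustrative sketch of the posted-write idiom used above: PCI memory
 * writes may linger in bridge write buffers, so the writel() is followed
 * by a readl() of a register on the same device. The read cannot complete
 * until all earlier writes have reached the adapter, which is why the
 * driver reads CAregaddr back purely as a flush. The helper name is
 * hypothetical.
 */
static inline void demo_ring_doorbell(void __iomem *reg, u32 val)
{
	writel(val, reg);	/* posted write: may be buffered */
	readl(reg);		/* non-posted read: flushes it to the device */
}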
1995 * lpfc_sli_update_ring - Update chip attention register
1996 * @phba: Pointer to HBA context object.
1997 * @pring: Pointer to driver SLI ring object.
1999 * This function updates the chip attention register bit for the
2000 * given ring to inform HBA that there is more work to be done
2001 * in this ring. The caller is not required to hold any lock.
2004 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2006 int ringno = pring->ringno;
2009 * Tell the HBA that there is work to do in this ring.
2011 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
2013 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
2014 readl(phba->CAregaddr); /* flush */
2019 * lpfc_sli_resume_iocb - Process iocbs in the txq
2020 * @phba: Pointer to HBA context object.
2021 * @pring: Pointer to driver SLI ring object.
2023 * This function is called with hbalock held to post pending iocbs
2024 * in the txq to the firmware. This function is called when the driver
2025 * detects space available in the ring.
2028 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2031 struct lpfc_iocbq *nextiocb;
2033 lockdep_assert_held(&phba->hbalock);
2037 * (a) there is anything on the txq to send
2039 * (c) link attention events can be processed (fcp ring only)
2040 * (d) IOCB processing is not blocked by the outstanding mbox command.
2043 if (lpfc_is_link_up(phba) &&
2044 (!list_empty(&pring->txq)) &&
2045 (pring->ringno != LPFC_FCP_RING ||
2046 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
2048 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2049 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
2050 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2053 lpfc_sli_update_ring(phba, pring);
2055 lpfc_sli_update_full_ring(phba, pring);
2062 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
2063 * @phba: Pointer to HBA context object.
2064 * @hbqno: HBQ number.
2066 * This function is called with hbalock held to get the next
2067 * available slot for the given HBQ. If there is a free slot
2068 * available for the HBQ, it will return a pointer to the next available
2069 * HBQ entry; otherwise it will return NULL.
2071 static struct lpfc_hbq_entry *
2072 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
2074 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2076 lockdep_assert_held(&phba->hbalock);
2078 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
2079 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
2080 hbqp->next_hbqPutIdx = 0;
2082 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
2083 uint32_t raw_index = phba->hbq_get[hbqno];
2084 uint32_t getidx = le32_to_cpu(raw_index);
2086 hbqp->local_hbqGetIdx = getidx;
2088 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
2089 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2090 "1802 HBQ %d: local_hbqGetIdx "
2091 "%u is > than hbqp->entry_count %u\n",
2092 hbqno, hbqp->local_hbqGetIdx,
2095 phba->link_state = LPFC_HBA_ERROR;
2099 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
2103 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
2108 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
2109 * @phba: Pointer to HBA context object.
2111 * This function is called with no lock held to free all the
2112 * hbq buffers while uninitializing the SLI interface. It also
2113 * frees the HBQ buffers returned by the firmware but not yet
2114 * processed by the upper layers.
2117 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
2119 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2120 struct hbq_dmabuf *hbq_buf;
2121 unsigned long flags;
2124 hbq_count = lpfc_sli_hbq_count();
2125 /* Return all memory used by all HBQs */
2126 spin_lock_irqsave(&phba->hbalock, flags);
2127 for (i = 0; i < hbq_count; ++i) {
2128 list_for_each_entry_safe(dmabuf, next_dmabuf,
2129 &phba->hbqs[i].hbq_buffer_list, list) {
2130 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2131 list_del(&hbq_buf->dbuf.list);
2132 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2134 phba->hbqs[i].buffer_count = 0;
2137 /* Mark the HBQs not in use */
2138 phba->hbq_in_use = 0;
2139 spin_unlock_irqrestore(&phba->hbalock, flags);
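/*
 * Illustrative sketch, not driver code: the _safe teardown idiom used by
 * lpfc_sli_hbqbuf_free_all() above. list_for_each_entry_safe() caches the
 * next node before the body runs, so the current entry can be unlinked
 * and freed without corrupting the walk. The demo_buf type is
 * hypothetical.
 */
struct demo_buf {
	struct list_head list;
};

static void demo_free_all(struct list_head *head)
{
	struct demo_buf *buf, *next;

	list_for_each_entry_safe(buf, next, head, list) {
		list_del(&buf->list);	/* unlink first ... */
		kfree(buf);		/* ... then the node can be freed */
	}
}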
2143 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
2144 * @phba: Pointer to HBA context object.
2145 * @hbqno: HBQ number.
2146 * @hbq_buf: Pointer to HBQ buffer.
2148 * This function is called with the hbalock held to post an
2149 * hbq buffer to the firmware. If the function finds an empty
2150 * slot in the HBQ, it will post the buffer. The function will return a
2151 * pointer to the hbq entry if it successfully posts the buffer;
2152 * otherwise it will return NULL.
2155 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2156 struct hbq_dmabuf *hbq_buf)
2158 lockdep_assert_held(&phba->hbalock);
2159 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2163 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2164 * @phba: Pointer to HBA context object.
2165 * @hbqno: HBQ number.
2166 * @hbq_buf: Pointer to HBQ buffer.
2168 * This function is called with the hbalock held to post an hbq buffer to the
2169 * firmware. If the function finds an empty slot in the HBQ, it will post the
2170 * buffer and place it on the hbq_buffer_list. The function will return zero if
2171 * it successfully posts the buffer; otherwise it will return an error.
2174 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2175 struct hbq_dmabuf *hbq_buf)
2177 struct lpfc_hbq_entry *hbqe;
2178 dma_addr_t physaddr = hbq_buf->dbuf.phys;
2180 lockdep_assert_held(&phba->hbalock);
2181 /* Get next HBQ entry slot to use */
2182 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2184 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2186 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2187 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
2188 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2189 hbqe->bde.tus.f.bdeFlags = 0;
2190 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2191 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2193 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2194 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2196 readl(phba->hbq_put + hbqno);
2197 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2204 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2205 * @phba: Pointer to HBA context object.
2206 * @hbqno: HBQ number.
2207 * @hbq_buf: Pointer to HBQ buffer.
2209 * This function is called with the hbalock held to post an RQE to the SLI4
2210 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2211 * the hbq_buffer_list and return zero, otherwise it will return an error.
2214 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2215 struct hbq_dmabuf *hbq_buf)
2218 struct lpfc_rqe hrqe;
2219 struct lpfc_rqe drqe;
2220 struct lpfc_queue *hrq;
2221 struct lpfc_queue *drq;
2223 if (hbqno != LPFC_ELS_HBQ)
2225 hrq = phba->sli4_hba.hdr_rq;
2226 drq = phba->sli4_hba.dat_rq;
2228 lockdep_assert_held(&phba->hbalock);
2229 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2230 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2231 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2232 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2233 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2236 hbq_buf->tag = (rc | (hbqno << 16));
2237 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
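/*
 * Illustrative sketch of the tag layout built above: the HBQ number is
 * packed into the upper 16 bits and the buffer index into the lower 16,
 * so a completion that carries only the tag can be routed back to the
 * owning HBQ (cf. the tag >> 16 decode in lpfc_sli_hbqbuf_find() and
 * lpfc_sli_free_hbq() below). The demo_* helper names are hypothetical.
 */
static inline u32 demo_hbq_tag(u32 hbqno, u32 index)
{
	return (index & 0xffff) | (hbqno << 16);
}

static inline u32 demo_tag_to_hbqno(u32 tag)
{
	return tag >> 16;
}

static inline u32 demo_tag_to_index(u32 tag)
{
	return tag & 0xffff;
}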
2241 /* HBQ for ELS and CT traffic. */
2242 static struct lpfc_hbq_init lpfc_els_hbq = {
2247 .ring_mask = (1 << LPFC_ELS_RING),
2254 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2259 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2260 * @phba: Pointer to HBA context object.
2261 * @hbqno: HBQ number.
2262 * @count: Number of HBQ buffers to be posted.
2264 * This function is called with no lock held to post more hbq buffers to the
2265 * given HBQ. The function returns the number of HBQ buffers successfully
2269 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2271 uint32_t i, posted = 0;
2272 unsigned long flags;
2273 struct hbq_dmabuf *hbq_buffer;
2274 LIST_HEAD(hbq_buf_list);
2275 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2278 if ((phba->hbqs[hbqno].buffer_count + count) >
2279 lpfc_hbq_defs[hbqno]->entry_count)
2280 count = lpfc_hbq_defs[hbqno]->entry_count -
2281 phba->hbqs[hbqno].buffer_count;
2284 /* Allocate HBQ entries */
2285 for (i = 0; i < count; i++) {
2286 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2289 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2291 /* Check whether HBQ is still in use */
2292 spin_lock_irqsave(&phba->hbalock, flags);
2293 if (!phba->hbq_in_use)
2295 while (!list_empty(&hbq_buf_list)) {
2296 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2298 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2300 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2301 phba->hbqs[hbqno].buffer_count++;
2304 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2306 spin_unlock_irqrestore(&phba->hbalock, flags);
2309 spin_unlock_irqrestore(&phba->hbalock, flags);
2310 while (!list_empty(&hbq_buf_list)) {
2311 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2313 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
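/*
 * Illustrative sketch, not driver code: the two-phase posting pattern used
 * by lpfc_sli_hbqbuf_fill_hbqs() above. Buffers are allocated onto a
 * private list with no lock held, handed over under the lock only if the
 * queue is still live, and any leftovers are freed after the lock is
 * dropped. All demo_* names are hypothetical.
 */
struct demo_qbuf {
	struct list_head list;
};

struct demo_queue {
	spinlock_t lock;
	bool in_use;
};

static struct demo_qbuf *demo_alloc_qbuf(void);
static int demo_post_qbuf(struct demo_queue *q, struct demo_qbuf *buf);
static void demo_free_qbuf(struct demo_qbuf *buf);

static int demo_fill(struct demo_queue *q, int count)
{
	LIST_HEAD(staging);
	struct demo_qbuf *buf, *next;
	unsigned long flags;
	int posted = 0;

	while (count-- > 0) {		/* phase 1: allocate unlocked */
		buf = demo_alloc_qbuf();
		if (!buf)
			break;
		list_add_tail(&buf->list, &staging);
	}

	spin_lock_irqsave(&q->lock, flags);
	if (q->in_use) {		/* phase 2: hand over under the lock */
		list_for_each_entry_safe(buf, next, &staging, list) {
			list_del(&buf->list);
			if (!demo_post_qbuf(q, buf))
				posted++;
			else
				demo_free_qbuf(buf);
		}
	}
	spin_unlock_irqrestore(&q->lock, flags);

	/* queue went away while we allocated: free the leftovers */
	list_for_each_entry_safe(buf, next, &staging, list) {
		list_del(&buf->list);
		demo_free_qbuf(buf);
	}
	return posted;
}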
2319 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2320 * @phba: Pointer to HBA context object.
2323 * This function posts more buffers to the HBQ. This function
2324 * is called with no lock held. The function returns the number of HBQ entries
2325 * successfully allocated.
2328 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2330 if (phba->sli_rev == LPFC_SLI_REV4)
2333 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2334 lpfc_hbq_defs[qno]->add_count);
2338 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2339 * @phba: Pointer to HBA context object.
2340 * @qno: HBQ queue number.
2342 * This function is called from SLI initialization code path with
2343 * no lock held to post initial HBQ buffers to firmware. The
2344 * function returns the number of HBQ entries successfully allocated.
2347 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2349 if (phba->sli_rev == LPFC_SLI_REV4)
2350 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2351 lpfc_hbq_defs[qno]->entry_count);
2353 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2354 lpfc_hbq_defs[qno]->init_count);
2358 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2360 * This function removes the first hbq buffer on an hbq list and returns a
2361 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2363 static struct hbq_dmabuf *
2364 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2366 struct lpfc_dmabuf *d_buf;
2368 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2371 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2375 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2376 * @phba: Pointer to HBA context object.
2379 * This function removes the first RQ buffer on an RQ buffer list and returns a
2380 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2382 static struct rqb_dmabuf *
2383 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2385 struct lpfc_dmabuf *h_buf;
2386 struct lpfc_rqb *rqbp;
2389 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2390 struct lpfc_dmabuf, list);
2393 rqbp->buffer_count--;
2394 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2398 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2399 * @phba: Pointer to HBA context object.
2400 * @tag: Tag of the hbq buffer.
2402 * This function searches for the hbq buffer associated with the given tag in
2403 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer;
2404 * otherwise it returns NULL.
2406 static struct hbq_dmabuf *
2407 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2409 struct lpfc_dmabuf *d_buf;
2410 struct hbq_dmabuf *hbq_buf;
2414 if (hbqno >= LPFC_MAX_HBQS)
2417 spin_lock_irq(&phba->hbalock);
2418 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2419 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2420 if (hbq_buf->tag == tag) {
2421 spin_unlock_irq(&phba->hbalock);
2425 spin_unlock_irq(&phba->hbalock);
2426 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2427 "1803 Bad hbq tag. Data: x%x x%x\n",
2428 tag, phba->hbqs[tag >> 16].buffer_count);
2433 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2434 * @phba: Pointer to HBA context object.
2435 * @hbq_buffer: Pointer to HBQ buffer.
2437 * This function is called with the hbalock held. This function gives back
2438 * the hbq buffer to the firmware. If the HBQ does not have space to
2439 * post the buffer, it will free the buffer.
2442 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2447 hbqno = hbq_buffer->tag >> 16;
2448 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2449 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2454 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2455 * @mbxCommand: mailbox command code.
2457 * This function is called by the mailbox event handler function to verify
2458 * that the completed mailbox command is a legitimate mailbox command. If the
2459 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2460 * and the mailbox event handler will take the HBA offline.
2463 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2467 switch (mbxCommand) {
2471 case MBX_WRITE_VPARMS:
2472 case MBX_RUN_BIU_DIAG:
2475 case MBX_CONFIG_LINK:
2476 case MBX_CONFIG_RING:
2477 case MBX_RESET_RING:
2478 case MBX_READ_CONFIG:
2479 case MBX_READ_RCONFIG:
2480 case MBX_READ_SPARM:
2481 case MBX_READ_STATUS:
2485 case MBX_READ_LNK_STAT:
2487 case MBX_UNREG_LOGIN:
2489 case MBX_DUMP_MEMORY:
2490 case MBX_DUMP_CONTEXT:
2493 case MBX_UPDATE_CFG:
2495 case MBX_DEL_LD_ENTRY:
2496 case MBX_RUN_PROGRAM:
2498 case MBX_SET_VARIABLE:
2499 case MBX_UNREG_D_ID:
2500 case MBX_KILL_BOARD:
2501 case MBX_CONFIG_FARP:
2504 case MBX_RUN_BIU_DIAG64:
2505 case MBX_CONFIG_PORT:
2506 case MBX_READ_SPARM64:
2507 case MBX_READ_RPI64:
2508 case MBX_REG_LOGIN64:
2509 case MBX_READ_TOPOLOGY:
2512 case MBX_LOAD_EXP_ROM:
2513 case MBX_ASYNCEVT_ENABLE:
2517 case MBX_PORT_CAPABILITIES:
2518 case MBX_PORT_IOV_CONTROL:
2519 case MBX_SLI4_CONFIG:
2520 case MBX_SLI4_REQ_FTRS:
2522 case MBX_UNREG_FCFI:
2527 case MBX_RESUME_RPI:
2528 case MBX_READ_EVENT_LOG_STATUS:
2529 case MBX_READ_EVENT_LOG:
2530 case MBX_SECURITY_MGMT:
2532 case MBX_ACCESS_VDATA:
2543 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2544 * @phba: Pointer to HBA context object.
2545 * @pmboxq: Pointer to mailbox command.
2547 * This is the completion handler function for mailbox commands issued from
2548 * the lpfc_sli_issue_mbox_wait function. This function is called by the
2549 * mailbox event handler function with no lock held. This function
2550 * will wake up the thread waiting on the completion pointed to by context3
2554 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2556 unsigned long drvr_flag;
2557 struct completion *pmbox_done;
2560 * If pmbox_done is empty, the driver thread gave up waiting and
2561 * continued running.
2563 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2564 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2565 pmbox_done = (struct completion *)pmboxq->context3;
2567 complete(pmbox_done);
2568 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
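/*
 * Illustrative sketch, not driver code, of the waiter that pairs with the
 * wakeup above: the issuer parks an on-stack completion in the command
 * context and sleeps on it; the handler's complete() call releases it. On
 * timeout the pointer must be cleared under the same lock the handler
 * takes, so the handler never dereferences a stale stack address. The
 * demo_* names are hypothetical.
 */
struct demo_mbox {
	spinlock_t lock;
	struct completion *done;
};

static void demo_issue(struct demo_mbox *mbox);

static int demo_issue_and_wait(struct demo_mbox *mbox, unsigned long tmo_ms)
{
	DECLARE_COMPLETION_ONSTACK(done);

	mbox->done = &done;		/* seen by the completion handler */
	demo_issue(mbox);
	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(tmo_ms))) {
		spin_lock_irq(&mbox->lock);
		mbox->done = NULL;	/* handler will skip complete() */
		spin_unlock_irq(&mbox->lock);
		return -ETIMEDOUT;
	}
	return 0;
}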
2573 __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2575 unsigned long iflags;
2577 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2578 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2579 spin_lock_irqsave(&ndlp->lock, iflags);
2580 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2581 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2582 spin_unlock_irqrestore(&ndlp->lock, iflags);
2584 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2588 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2589 * @phba: Pointer to HBA context object.
2590 * @pmb: Pointer to mailbox object.
2592 * This function is the default mailbox completion handler. It
2593 * frees the memory resources associated with the completed mailbox
2594 * command. If the completed command is a REG_LOGIN mailbox command,
2595 * this function will issue an UNREG_LOGIN to reclaim the RPI.
2598 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2600 struct lpfc_vport *vport = pmb->vport;
2601 struct lpfc_dmabuf *mp;
2602 struct lpfc_nodelist *ndlp;
2603 struct Scsi_Host *shost;
2607 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
2610 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2615 * If a REG_LOGIN succeeded after the node was destroyed or the node
2616 * is in re-discovery, the driver needs to clean up the RPI.
2618 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2619 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2620 !pmb->u.mb.mbxStatus) {
2621 rpi = pmb->u.mb.un.varWords[0];
2622 vpi = pmb->u.mb.un.varRegLogin.vpi;
2623 if (phba->sli_rev == LPFC_SLI_REV4)
2624 vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2625 lpfc_unreg_login(phba, vpi, rpi, pmb);
2627 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2628 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2629 if (rc != MBX_NOT_FINISHED)
2633 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2634 !(phba->pport->load_flag & FC_UNLOADING) &&
2635 !pmb->u.mb.mbxStatus) {
2636 shost = lpfc_shost_from_vport(vport);
2637 spin_lock_irq(shost->host_lock);
2638 vport->vpi_state |= LPFC_VPI_REGISTERED;
2639 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2640 spin_unlock_irq(shost->host_lock);
2643 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2644 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2646 pmb->ctx_buf = NULL;
2647 pmb->ctx_ndlp = NULL;
2650 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2651 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2653 /* Check to see if there are any deferred events to process */
2657 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2658 "1438 UNREG cmpl deferred mbox x%x "
2659 "on NPort x%x Data: x%x x%x x%px x%x x%x\n",
2660 ndlp->nlp_rpi, ndlp->nlp_DID,
2661 ndlp->nlp_flag, ndlp->nlp_defer_did,
2662 ndlp, vport->load_flag, kref_read(&ndlp->kref));
2664 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2665 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2666 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2667 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2668 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2670 __lpfc_sli_rpi_release(vport, ndlp);
2673 /* The unreg_login mailbox is complete and had a
2674 * reference that has to be released. The PLOGI
2678 pmb->ctx_ndlp = NULL;
2682 /* This nlp_put pairs with lpfc_sli4_resume_rpi */
2683 if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
2684 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2688 /* Check security permission status on INIT_LINK mailbox command */
2689 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2690 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2691 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2692 "2860 SLI authentication is required "
2693 "for INIT_LINK but has not done yet\n");
2695 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2696 lpfc_sli4_mbox_cmd_free(phba, pmb);
2698 mempool_free(pmb, phba->mbox_mem_pool);
2701 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2702 * @phba: Pointer to HBA context object.
2703 * @pmb: Pointer to mailbox object.
2705 * This function is the unreg rpi mailbox completion handler. It
2706 * frees the memory resources associated with the completed mailbox
2707 * command. An additional reference is put on the ndlp to prevent
2708 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2709 * the unreg mailbox command completes. This routine puts the
2714 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2716 struct lpfc_vport *vport = pmb->vport;
2717 struct lpfc_nodelist *ndlp;
2719 ndlp = pmb->ctx_ndlp;
2720 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2721 if (phba->sli_rev == LPFC_SLI_REV4 &&
2722 (bf_get(lpfc_sli_intf_if_type,
2723 &phba->sli4_hba.sli_intf) >=
2724 LPFC_SLI_INTF_IF_TYPE_2)) {
2727 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2728 "0010 UNREG_LOGIN vpi:%x "
2729 "rpi:%x DID:%x defer x%x flg x%x "
2731 vport->vpi, ndlp->nlp_rpi,
2732 ndlp->nlp_DID, ndlp->nlp_defer_did,
2735 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2737 /* Check to see if there are any deferred
2740 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2741 (ndlp->nlp_defer_did !=
2742 NLP_EVT_NOTHING_PENDING)) {
2744 vport, KERN_INFO, LOG_DISCOVERY,
2745 "4111 UNREG cmpl deferred "
2747 "NPort x%x Data: x%x x%px\n",
2748 ndlp->nlp_rpi, ndlp->nlp_DID,
2749 ndlp->nlp_defer_did, ndlp);
2750 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2751 ndlp->nlp_defer_did =
2752 NLP_EVT_NOTHING_PENDING;
2753 lpfc_issue_els_plogi(
2754 vport, ndlp->nlp_DID, 0);
2756 __lpfc_sli_rpi_release(vport, ndlp);
2763 mempool_free(pmb, phba->mbox_mem_pool);
2767 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2768 * @phba: Pointer to HBA context object.
2770 * This function is called with no lock held. This function processes all
2771 * the completed mailbox commands and gives them to upper layers. The interrupt
2772 * service routine processes the mailbox completion interrupt and adds completed
2773 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2774 * The worker thread calls lpfc_sli_handle_mb_event, which will return the
2775 * completed mailbox commands in the mboxq_cmpl queue to the upper layers. This
2776 * function returns the mailbox commands to the upper layer by calling the
2777 * completion handler function of each mailbox.
2780 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2787 phba->sli.slistat.mbox_event++;
2789 /* Get all completed mailbox buffers into the cmplq */
2790 spin_lock_irq(&phba->hbalock);
2791 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2792 spin_unlock_irq(&phba->hbalock);
2794 /* Get a Mailbox buffer to setup mailbox commands for callback */
2796 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2802 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2804 lpfc_debugfs_disc_trc(pmb->vport,
2805 LPFC_DISC_TRC_MBOX_VPORT,
2806 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2807 (uint32_t)pmbox->mbxCommand,
2808 pmbox->un.varWords[0],
2809 pmbox->un.varWords[1]);
2812 lpfc_debugfs_disc_trc(phba->pport,
2814 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2815 (uint32_t)pmbox->mbxCommand,
2816 pmbox->un.varWords[0],
2817 pmbox->un.varWords[1]);
2822 * It is a fatal error if an unknown mbox command completes.
2824 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2826 /* Unknown mailbox command compl */
2827 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2828 "(%d):0323 Unknown Mailbox command "
2829 "x%x (x%x/x%x) Cmpl\n",
2830 pmb->vport ? pmb->vport->vpi :
2833 lpfc_sli_config_mbox_subsys_get(phba,
2835 lpfc_sli_config_mbox_opcode_get(phba,
2837 phba->link_state = LPFC_HBA_ERROR;
2838 phba->work_hs = HS_FFER3;
2839 lpfc_handle_eratt(phba);
2843 if (pmbox->mbxStatus) {
2844 phba->sli.slistat.mbox_stat_err++;
2845 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2846 /* Mbox cmd cmpl error - RETRYing */
2847 lpfc_printf_log(phba, KERN_INFO,
2849 "(%d):0305 Mbox cmd cmpl "
2850 "error - RETRYing Data: x%x "
2851 "(x%x/x%x) x%x x%x x%x\n",
2852 pmb->vport ? pmb->vport->vpi :
2855 lpfc_sli_config_mbox_subsys_get(phba,
2857 lpfc_sli_config_mbox_opcode_get(phba,
2860 pmbox->un.varWords[0],
2861 pmb->vport ? pmb->vport->port_state :
2862 LPFC_VPORT_UNKNOWN);
2863 pmbox->mbxStatus = 0;
2864 pmbox->mbxOwner = OWN_HOST;
2865 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2866 if (rc != MBX_NOT_FINISHED)
2871 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2872 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2873 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
2874 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2876 pmb->vport ? pmb->vport->vpi : 0,
2878 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2879 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2881 *((uint32_t *) pmbox),
2882 pmbox->un.varWords[0],
2883 pmbox->un.varWords[1],
2884 pmbox->un.varWords[2],
2885 pmbox->un.varWords[3],
2886 pmbox->un.varWords[4],
2887 pmbox->un.varWords[5],
2888 pmbox->un.varWords[6],
2889 pmbox->un.varWords[7],
2890 pmbox->un.varWords[8],
2891 pmbox->un.varWords[9],
2892 pmbox->un.varWords[10]);
2895 pmb->mbox_cmpl(phba, pmb);
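/*
 * Illustrative sketch, not driver code: the drain idiom used at the top of
 * lpfc_sli_handle_mb_event() above. The shared completion list is emptied
 * onto a private list with a single O(1) splice under the lock; the
 * per-entry work, which may be slow or may take the lock itself, then
 * runs with the lock dropped. The demo_* names are hypothetical.
 */
struct demo_cmpl {
	struct list_head list;
};

static void demo_complete(struct demo_cmpl *c);

static void demo_drain(spinlock_t *lock, struct list_head *shared)
{
	LIST_HEAD(local);
	struct demo_cmpl *c, *next;

	spin_lock_irq(lock);
	list_splice_init(shared, &local);	/* shared list is now empty */
	spin_unlock_irq(lock);

	list_for_each_entry_safe(c, next, &local, list) {
		list_del(&c->list);
		demo_complete(c);		/* no lock held here */
	}
}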
2901 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2902 * @phba: Pointer to HBA context object.
2903 * @pring: Pointer to driver SLI ring object.
2906 * This function is called with no lock held. When the QUE_BUFTAG_BIT
2907 * is set in the tag, the buffer was posted for a particular exchange and
2908 * the function will return the buffer without replacing the buffer.
2909 * If the buffer is for unsolicited ELS or CT traffic, this function
2910 * returns the buffer and also posts another buffer to the firmware.
2912 static struct lpfc_dmabuf *
2913 lpfc_sli_get_buff(struct lpfc_hba *phba,
2914 struct lpfc_sli_ring *pring,
2917 struct hbq_dmabuf *hbq_entry;
2919 if (tag & QUE_BUFTAG_BIT)
2920 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2921 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2924 return &hbq_entry->dbuf;
2928 * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
2929 * containing a NVME LS request.
2930 * @phba: pointer to lpfc hba data structure.
2931 * @piocb: pointer to the iocbq struct representing the sequence starting
2934 * This routine initially validates the NVME LS, validates there is a login
2935 * with the port that sent the LS, and then calls the appropriate nvme host
2936 * or target LS request handler.
2939 lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
2941 struct lpfc_nodelist *ndlp;
2942 struct lpfc_dmabuf *d_buf;
2943 struct hbq_dmabuf *nvmebuf;
2944 struct fc_frame_header *fc_hdr;
2945 struct lpfc_async_xchg_ctx *axchg = NULL;
2946 char *failwhy = NULL;
2947 uint32_t oxid, sid, did, fctl, size;
2950 d_buf = piocb->context2;
2952 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2953 fc_hdr = nvmebuf->hbuf.virt;
2954 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2955 sid = sli4_sid_from_fc_hdr(fc_hdr);
2956 did = sli4_did_from_fc_hdr(fc_hdr);
2957 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
2958 fc_hdr->fh_f_ctl[1] << 8 |
2959 fc_hdr->fh_f_ctl[2]);
2960 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
2962 lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
2965 if (phba->pport->load_flag & FC_UNLOADING) {
2966 failwhy = "Driver Unloading";
2967 } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
2968 failwhy = "NVME FC4 Disabled";
2969 } else if (!phba->nvmet_support && !phba->pport->localport) {
2970 failwhy = "No Localport";
2971 } else if (phba->nvmet_support && !phba->targetport) {
2972 failwhy = "No Targetport";
2973 } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
2974 failwhy = "Bad NVME LS R_CTL";
2975 } else if (unlikely((fctl & 0x00FF0000) !=
2976 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
2977 failwhy = "Bad NVME LS F_CTL";
2979 axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
2981 failwhy = "No CTX memory";
2984 if (unlikely(failwhy)) {
2985 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2986 "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
2987 sid, oxid, failwhy);
2991 /* validate the source of the LS is logged in */
2992 ndlp = lpfc_findnode_did(phba->pport, sid);
2994 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2995 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2996 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2997 "6216 NVME Unsol rcv: No ndlp: "
2998 "NPort_ID x%x oxid x%x\n",
3009 axchg->state = LPFC_NVME_STE_LS_RCV;
3010 axchg->entry_cnt = 1;
3011 axchg->rqb_buffer = (void *)nvmebuf;
3012 axchg->hdwq = &phba->sli4_hba.hdwq[0];
3013 axchg->payload = nvmebuf->dbuf.virt;
3014 INIT_LIST_HEAD(&axchg->list);
3016 if (phba->nvmet_support) {
3017 ret = lpfc_nvmet_handle_lsreq(phba, axchg);
3018 spin_lock_irq(&ndlp->lock);
3019 if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
3020 ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
3021 spin_unlock_irq(&ndlp->lock);
3023 /* This reference is a single occurrence to hold the
3024 * node valid until the nvmet transport calls
3027 if (!lpfc_nlp_get(ndlp))
3030 lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
3031 "6206 NVMET unsol ls_req ndlp x%px "
3032 "DID x%x xflags x%x refcnt %d\n",
3033 ndlp, ndlp->nlp_DID,
3034 ndlp->fc4_xpt_flags,
3035 kref_read(&ndlp->kref));
3037 spin_unlock_irq(&ndlp->lock);
3040 ret = lpfc_nvme_handle_lsreq(phba, axchg);
3043 /* if zero, LS was successfully handled. If non-zero, LS not handled */
3048 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3049 "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
3050 "NVMe%s handler failed %d\n",
3052 (phba->nvmet_support) ? "T" : "I", ret);
3054 /* recycle receive buffer */
3055 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
3057 /* If start of new exchange, abort it */
3058 if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
3059 ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
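/*
 * Illustrative sketch of the F_CTL assembly in the handler above: the FC
 * frame header carries F_CTL as a 3-byte big-endian field, so the bytes
 * are packed MSB-first into the low 24 bits of a host integer before the
 * FC_FC_* flag tests. The helper name is hypothetical.
 */
static inline u32 demo_fc_hdr_fctl(const struct fc_frame_header *fh)
{
	return (fh->fh_f_ctl[0] << 16) |
	       (fh->fh_f_ctl[1] << 8) |
	        fh->fh_f_ctl[2];
}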
3066 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
3067 * @phba: Pointer to HBA context object.
3068 * @pring: Pointer to driver SLI ring object.
3069 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
3070 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
3071 * @fch_type: the type for the first frame of the sequence.
3073 * This function is called with no lock held. This function uses the r_ctl and
3074 * type of the received sequence to find the correct callback function to call
3075 * to process the sequence.
3078 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3079 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
3086 lpfc_nvme_unsol_ls_handler(phba, saveq);
3092 /* unSolicited Responses */
3093 if (pring->prt[0].profile) {
3094 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
3095 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
3099 /* We must search, based on rctl / type
3100 for the right routine */
3101 for (i = 0; i < pring->num_mask; i++) {
3102 if ((pring->prt[i].rctl == fch_r_ctl) &&
3103 (pring->prt[i].type == fch_type)) {
3104 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
3105 (pring->prt[i].lpfc_sli_rcv_unsol_event)
3106 (phba, pring, saveq);
3114 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
3115 * @phba: Pointer to HBA context object.
3116 * @pring: Pointer to driver SLI ring object.
3117 * @saveq: Pointer to the unsolicited iocb.
3119 * This function is called with no lock held by the ring event handler
3120 * when there is an unsolicited iocb posted to the response ring by the
3121 * firmware. This function gets the buffer associated with the iocbs
3122 * and calls the event handler for the ring. This function handles both
3123 * qring buffers and hbq buffers.
3124 * When the function returns 1, the caller can free the iocb object; otherwise
3125 * upper layer functions will free the iocb objects.
3128 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3129 struct lpfc_iocbq *saveq)
3133 uint32_t Rctl, Type;
3134 struct lpfc_iocbq *iocbq;
3135 struct lpfc_dmabuf *dmzbuf;
3137 irsp = &(saveq->iocb);
3139 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
3140 if (pring->lpfc_sli_rcv_async_status)
3141 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
3143 lpfc_printf_log(phba,
3146 "0316 Ring %d handler: unexpected "
3147 "ASYNC_STATUS iocb received evt_code "
3150 irsp->un.asyncstat.evt_code);
3154 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
3155 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
3156 if (irsp->ulpBdeCount > 0) {
3157 dmzbuf = lpfc_sli_get_buff(phba, pring,
3158 irsp->un.ulpWord[3]);
3159 lpfc_in_buf_free(phba, dmzbuf);
3162 if (irsp->ulpBdeCount > 1) {
3163 dmzbuf = lpfc_sli_get_buff(phba, pring,
3164 irsp->unsli3.sli3Words[3]);
3165 lpfc_in_buf_free(phba, dmzbuf);
3168 if (irsp->ulpBdeCount > 2) {
3169 dmzbuf = lpfc_sli_get_buff(phba, pring,
3170 irsp->unsli3.sli3Words[7]);
3171 lpfc_in_buf_free(phba, dmzbuf);
3177 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3178 if (irsp->ulpBdeCount != 0) {
3179 saveq->context2 = lpfc_sli_get_buff(phba, pring,
3180 irsp->un.ulpWord[3]);
3181 if (!saveq->context2)
3182 lpfc_printf_log(phba,
3185 "0341 Ring %d Cannot find buffer for "
3186 "an unsolicited iocb. tag 0x%x\n",
3188 irsp->un.ulpWord[3]);
3190 if (irsp->ulpBdeCount == 2) {
3191 saveq->context3 = lpfc_sli_get_buff(phba, pring,
3192 irsp->unsli3.sli3Words[7]);
3193 if (!saveq->context3)
3194 lpfc_printf_log(phba,
3197 "0342 Ring %d Cannot find buffer for an"
3198 " unsolicited iocb. tag 0x%x\n",
3200 irsp->unsli3.sli3Words[7]);
3202 list_for_each_entry(iocbq, &saveq->list, list) {
3203 irsp = &(iocbq->iocb);
3204 if (irsp->ulpBdeCount != 0) {
3205 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
3206 irsp->un.ulpWord[3]);
3207 if (!iocbq->context2)
3208 lpfc_printf_log(phba,
3211 "0343 Ring %d Cannot find "
3212 "buffer for an unsolicited iocb"
3213 ". tag 0x%x\n", pring->ringno,
3214 irsp->un.ulpWord[3]);
3216 if (irsp->ulpBdeCount == 2) {
3217 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
3218 irsp->unsli3.sli3Words[7]);
3219 if (!iocbq->context3)
3220 lpfc_printf_log(phba,
3223 "0344 Ring %d Cannot find "
3224 "buffer for an unsolicited "
3227 irsp->unsli3.sli3Words[7]);
3231 if (irsp->ulpBdeCount != 0 &&
3232 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3233 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3236 /* search continue save q for same XRI */
3237 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3238 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3239 saveq->iocb.unsli3.rcvsli3.ox_id) {
3240 list_add_tail(&saveq->list, &iocbq->list);
3246 list_add_tail(&saveq->clist,
3247 &pring->iocb_continue_saveq);
3248 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3249 list_del_init(&iocbq->clist);
3251 irsp = &(saveq->iocb);
3255 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3256 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3257 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3258 Rctl = FC_RCTL_ELS_REQ;
3261 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3262 Rctl = w5p->hcsw.Rctl;
3263 Type = w5p->hcsw.Type;
3265 /* Firmware Workaround */
3266 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3267 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3268 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3269 Rctl = FC_RCTL_ELS_REQ;
3271 w5p->hcsw.Rctl = Rctl;
3272 w5p->hcsw.Type = Type;
3276 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3277 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3278 "0313 Ring %d handler: unexpected Rctl x%x "
3279 "Type x%x received\n",
3280 pring->ringno, Rctl, Type);
3286 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
3287 * @phba: Pointer to HBA context object.
3288 * @pring: Pointer to driver SLI ring object.
3289 * @prspiocb: Pointer to response iocb object.
3291 * This function looks up the iocb_lookup table to get the command iocb
3292 * corresponding to the given response iocb using the iotag of the
3293 * response iocb. The driver calls this function with the hbalock held
3294 * for SLI3 ports or the ring lock held for SLI4 ports.
3295 * This function returns the command iocb object if it finds the command
3296 * iocb else returns NULL.
3298 static struct lpfc_iocbq *
3299 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3300 struct lpfc_sli_ring *pring,
3301 struct lpfc_iocbq *prspiocb)
3303 struct lpfc_iocbq *cmd_iocb = NULL;
3305 spinlock_t *temp_lock = NULL;
3306 unsigned long iflag = 0;
3308 if (phba->sli_rev == LPFC_SLI_REV4)
3309 temp_lock = &pring->ring_lock;
3311 temp_lock = &phba->hbalock;
3313 spin_lock_irqsave(temp_lock, iflag);
3314 iotag = prspiocb->iocb.ulpIoTag;
3316 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3317 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3318 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3319 /* remove from txcmpl queue list */
3320 list_del_init(&cmd_iocb->list);
3321 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3322 pring->txcmplq_cnt--;
3323 spin_unlock_irqrestore(temp_lock, iflag);
3328 spin_unlock_irqrestore(temp_lock, iflag);
3329 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3330 "0317 iotag x%x is out of "
3331 "range: max iotag x%x wd0 x%x\n",
3332 iotag, phba->sli.last_iotag,
3333 *(((uint32_t *) &prspiocb->iocb) + 7));
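/*
 * Illustrative sketch, not driver code: the direct-index lookup performed
 * above. Because the iotag assigned at submit time doubles as an index
 * into a dense pointer table, matching a completion back to its command
 * is O(1); tag 0 is reserved as invalid. The demo_* names are
 * hypothetical.
 */
struct demo_io;

struct demo_iotag_table {
	struct demo_io **slot;
	u16 last_tag;
};

static struct demo_io *demo_find_cmd(struct demo_iotag_table *t, u16 iotag)
{
	if (iotag == 0 || iotag > t->last_tag)
		return NULL;	/* stale or corrupt tag */
	return t->slot[iotag];
}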
3338 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3339 * @phba: Pointer to HBA context object.
3340 * @pring: Pointer to driver SLI ring object.
3343 * This function looks up the iocb_lookup table to get the command iocb
3344 * corresponding to the given iotag. The driver calls this function with
3345 * the ring lock held because this function is an SLI4 port only helper.
3346 * This function returns the command iocb object if it finds the command
3347 * iocb else returns NULL.
3349 static struct lpfc_iocbq *
3350 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3351 struct lpfc_sli_ring *pring, uint16_t iotag)
3353 struct lpfc_iocbq *cmd_iocb = NULL;
3354 spinlock_t *temp_lock = NULL;
3355 unsigned long iflag = 0;
3357 if (phba->sli_rev == LPFC_SLI_REV4)
3358 temp_lock = &pring->ring_lock;
3360 temp_lock = &phba->hbalock;
3362 spin_lock_irqsave(temp_lock, iflag);
3363 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3364 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3365 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3366 /* remove from txcmpl queue list */
3367 list_del_init(&cmd_iocb->list);
3368 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3369 pring->txcmplq_cnt--;
3370 spin_unlock_irqrestore(temp_lock, iflag);
3375 spin_unlock_irqrestore(temp_lock, iflag);
3376 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3377 "0372 iotag x%x lookup error: max iotag (x%x) "
3379 iotag, phba->sli.last_iotag,
3380 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3385 * lpfc_sli_process_sol_iocb - process solicited iocb completion
3386 * @phba: Pointer to HBA context object.
3387 * @pring: Pointer to driver SLI ring object.
3388 * @saveq: Pointer to the response iocb to be processed.
3390 * This function is called by the ring event handler for non-fcp
3391 * rings when there is a new response iocb in the response ring.
3392 * The caller is not required to hold any locks. This function
3393 * gets the command iocb associated with the response iocb and
3394 * calls the completion handler for the command iocb. If there
3395 * is no completion handler, the function will free the resources
3396 * associated with command iocb. If the response iocb is for
3397 * an already aborted command iocb, the status of the completion
3398 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3399 * This function always returns 1.
3402 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3403 struct lpfc_iocbq *saveq)
3405 struct lpfc_iocbq *cmdiocbp;
3407 unsigned long iflag;
3409 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3411 if (cmdiocbp->iocb_cmpl) {
3413 * If an ELS command failed send an event to mgmt
3416 if (saveq->iocb.ulpStatus &&
3417 (pring->ringno == LPFC_ELS_RING) &&
3418 (cmdiocbp->iocb.ulpCommand ==
3419 CMD_ELS_REQUEST64_CR))
3420 lpfc_send_els_failure_event(phba,
3424 * Post all ELS completions to the worker thread.
3425 * All other are passed to the completion callback.
3427 if (pring->ringno == LPFC_ELS_RING) {
3428 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3429 (cmdiocbp->iocb_flag &
3430 LPFC_DRIVER_ABORTED)) {
3431 spin_lock_irqsave(&phba->hbalock,
3433 cmdiocbp->iocb_flag &=
3434 ~LPFC_DRIVER_ABORTED;
3435 spin_unlock_irqrestore(&phba->hbalock,
3437 saveq->iocb.ulpStatus =
3438 IOSTAT_LOCAL_REJECT;
3439 saveq->iocb.un.ulpWord[4] =
3442 /* Firmware could still be in the process
3443 * of DMAing the payload, so don't free the data
3444 * buffer until after a heartbeat.
3446 spin_lock_irqsave(&phba->hbalock,
3448 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
3449 spin_unlock_irqrestore(&phba->hbalock,
3452 if (phba->sli_rev == LPFC_SLI_REV4) {
3453 if (saveq->iocb_flag &
3454 LPFC_EXCHANGE_BUSY) {
3455 /* Set cmdiocb flag for the
3456 * exchange busy so sgl (xri)
3457 * will not be released until
3458 * the abort xri is received
3462 &phba->hbalock, iflag);
3463 cmdiocbp->iocb_flag |=
3465 spin_unlock_irqrestore(
3466 &phba->hbalock, iflag);
3468 if (cmdiocbp->iocb_flag &
3469 LPFC_DRIVER_ABORTED) {
3471 * Clear LPFC_DRIVER_ABORTED
3472 * bit in case it was driver
3476 &phba->hbalock, iflag);
3477 cmdiocbp->iocb_flag &=
3478 ~LPFC_DRIVER_ABORTED;
3479 spin_unlock_irqrestore(
3480 &phba->hbalock, iflag);
3481 cmdiocbp->iocb.ulpStatus =
3482 IOSTAT_LOCAL_REJECT;
3483 cmdiocbp->iocb.un.ulpWord[4] =
3484 IOERR_ABORT_REQUESTED;
3486 * For SLI4, irspiocb contains
3487 * NO_XRI in sli_xritag, so it
3488 * shall not affect the sgl
3489 * (xri) release process.
3491 saveq->iocb.ulpStatus =
3492 IOSTAT_LOCAL_REJECT;
3493 saveq->iocb.un.ulpWord[4] =
3496 &phba->hbalock, iflag);
3498 LPFC_DELAY_MEM_FREE;
3499 spin_unlock_irqrestore(
3500 &phba->hbalock, iflag);
3504 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
3506 lpfc_sli_release_iocbq(phba, cmdiocbp);
3509 * Unknown initiating command based on the response iotag.
3510 * This could be the case on the ELS ring because of
3513 if (pring->ringno != LPFC_ELS_RING) {
3515 * Ring <ringno> handler: unexpected completion IoTag
3518 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3519 "0322 Ring %d handler: "
3520 "unexpected completion IoTag x%x "
3521 "Data: x%x x%x x%x x%x\n",
3523 saveq->iocb.ulpIoTag,
3524 saveq->iocb.ulpStatus,
3525 saveq->iocb.un.ulpWord[4],
3526 saveq->iocb.ulpCommand,
3527 saveq->iocb.ulpContext);
3535 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3536 * @phba: Pointer to HBA context object.
3537 * @pring: Pointer to driver SLI ring object.
3539 * This function is called from the iocb ring event handlers when the
3540 * put pointer is ahead of the get pointer for a ring. This function signals
3541 * an error attention condition to the worker thread and the worker
3542 * thread will transition the HBA to offline state.
3545 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3547 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3549 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3550 * rsp ring <portRspMax>
3552 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3553 "0312 Ring %d handler: portRspPut %d "
3554 "is bigger than rsp ring %d\n",
3555 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3556 pring->sli.sli3.numRiocb);
3558 phba->link_state = LPFC_HBA_ERROR;
3561 * All error attention handlers are posted to
3564 phba->work_ha |= HA_ERATT;
3565 phba->work_hs = HS_FFER3;
3567 lpfc_worker_wake_up(phba);
3573 * lpfc_poll_eratt - Error attention polling timer timeout handler
3574 * @t: Timer context used to fetch the pointer to the HBA context object.
3576 * This function is invoked by the Error Attention polling timer when the
3577 * timer times out. It will check the SLI Error Attention register for
3578 * possible attention events. If so, it will post an Error Attention event
3579 * and wake up worker thread to process it. Otherwise, it will set up the
3580 * Error Attention polling timer for the next poll.
3582 void lpfc_poll_eratt(struct timer_list *t)
3584 struct lpfc_hba *phba;
3586 uint64_t sli_intr, cnt;
3588 phba = from_timer(phba, t, eratt_poll);
3590 /* Here we will also keep track of interrupts per sec of the hba */
3591 sli_intr = phba->sli.slistat.sli_intr;
3593 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3594 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3597 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3599 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3600 do_div(cnt, phba->eratt_poll_interval);
3601 phba->sli.slistat.sli_ips = cnt;
3603 phba->sli.slistat.sli_prev_intr = sli_intr;
3605 /* Check chip HA register for error event */
3606 eratt = lpfc_sli_check_eratt(phba);
3609 /* Tell the worker thread there is work to do */
3610 lpfc_worker_wake_up(phba);
3612 /* Restart the timer for next eratt poll */
3613 mod_timer(&phba->eratt_poll,
3615 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
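/*
 * Illustrative sketch of the rate computation above: the interrupt delta
 * is taken wrap-safely on the u64 counter, and do_div() is used instead
 * of the '/' operator because a 64-by-32 division on 32-bit kernels would
 * otherwise pull in an unavailable libgcc helper. do_div() divides its
 * first argument in place and returns the remainder. The helper name is
 * hypothetical and interval_s is assumed to be non-zero.
 */
static u64 demo_events_per_interval(u64 prev, u64 curr, u32 interval_s)
{
	u64 delta;

	if (prev > curr)	/* counter wrapped past U64_MAX */
		delta = (U64_MAX - prev) + curr + 1;
	else
		delta = curr - prev;

	do_div(delta, interval_s);
	return delta;
}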
3621 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3622 * @phba: Pointer to HBA context object.
3623 * @pring: Pointer to driver SLI ring object.
3624 * @mask: Host attention register mask for this ring.
3626 * This function is called from the interrupt context when there is a ring
3627 * event for the fcp ring. The caller does not hold any lock.
3628 * The function processes each response iocb in the response ring until it
3629 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
3630 * LE bit set. The function will call the completion handler of the command iocb
3631 * if the response iocb indicates a completion for a command iocb or it is
3632 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3633 * function if this is an unsolicited iocb.
3634 * This routine presumes LPFC_FCP_RING handling and doesn't bother
3635 * to check it explicitly.
3638 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3639 struct lpfc_sli_ring *pring, uint32_t mask)
3641 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3642 IOCB_t *irsp = NULL;
3643 IOCB_t *entry = NULL;
3644 struct lpfc_iocbq *cmdiocbq = NULL;
3645 struct lpfc_iocbq rspiocbq;
3647 uint32_t portRspPut, portRspMax;
3649 lpfc_iocb_type type;
3650 unsigned long iflag;
3651 uint32_t rsp_cmpl = 0;
3653 spin_lock_irqsave(&phba->hbalock, iflag);
3654 pring->stats.iocb_event++;
3657 * The next available response entry should never exceed the maximum
3658 * entries. If it does, treat it as an adapter hardware error.
3660 portRspMax = pring->sli.sli3.numRiocb;
3661 portRspPut = le32_to_cpu(pgp->rspPutInx);
3662 if (unlikely(portRspPut >= portRspMax)) {
3663 lpfc_sli_rsp_pointers_error(phba, pring);
3664 spin_unlock_irqrestore(&phba->hbalock, iflag);
3667 if (phba->fcp_ring_in_use) {
3668 spin_unlock_irqrestore(&phba->hbalock, iflag);
3671 phba->fcp_ring_in_use = 1;
3674 while (pring->sli.sli3.rspidx != portRspPut) {
3676 * Fetch an entry off the ring and copy it into a local data
3677 * structure. The copy involves a byte-swap since the
3678 * network byte order and pci byte orders are different.
3680 entry = lpfc_resp_iocb(phba, pring);
3681 phba->last_completion_time = jiffies;
3683 if (++pring->sli.sli3.rspidx >= portRspMax)
3684 pring->sli.sli3.rspidx = 0;
3686 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3687 (uint32_t *) &rspiocbq.iocb,
3688 phba->iocb_rsp_size);
3689 INIT_LIST_HEAD(&(rspiocbq.list));
3690 irsp = &rspiocbq.iocb;
3692 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3693 pring->stats.iocb_rsp++;
3696 if (unlikely(irsp->ulpStatus)) {
3698 * If resource errors are reported from the HBA, reduce
3699 * the queue depths of the SCSI devices.
3701 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3702 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3703 IOERR_NO_RESOURCES)) {
3704 spin_unlock_irqrestore(&phba->hbalock, iflag);
3705 phba->lpfc_rampdown_queue_depth(phba);
3706 spin_lock_irqsave(&phba->hbalock, iflag);
3709 /* Rsp ring <ringno> error: IOCB */
3710 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3711 "0336 Rsp Ring %d error: IOCB Data: "
3712 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3714 irsp->un.ulpWord[0],
3715 irsp->un.ulpWord[1],
3716 irsp->un.ulpWord[2],
3717 irsp->un.ulpWord[3],
3718 irsp->un.ulpWord[4],
3719 irsp->un.ulpWord[5],
3720 *(uint32_t *)&irsp->un1,
3721 *((uint32_t *)&irsp->un1 + 1));
3725 case LPFC_ABORT_IOCB:
3728 * Idle exchange closed via ABTS from port. No iocb
3729 * resources need to be recovered.
3731 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3732 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3733 "0333 IOCB cmd 0x%x"
3734 " processed. Skipping"
3740 spin_unlock_irqrestore(&phba->hbalock, iflag);
3741 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3743 spin_lock_irqsave(&phba->hbalock, iflag);
3744 if (unlikely(!cmdiocbq))
3746 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3747 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3748 if (cmdiocbq->iocb_cmpl) {
3749 spin_unlock_irqrestore(&phba->hbalock, iflag);
3750 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3752 spin_lock_irqsave(&phba->hbalock, iflag);
3755 case LPFC_UNSOL_IOCB:
3756 spin_unlock_irqrestore(&phba->hbalock, iflag);
3757 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3758 spin_lock_irqsave(&phba->hbalock, iflag);
3761 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3762 char adaptermsg[LPFC_MAX_ADPTMSG];
3763 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3764 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3766 dev_warn(&((phba->pcidev)->dev),
3768 phba->brd_no, adaptermsg);
3770 /* Unknown IOCB command */
3771 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3772 "0334 Unknown IOCB command "
3773 "Data: x%x, x%x x%x x%x x%x\n",
3774 type, irsp->ulpCommand,
3783 * The response IOCB has been processed. Update the ring
3784 * pointer in SLIM. If the port response put pointer has not
3785 * been updated, sync the pgp->rspPutInx and fetch the new port
3786 * response put pointer.
3788 writel(pring->sli.sli3.rspidx,
3789 &phba->host_gp[pring->ringno].rspGetInx);
3791 if (pring->sli.sli3.rspidx == portRspPut)
3792 portRspPut = le32_to_cpu(pgp->rspPutInx);
3795 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3796 pring->stats.iocb_rsp_full++;
3797 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3798 writel(status, phba->CAregaddr);
3799 readl(phba->CAregaddr);
3801 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3802 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3803 pring->stats.iocb_cmd_empty++;
3805 /* Force update of the local copy of cmdGetInx */
3806 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3807 lpfc_sli_resume_iocb(phba, pring);
3809 if ((pring->lpfc_sli_cmd_available))
3810 (pring->lpfc_sli_cmd_available) (phba, pring);
3814 phba->fcp_ring_in_use = 0;
3815 spin_unlock_irqrestore(&phba->hbalock, iflag);
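/*
 * Illustrative sketch, not driver code, of the entry copy in the loop
 * above: each response entry is copied out of the DMA ring into a local
 * structure word by word, converting from the adapter's little-endian PCI
 * order to host order as it goes, so the ring slot can be handed back to
 * the hardware immediately. This is one way such a swapping copy can be
 * written; the helper name is hypothetical.
 */
static void demo_copy_ring_entry(const __le32 *slot, u32 *local, size_t bytes)
{
	size_t i;

	for (i = 0; i < bytes / sizeof(u32); i++)
		local[i] = le32_to_cpu(slot[i]);
}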
3820 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3821 * @phba: Pointer to HBA context object.
3822 * @pring: Pointer to driver SLI ring object.
3823 * @rspiocbp: Pointer to driver response IOCB object.
3825 * This function is called from the worker thread when there is a slow-path
3826 * response IOCB to process. This function chains all the response iocbs until
3827 * seeing the iocb with the LE bit set. The function will call
3828 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3829 * completion of a command iocb. The function will call the
3830 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3831 * The function frees the resources or calls the completion handler if this
3832 * iocb is an abort completion. The function returns NULL when the response
3833 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3834 * this function shall chain the iocb on to the iocb_continueq and return the
3835 * response iocb passed in.
3837 static struct lpfc_iocbq *
3838 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3839 struct lpfc_iocbq *rspiocbp)
3841 struct lpfc_iocbq *saveq;
3842 struct lpfc_iocbq *cmdiocbp;
3843 struct lpfc_iocbq *next_iocb;
3844 IOCB_t *irsp = NULL;
3845 uint32_t free_saveq;
3846 uint8_t iocb_cmd_type;
3847 lpfc_iocb_type type;
3848 unsigned long iflag;
3851 spin_lock_irqsave(&phba->hbalock, iflag);
3852 /* First add the response iocb to the continueq list */
3853 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3854 pring->iocb_continueq_cnt++;
3856 /* Now, determine whether the list is completed for processing */
3857 irsp = &rspiocbp->iocb;
3860 * By default, the driver expects to free all resources
3861 * associated with this iocb completion.
3864 saveq = list_get_first(&pring->iocb_continueq,
3865 struct lpfc_iocbq, list);
3866 irsp = &(saveq->iocb);
3867 list_del_init(&pring->iocb_continueq);
3868 pring->iocb_continueq_cnt = 0;
3870 pring->stats.iocb_rsp++;
3873 * If resource errors are reported from the HBA, reduce
3874 * the queue depths of the SCSI devices.
3876 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3877 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3878 IOERR_NO_RESOURCES)) {
3879 spin_unlock_irqrestore(&phba->hbalock, iflag);
3880 phba->lpfc_rampdown_queue_depth(phba);
3881 spin_lock_irqsave(&phba->hbalock, iflag);
3884 if (irsp->ulpStatus) {
3885 /* Rsp ring <ringno> error: IOCB */
3886 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3887 "0328 Rsp Ring %d error: "
3892 "x%x x%x x%x x%x\n",
3894 irsp->un.ulpWord[0],
3895 irsp->un.ulpWord[1],
3896 irsp->un.ulpWord[2],
3897 irsp->un.ulpWord[3],
3898 irsp->un.ulpWord[4],
3899 irsp->un.ulpWord[5],
3900 *(((uint32_t *) irsp) + 6),
3901 *(((uint32_t *) irsp) + 7),
3902 *(((uint32_t *) irsp) + 8),
3903 *(((uint32_t *) irsp) + 9),
3904 *(((uint32_t *) irsp) + 10),
3905 *(((uint32_t *) irsp) + 11),
3906 *(((uint32_t *) irsp) + 12),
3907 *(((uint32_t *) irsp) + 13),
3908 *(((uint32_t *) irsp) + 14),
3909 *(((uint32_t *) irsp) + 15));
3913 * Fetch the IOCB command type and call the correct completion
3914 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3915 * get freed back to the lpfc_iocb_list by the discovery
3918 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3919 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3922 spin_unlock_irqrestore(&phba->hbalock, iflag);
3923 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3924 spin_lock_irqsave(&phba->hbalock, iflag);
3927 case LPFC_UNSOL_IOCB:
3928 spin_unlock_irqrestore(&phba->hbalock, iflag);
3929 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3930 spin_lock_irqsave(&phba->hbalock, iflag);
3935 case LPFC_ABORT_IOCB:
3937 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
3938 spin_unlock_irqrestore(&phba->hbalock, iflag);
3939 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3940 saveq);
3941 spin_lock_irqsave(&phba->hbalock, iflag);
3942 }
3943 if (cmdiocbp) {
3944 /* Call the specified completion routine */
3945 if (cmdiocbp->iocb_cmpl) {
3946 spin_unlock_irqrestore(&phba->hbalock,
3947 iflag);
3948 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3949 saveq);
3950 spin_lock_irqsave(&phba->hbalock,
3951 iflag);
3952 } else
3953 __lpfc_sli_release_iocbq(phba,
3954 cmdiocbp);
3955 }
3956 break;
3958 case LPFC_UNKNOWN_IOCB:
3959 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3960 char adaptermsg[LPFC_MAX_ADPTMSG];
3961 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3962 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3963 MAX_MSG_DATA);
3964 dev_warn(&((phba->pcidev)->dev),
3965 "lpfc%d: %s\n",
3966 phba->brd_no, adaptermsg);
3967 } else {
3968 /* Unknown IOCB command */
3969 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3970 "0335 Unknown IOCB "
3971 "command Data: x%x "
3972 "x%x x%x x%x\n",
3973 irsp->ulpCommand,
3974 irsp->ulpStatus,
3975 irsp->ulpIoTag,
3976 irsp->ulpContext);
3977 }
3978 break;
3979 }
3981 if (free_saveq) {
3982 list_for_each_entry_safe(rspiocbp, next_iocb,
3983 &saveq->list, list) {
3984 list_del_init(&rspiocbp->list);
3985 __lpfc_sli_release_iocbq(phba, rspiocbp);
3986 }
3987 __lpfc_sli_release_iocbq(phba, saveq);
3988 }
3989 rspiocbp = NULL;
3990 }
3991 spin_unlock_irqrestore(&phba->hbalock, iflag);
3992 return rspiocbp;
3993 }
3995 /**
3996 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3997 * @phba: Pointer to HBA context object.
3998 * @pring: Pointer to driver SLI ring object.
3999 * @mask: Host attention register mask for this ring.
4001 * This routine wraps the actual slow-path ring event handling routine;
4002 * it invokes the handler through the API jump table function pointer
4003 * in the lpfc_hba struct.
4004 */
4005 void
4006 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
4007 struct lpfc_sli_ring *pring, uint32_t mask)
4008 {
4009 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
4010 }
4011 /**
4012 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
4013 * @phba: Pointer to HBA context object.
4014 * @pring: Pointer to driver SLI ring object.
4015 * @mask: Host attention register mask for this ring.
4017 * This function is called from the worker thread when there is a ring event
4018 * for non-fcp rings. The caller does not hold any lock. The function
4019 * removes each response iocb from the response ring and calls the handle
4020 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4021 */
4022 static void
4023 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
4024 struct lpfc_sli_ring *pring, uint32_t mask)
4025 {
4026 struct lpfc_pgp *pgp;
4027 IOCB_t *entry;
4028 IOCB_t *irsp = NULL;
4029 struct lpfc_iocbq *rspiocbp = NULL;
4030 uint32_t portRspPut, portRspMax;
4031 unsigned long iflag;
4032 uint32_t status;
4034 pgp = &phba->port_gp[pring->ringno];
4035 spin_lock_irqsave(&phba->hbalock, iflag);
4036 pring->stats.iocb_event++;
4039 * The next available response entry should never exceed the maximum
4040 * entries. If it does, treat it as an adapter hardware error.
4042 portRspMax = pring->sli.sli3.numRiocb;
4043 portRspPut = le32_to_cpu(pgp->rspPutInx);
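/*
 * pgp mirrors this ring's get/put indices shared with the port.
 * rspPutInx is written by the adapter in little-endian form, so it
 * is converted and bounds-checked before the ring is walked.
 */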
4044 if (portRspPut >= portRspMax) {
4045 /*
4046 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
4047 * rsp ring <portRspMax>
4048 */
4049 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4050 "0303 Ring %d handler: portRspPut %d "
4051 "is bigger than rsp ring %d\n",
4052 pring->ringno, portRspPut, portRspMax);
4054 phba->link_state = LPFC_HBA_ERROR;
4055 spin_unlock_irqrestore(&phba->hbalock, iflag);
4057 phba->work_hs = HS_FFER3;
4058 lpfc_handle_eratt(phba);
4059 return;
4060 }
4062 rmb();
4064 while (pring->sli.sli3.rspidx != portRspPut) {
4065 /*
4066 * Build a completion list and call the appropriate handler.
4067 * The process is to get the next available response iocb, get
4068 * a free iocb from the list, copy the response data into the
4069 * free iocb, insert to the continuation list, and update the
4070 * next response index to slim. This process makes response
4071 * iocb's in the ring available to DMA as fast as possible but
4072 * pays a penalty for a copy operation. Since the iocb is
4073 * only 32 bytes, this penalty is considered small relative to
4074 * the PCI reads for register values and a slim write. When
4075 * the ulpLe field is set, the entire Command has been
4076 * received.
4077 */
4078 entry = lpfc_resp_iocb(phba, pring);
4080 phba->last_completion_time = jiffies;
4081 rspiocbp = __lpfc_sli_get_iocbq(phba);
4082 if (rspiocbp == NULL) {
4083 printk(KERN_ERR "%s: out of buffers! Failing "
4084 "completion.\n", __func__);
4085 break;
4086 }
4088 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
4089 phba->iocb_rsp_size);
4090 irsp = &rspiocbp->iocb;
4092 if (++pring->sli.sli3.rspidx >= portRspMax)
4093 pring->sli.sli3.rspidx = 0;
4095 if (pring->ringno == LPFC_ELS_RING) {
4096 lpfc_debugfs_slow_ring_trc(phba,
4097 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
4098 *(((uint32_t *) irsp) + 4),
4099 *(((uint32_t *) irsp) + 6),
4100 *(((uint32_t *) irsp) + 7));
4101 }
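/*
 * Publish the new host get index back to SLIM so the port can reuse
 * the response slots that were just copied out.
 */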
4103 writel(pring->sli.sli3.rspidx,
4104 &phba->host_gp[pring->ringno].rspGetInx);
4106 spin_unlock_irqrestore(&phba->hbalock, iflag);
4107 /* Handle the response IOCB */
4108 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
4109 spin_lock_irqsave(&phba->hbalock, iflag);
4111 /*
4112 * If the port response put pointer has not been updated, sync
4113 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
4114 * response put pointer.
4115 */
4116 if (pring->sli.sli3.rspidx == portRspPut) {
4117 portRspPut = le32_to_cpu(pgp->rspPutInx);
4118 }
4119 } /* while (pring->sli.sli3.rspidx != portRspPut) */
4121 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
4122 /* At least one response entry has been freed */
4123 pring->stats.iocb_rsp_full++;
4124 /* SET RxRE_RSP in Chip Att register */
4125 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4126 writel(status, phba->CAregaddr);
4127 readl(phba->CAregaddr); /* flush */
4129 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4130 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4131 pring->stats.iocb_cmd_empty++;
4133 /* Force update of the local copy of cmdGetInx */
4134 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4135 lpfc_sli_resume_iocb(phba, pring);
4137 if ((pring->lpfc_sli_cmd_available))
4138 (pring->lpfc_sli_cmd_available) (phba, pring);
4140 }
4142 spin_unlock_irqrestore(&phba->hbalock, iflag);
4143 return;
4144 }
4146 /**
4147 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
4148 * @phba: Pointer to HBA context object.
4149 * @pring: Pointer to driver SLI ring object.
4150 * @mask: Host attention register mask for this ring.
4152 * This function is called from the worker thread when there is a pending
4153 * ELS response iocb on the driver internal slow-path response iocb worker
4154 * queue. The caller does not hold any lock. The function removes each
4155 * response iocb from the response worker queue and calls the handle
4156 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4157 */
4158 static void
4159 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4160 struct lpfc_sli_ring *pring, uint32_t mask)
4161 {
4162 struct lpfc_iocbq *irspiocbq;
4163 struct hbq_dmabuf *dmabuf;
4164 struct lpfc_cq_event *cq_event;
4165 unsigned long iflag;
4166 int count = 0;
4168 spin_lock_irqsave(&phba->hbalock, iflag);
4169 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
4170 spin_unlock_irqrestore(&phba->hbalock, iflag);
4171 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4172 /* Get the response iocb from the head of work queue */
4173 spin_lock_irqsave(&phba->hbalock, iflag);
4174 list_remove_head(&phba->sli4_hba.sp_queue_event,
4175 cq_event, struct lpfc_cq_event, list);
4176 spin_unlock_irqrestore(&phba->hbalock, iflag);
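/*
 * Dispatch the event below with hbalock released: both the WCQE
 * completion path and the received-buffer path re-acquire hbalock
 * internally.
 */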
4178 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4179 case CQE_CODE_COMPL_WQE:
4180 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4181 cq_event);
4182 /* Translate ELS WCQE to response IOCBQ */
4183 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
4184 irspiocbq);
4185 if (irspiocbq)
4186 lpfc_sli_sp_handle_rspiocb(phba, pring,
4187 irspiocbq);
4188 count++;
4189 break;
4190 case CQE_CODE_RECEIVE:
4191 case CQE_CODE_RECEIVE_V1:
4192 dmabuf = container_of(cq_event, struct hbq_dmabuf,
4193 cq_event);
4194 lpfc_sli4_handle_received_buffer(phba, dmabuf);
4195 count++;
4196 break;
4197 default:
4198 break;
4199 }
4201 /* Limit the number of events to 64 to avoid soft lockups */
4202 if (count == 64)
4203 break;
4204 }
4205 }
4207 /**
4208 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
4209 * @phba: Pointer to HBA context object.
4210 * @pring: Pointer to driver SLI ring object.
4212 * This function aborts all iocbs in the given ring and frees all the iocb
4213 * objects in txq. This function issues an abort iocb for all the iocb commands
4214 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4215 * the return of this function. The caller is not required to hold any locks.
4216 */
4217 void
4218 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4219 {
4220 LIST_HEAD(completions);
4221 struct lpfc_iocbq *iocb, *next_iocb;
4223 if (pring->ringno == LPFC_ELS_RING) {
4224 lpfc_fabric_abort_hba(phba);
4225 }
4227 /* Error everything on txq and txcmplq
4228 * First do the txq.
4229 */
4230 if (phba->sli_rev >= LPFC_SLI_REV4) {
4231 spin_lock_irq(&pring->ring_lock);
4232 list_splice_init(&pring->txq, &completions);
4233 pring->txq_cnt = 0;
4234 spin_unlock_irq(&pring->ring_lock);
4236 spin_lock_irq(&phba->hbalock);
4237 /* Next issue ABTS for everything on the txcmplq */
4238 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4239 lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
4240 spin_unlock_irq(&phba->hbalock);
4241 } else {
4242 spin_lock_irq(&phba->hbalock);
4243 list_splice_init(&pring->txq, &completions);
4244 pring->txq_cnt = 0;
4246 /* Next issue ABTS for everything on the txcmplq */
4247 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4248 lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
4249 spin_unlock_irq(&phba->hbalock);
4250 }
4251 /* Make sure HBA is alive */
4252 lpfc_issue_hb_tmo(phba);
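/*
 * The heartbeat issued above gives prompt detection of a port that
 * has stopped responding after the mass abort.
 */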
4254 /* Cancel all the IOCBs from the completions list */
4255 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4256 IOERR_SLI_ABORTED);
4257 }
4259 /**
4260 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4261 * @phba: Pointer to HBA context object.
4263 * This function aborts all iocbs in FCP rings and frees all the iocb
4264 * objects in txq. This function issues an abort iocb for all the iocb commands
4265 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4266 * the return of this function. The caller is not required to hold any locks.
4267 */
4268 void
4269 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4270 {
4271 struct lpfc_sli *psli = &phba->sli;
4272 struct lpfc_sli_ring *pring;
4273 uint32_t i;
4275 /* Look on all the FCP Rings for the iotag */
4276 if (phba->sli_rev >= LPFC_SLI_REV4) {
4277 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4278 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4279 lpfc_sli_abort_iocb_ring(phba, pring);
4280 }
4281 } else {
4282 pring = &psli->sli3_ring[LPFC_FCP_RING];
4283 lpfc_sli_abort_iocb_ring(phba, pring);
4284 }
4285 }
4287 /**
4288 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
4289 * @phba: Pointer to HBA context object.
4291 * This function flushes all iocbs in the IO ring and frees all the iocb
4292 * objects in txq and txcmplq. This function will not issue abort iocbs
4293 * for all the iocb commands in txcmplq, they will just be returned with
4294 * IOERR_SLI_DOWN. This function is invoked via EEH when the device's PCI
4295 * slot has been permanently disabled.
4296 */
4297 void
4298 lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4299 {
4300 LIST_HEAD(txq);
4301 LIST_HEAD(txcmplq);
4302 struct lpfc_sli *psli = &phba->sli;
4303 struct lpfc_sli_ring *pring;
4304 uint32_t i;
4305 struct lpfc_iocbq *piocb, *next_iocb;
4307 spin_lock_irq(&phba->hbalock);
4308 if (phba->hba_flag & HBA_IOQ_FLUSH ||
4309 !phba->sli4_hba.hdwq) {
4310 spin_unlock_irq(&phba->hbalock);
4311 return;
4312 }
4313 /* Indicate the I/O queues are flushed */
4314 phba->hba_flag |= HBA_IOQ_FLUSH;
4315 spin_unlock_irq(&phba->hbalock);
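/*
 * With HBA_IOQ_FLUSH set no new I/O will be queued, so everything
 * still sitting on txq/txcmplq below can be completed locally with
 * IOERR_SLI_DOWN.
 */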
4317 /* Look on all the FCP Rings for the iotag */
4318 if (phba->sli_rev >= LPFC_SLI_REV4) {
4319 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4320 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4322 spin_lock_irq(&pring->ring_lock);
4323 /* Retrieve everything on txq */
4324 list_splice_init(&pring->txq, &txq);
4325 list_for_each_entry_safe(piocb, next_iocb,
4326 &pring->txcmplq, list)
4327 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4328 /* Retrieve everything on the txcmplq */
4329 list_splice_init(&pring->txcmplq, &txcmplq);
4330 pring->txq_cnt = 0;
4331 pring->txcmplq_cnt = 0;
4332 spin_unlock_irq(&pring->ring_lock);
4334 /* Flush the txq */
4335 lpfc_sli_cancel_iocbs(phba, &txq,
4336 IOSTAT_LOCAL_REJECT,
4337 IOERR_SLI_DOWN);
4338 /* Flush the txcmpq */
4339 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4340 IOSTAT_LOCAL_REJECT,
4341 IOERR_SLI_DOWN);
4342 }
4343 } else {
4344 pring = &psli->sli3_ring[LPFC_FCP_RING];
4346 spin_lock_irq(&phba->hbalock);
4347 /* Retrieve everything on txq */
4348 list_splice_init(&pring->txq, &txq);
4349 list_for_each_entry_safe(piocb, next_iocb,
4350 &pring->txcmplq, list)
4351 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4352 /* Retrieve everything on the txcmplq */
4353 list_splice_init(&pring->txcmplq, &txcmplq);
4354 pring->txq_cnt = 0;
4355 pring->txcmplq_cnt = 0;
4356 spin_unlock_irq(&phba->hbalock);
4358 /* Flush the txq */
4359 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4360 IOERR_SLI_DOWN);
4361 /* Flush the txcmpq */
4362 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4363 IOERR_SLI_DOWN);
4364 }
4365 }
4367 /**
4368 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4369 * @phba: Pointer to HBA context object.
4370 * @mask: Bit mask to be checked.
4372 * This function reads the host status register and compares
4373 * with the provided bit mask to check if HBA completed
4374 * the restart. This function will wait in a loop for the
4375 * HBA to complete restart. If the HBA does not restart within
4376 * 15 iterations, the function will reset the HBA again. The
4377 * function returns 1 when the HBA fails to restart; otherwise it
4378 * returns zero.
4379 */
4380 static int
4381 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4382 {
4383 uint32_t status;
4384 int i = 0;
4385 int retval = 0;
4387 /* Read the HBA Host Status Register */
4388 if (lpfc_readl(phba->HSregaddr, &status))
4389 return 1;
4391 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
4393 /*
4394 * Check status register every 100ms for 5 retries, then every
4395 * 500ms for 5, then every 2.5 sec for 5, then reset board and
4396 * every 2.5 sec for 4.
4397 * Break out of the loop if errors occurred during init.
4398 */
4399 while (((status & mask) != mask) &&
4400 !(status & HS_FFERM) &&
4401 i++ < 20) {
4403 if (i <= 5)
4404 msleep(10);
4405 else if (i <= 10)
4406 msleep(500);
4407 else
4408 msleep(2500);
4410 if (i == 15) {
4411 /* Do post */
4412 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4413 lpfc_sli_brdrestart(phba);
4414 }
4415 /* Read the HBA Host Status Register */
4416 if (lpfc_readl(phba->HSregaddr, &status)) {
4417 retval = 1;
4418 break;
4419 }
4420 }
4422 /* Check to see if any errors occurred during init */
4423 if ((status & HS_FFERM) || (i >= 20)) {
4424 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4425 "2751 Adapter failed to restart, "
4426 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4428 readl(phba->MBslimaddr + 0xa8),
4429 readl(phba->MBslimaddr + 0xac));
4430 phba->link_state = LPFC_HBA_ERROR;
4431 retval = 1;
4432 }
4434 return retval;
4435 }
4437 /**
4438 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4439 * @phba: Pointer to HBA context object.
4440 * @mask: Bit mask to be checked.
4442 * This function checks the host status register to see if the HBA is
4443 * ready. This function will wait in a loop for the HBA to become ready;
4444 * if the HBA is not ready, the function will reset the HBA PCI
4445 * function again. The function returns 1 when the HBA fails to become
4446 * ready; otherwise it returns zero.
4447 */
4448 static int
4449 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4450 {
4451 uint32_t status;
4452 int retval = 0;
4454 /* Read the HBA Host Status Register */
4455 status = lpfc_sli4_post_status_check(phba);
4457 if (status) {
4458 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4459 lpfc_sli_brdrestart(phba);
4460 status = lpfc_sli4_post_status_check(phba);
4461 }
4463 /* Check to see if any errors occurred during init */
4464 if (status) {
4465 phba->link_state = LPFC_HBA_ERROR;
4466 retval = 1;
4467 } else
4468 phba->sli4_hba.intr_enable = 0;
4470 return retval;
4471 }
4473 /**
4474 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4475 * @phba: Pointer to HBA context object.
4476 * @mask: Bit mask to be checked.
4478 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4479 * from the API jump table function pointer from the lpfc_hba struct.
4480 */
4481 int
4482 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4483 {
4484 return phba->lpfc_sli_brdready(phba, mask);
4485 }
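/*
 * Known pattern written into SLIM by lpfc_reset_barrier() below; e.g.
 * writel(BARRIER_TEST_PATTERN, resp_buf + 1) is considered acknowledged
 * once readl(resp_buf + 1) returns its complement.
 */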
4487 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4489 /**
4490 * lpfc_reset_barrier - Make HBA ready for HBA reset
4491 * @phba: Pointer to HBA context object.
4493 * This function is called before resetting an HBA. This function is called
4494 * with hbalock held and requests HBA to quiesce DMAs before a reset.
4495 */
4496 void lpfc_reset_barrier(struct lpfc_hba *phba)
4497 {
4498 uint32_t __iomem *resp_buf;
4499 uint32_t __iomem *mbox_buf;
4500 volatile uint32_t mbox;
4501 uint32_t hc_copy, ha_copy, resp_data;
4502 int i;
4503 uint8_t hdrtype;
4505 lockdep_assert_held(&phba->hbalock);
4507 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4508 if (hdrtype != 0x80 ||
4509 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4510 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4511 return;
4513 /*
4514 * Tell the other part of the chip to suspend temporarily all
4515 * its DMA activity.
4516 */
4517 resp_buf = phba->MBslimaddr;
4519 /* Disable the error attention */
4520 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4521 return;
4522 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4523 readl(phba->HCregaddr); /* flush */
4524 phba->link_flag |= LS_IGNORE_ERATT;
4526 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4527 return;
4528 if (ha_copy & HA_ERATT) {
4529 /* Clear Chip error bit */
4530 writel(HA_ERATT, phba->HAregaddr);
4531 phba->pport->stopped = 1;
4532 }
4534 mbox = 0;
4535 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4536 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4538 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4539 mbox_buf = phba->MBslimaddr;
4540 writel(mbox, mbox_buf);
4542 for (i = 0; i < 50; i++) {
4543 if (lpfc_readl((resp_buf + 1), &resp_data))
4544 return;
4545 if (resp_data != ~(BARRIER_TEST_PATTERN))
4546 mdelay(1);
4547 else
4548 break;
4549 }
4550 resp_data = 0;
4551 if (lpfc_readl((resp_buf + 1), &resp_data))
4552 return;
4553 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4554 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4555 phba->pport->stopped)
4556 goto restore_hc;
4557 else
4558 goto clear_errat;
4559 }
4561 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4562 resp_data = 0;
4563 for (i = 0; i < 500; i++) {
4564 if (lpfc_readl(resp_buf, &resp_data))
4565 return;
4566 if (resp_data != mbox)
4567 mdelay(1);
4568 else
4569 break;
4570 }
4572 clear_errat:
4574 while (++i < 500) {
4575 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4576 return;
4577 if (!(ha_copy & HA_ERATT))
4578 mdelay(1);
4579 else
4580 break;
4581 }
4583 if (readl(phba->HAregaddr) & HA_ERATT) {
4584 writel(HA_ERATT, phba->HAregaddr);
4585 phba->pport->stopped = 1;
4586 }
4588 restore_hc:
4589 phba->link_flag &= ~LS_IGNORE_ERATT;
4590 writel(hc_copy, phba->HCregaddr);
4591 readl(phba->HCregaddr); /* flush */
4592 }
4594 /**
4595 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4596 * @phba: Pointer to HBA context object.
4598 * This function issues a kill_board mailbox command and waits for
4599 * the error attention interrupt. This function is called for stopping
4600 * the firmware processing. The caller is not required to hold any
4601 * locks. This function calls lpfc_hba_down_post function to free
4602 * any pending commands after the kill. The function will return 1 when it
4603 * fails to kill the board, else it will return 0.
4604 */
4605 int
4606 lpfc_sli_brdkill(struct lpfc_hba *phba)
4607 {
4608 struct lpfc_sli *psli;
4609 LPFC_MBOXQ_t *pmb;
4610 uint32_t status;
4611 uint32_t ha_copy;
4612 int retval;
4613 int i = 0;
4615 psli = &phba->sli;
4617 /* Kill HBA */
4618 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4619 "0329 Kill HBA Data: x%x x%x\n",
4620 phba->pport->port_state, psli->sli_flag);
4622 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4623 if (!pmb)
4624 return 1;
4626 /* Disable the error attention */
4627 spin_lock_irq(&phba->hbalock);
4628 if (lpfc_readl(phba->HCregaddr, &status)) {
4629 spin_unlock_irq(&phba->hbalock);
4630 mempool_free(pmb, phba->mbox_mem_pool);
4631 return 1;
4632 }
4633 status &= ~HC_ERINT_ENA;
4634 writel(status, phba->HCregaddr);
4635 readl(phba->HCregaddr); /* flush */
4636 phba->link_flag |= LS_IGNORE_ERATT;
4637 spin_unlock_irq(&phba->hbalock);
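/*
 * Error attention is now masked and LS_IGNORE_ERATT is set, so the
 * ERATT raised by the KILL_BOARD command below is polled for here
 * instead of being handled by the interrupt path.
 */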
4639 lpfc_kill_board(phba, pmb);
4640 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4641 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4643 if (retval != MBX_SUCCESS) {
4644 if (retval != MBX_BUSY)
4645 mempool_free(pmb, phba->mbox_mem_pool);
4646 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4647 "2752 KILL_BOARD command failed retval %d\n",
4649 spin_lock_irq(&phba->hbalock);
4650 phba->link_flag &= ~LS_IGNORE_ERATT;
4651 spin_unlock_irq(&phba->hbalock);
4652 return 1;
4653 }
4655 spin_lock_irq(&phba->hbalock);
4656 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4657 spin_unlock_irq(&phba->hbalock);
4659 mempool_free(pmb, phba->mbox_mem_pool);
4661 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4662 * attention every 100ms for 3 seconds. If we don't get ERATT after
4663 * 3 seconds we still set HBA_ERROR state because the status of the
4664 * board is now undefined.
4666 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4667 return 1;
4668 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4669 mdelay(100);
4670 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4671 return 1;
4672 }
4674 del_timer_sync(&psli->mbox_tmo);
4675 if (ha_copy & HA_ERATT) {
4676 writel(HA_ERATT, phba->HAregaddr);
4677 phba->pport->stopped = 1;
4678 }
4679 spin_lock_irq(&phba->hbalock);
4680 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4681 psli->mbox_active = NULL;
4682 phba->link_flag &= ~LS_IGNORE_ERATT;
4683 spin_unlock_irq(&phba->hbalock);
4685 lpfc_hba_down_post(phba);
4686 phba->link_state = LPFC_HBA_ERROR;
4688 return ha_copy & HA_ERATT ? 0 : 1;
4689 }
4691 /**
4692 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4693 * @phba: Pointer to HBA context object.
4695 * This function resets the HBA by writing HC_INITFF to the control
4696 * register. After the HBA resets, this function resets all the iocb ring
4697 * indices. This function disables PCI layer parity checking during
4698 * the reset.
4699 * This function returns 0 always.
4700 * The caller is not required to hold any locks.
4701 */
4702 int
4703 lpfc_sli_brdreset(struct lpfc_hba *phba)
4704 {
4705 struct lpfc_sli *psli;
4706 struct lpfc_sli_ring *pring;
4707 uint16_t cfg_value;
4708 int i;
4710 psli = &phba->sli;
4712 /* Reset HBA */
4713 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4714 "0325 Reset HBA Data: x%x x%x\n",
4715 (phba->pport) ? phba->pport->port_state : 0,
4716 psli->sli_flag);
4718 /* perform board reset */
4719 phba->fc_eventTag = 0;
4720 phba->link_events = 0;
4721 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
4722 if (phba->pport) {
4723 phba->pport->fc_myDID = 0;
4724 phba->pport->fc_prevDID = 0;
4725 }
4727 /* Turn off parity checking and serr during the physical reset */
4728 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
4729 return -EIO;
4731 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4732 (cfg_value &
4733 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4735 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4737 /* Now toggle INITFF bit in the Host Control Register */
4738 writel(HC_INITFF, phba->HCregaddr);
4740 readl(phba->HCregaddr); /* flush */
4741 writel(0, phba->HCregaddr);
4742 readl(phba->HCregaddr); /* flush */
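/*
 * The reads between the two INITFF writes above flush the posted
 * writes, so the adapter observes a clean set-then-clear toggle.
 */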
4744 /* Restore PCI cmd register */
4745 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4747 /* Initialize relevant SLI info */
4748 for (i = 0; i < psli->num_rings; i++) {
4749 pring = &psli->sli3_ring[i];
4750 pring->flag = 0;
4751 pring->sli.sli3.rspidx = 0;
4752 pring->sli.sli3.next_cmdidx = 0;
4753 pring->sli.sli3.local_getidx = 0;
4754 pring->sli.sli3.cmdidx = 0;
4755 pring->missbufcnt = 0;
4756 }
4758 phba->link_state = LPFC_WARM_START;
4759 return 0;
4760 }
4762 /**
4763 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4764 * @phba: Pointer to HBA context object.
4766 * This function resets a SLI4 HBA. This function disables PCI layer parity
4767 * checking while it resets the device. The caller is not required to hold
4768 * any locks.
4770 * This function returns 0 on success else returns negative error code.
4771 */
4772 int
4773 lpfc_sli4_brdreset(struct lpfc_hba *phba)
4774 {
4775 struct lpfc_sli *psli = &phba->sli;
4776 uint16_t cfg_value;
4777 int rc = 0;
4779 /* Reset HBA */
4780 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4781 "0295 Reset HBA Data: x%x x%x x%x\n",
4782 phba->pport->port_state, psli->sli_flag,
4783 phba->hba_flag);
4785 /* perform board reset */
4786 phba->fc_eventTag = 0;
4787 phba->link_events = 0;
4788 phba->pport->fc_myDID = 0;
4789 phba->pport->fc_prevDID = 0;
4791 spin_lock_irq(&phba->hbalock);
4792 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4793 phba->fcf.fcf_flag = 0;
4794 spin_unlock_irq(&phba->hbalock);
4796 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4797 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4798 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4799 return rc;
4800 }
4802 /* Now physically reset the device */
4803 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4804 "0389 Performing PCI function reset!\n");
4806 /* Turn off parity checking and serr during the physical reset */
4807 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
4808 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4809 "3205 PCI read Config failed\n");
4813 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4814 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4816 /* Perform FCoE PCI function reset before freeing queue memory */
4817 rc = lpfc_pci_function_reset(phba);
4819 /* Restore PCI cmd register */
4820 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4822 return rc;
4823 }
4825 /**
4826 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4827 * @phba: Pointer to HBA context object.
4829 * This function is called in the SLI initialization code path to
4830 * restart the HBA. The caller is not required to hold any lock.
4831 * This function writes MBX_RESTART mailbox command to the SLIM and
4832 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4833 * function to free any pending commands. The function enables
4834 * POST only during the first initialization. The function returns zero.
4835 * The function does not guarantee completion of MBX_RESTART mailbox
4836 * command before the return of this function.
4837 */
4838 static int
4839 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4840 {
4841 MAILBOX_t *mb;
4842 struct lpfc_sli *psli;
4843 volatile uint32_t word0;
4844 void __iomem *to_slim;
4845 uint32_t hba_aer_enabled;
4847 spin_lock_irq(&phba->hbalock);
4849 /* Take PCIe device Advanced Error Reporting (AER) state */
4850 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4852 psli = &phba->sli;
4854 /* Restart HBA */
4855 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4856 "0337 Restart HBA Data: x%x x%x\n",
4857 (phba->pport) ? phba->pport->port_state : 0,
4858 psli->sli_flag);
4860 word0 = 0;
4861 mb = (MAILBOX_t *) &word0;
4862 mb->mbxCommand = MBX_RESTART;
4863 mb->mbxHc = 1;
4865 lpfc_reset_barrier(phba);
4867 to_slim = phba->MBslimaddr;
4868 writel(*(uint32_t *) mb, to_slim);
4869 readl(to_slim); /* flush */
4871 /* Only skip post after fc_ffinit is completed */
4872 if (phba->pport && phba->pport->port_state)
4873 word0 = 1; /* This is really setting up word1 */
4874 else
4875 word0 = 0; /* This is really setting up word1 */
4876 to_slim = phba->MBslimaddr + sizeof (uint32_t);
4877 writel(*(uint32_t *) mb, to_slim);
4878 readl(to_slim); /* flush */
4880 lpfc_sli_brdreset(phba);
4881 if (phba->pport)
4882 phba->pport->stopped = 0;
4883 phba->link_state = LPFC_INIT_START;
4884 phba->hba_flag = 0;
4885 spin_unlock_irq(&phba->hbalock);
4887 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4888 psli->stats_start = ktime_get_seconds();
4890 /* Give the INITFF and Post time to settle. */
4891 mdelay(100);
4893 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4894 if (hba_aer_enabled)
4895 pci_disable_pcie_error_reporting(phba->pcidev);
4897 lpfc_hba_down_post(phba);
4899 return 0;
4900 }
4902 /**
4903 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4904 * @phba: Pointer to HBA context object.
4906 * This function is called in the SLI initialization code path to restart
4907 * a SLI4 HBA. The caller is not required to hold any lock.
4908 * At the end of the function, it calls lpfc_hba_down_post function to
4909 * free any pending commands.
4910 */
4911 static int
4912 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4913 {
4914 struct lpfc_sli *psli = &phba->sli;
4915 uint32_t hba_aer_enabled;
4916 int rc;
4918 /* Restart HBA */
4919 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4920 "0296 Restart HBA Data: x%x x%x\n",
4921 phba->pport->port_state, psli->sli_flag);
4923 /* Take PCIe device Advanced Error Reporting (AER) state */
4924 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4926 rc = lpfc_sli4_brdreset(phba);
4927 if (rc) {
4928 phba->link_state = LPFC_HBA_ERROR;
4929 goto hba_down_queue;
4930 }
4932 spin_lock_irq(&phba->hbalock);
4933 phba->pport->stopped = 0;
4934 phba->link_state = LPFC_INIT_START;
4935 phba->hba_flag = 0;
4936 spin_unlock_irq(&phba->hbalock);
4938 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4939 psli->stats_start = ktime_get_seconds();
4941 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4942 if (hba_aer_enabled)
4943 pci_disable_pcie_error_reporting(phba->pcidev);
4945 hba_down_queue:
4946 lpfc_hba_down_post(phba);
4947 lpfc_sli4_queue_destroy(phba);
4949 return rc;
4950 }
4952 /**
4953 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4954 * @phba: Pointer to HBA context object.
4956 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4957 * API jump table function pointer from the lpfc_hba struct.
4958 */
4959 int
4960 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4961 {
4962 return phba->lpfc_sli_brdrestart(phba);
4963 }
4965 /**
4966 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4967 * @phba: Pointer to HBA context object.
4969 * This function is called after a HBA restart to wait for successful
4970 * restart of the HBA. Successful restart of the HBA is indicated by
4971 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4972 * iterations, the function will restart the HBA again. The function returns
4973 * zero if the HBA successfully restarted, else a negative error code.
4974 */
4975 static int
4976 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4977 {
4978 uint32_t status, i = 0;
4980 /* Read the HBA Host Status Register */
4981 if (lpfc_readl(phba->HSregaddr, &status))
4982 return -EIO;
4984 /* Check status register to see what current state is */
4986 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4988 /* Check every 10ms for 10 retries, then every 100ms for 90
4989 * retries, then every 1 sec for 50 retries, for a total of
4990 * ~60 seconds before resetting the board again, then check
4991 * every 1 sec for 50 retries. Up to 60 seconds of board-ready
4992 * time is required for Falcon FIPS zeroization to complete;
4993 * any board reset in between restarts zeroization, further
4994 * delaying board readiness.
4995 */
4996 if (i++ >= 200) {
4997 /* Adapter failed to init, timeout, status reg
4998 <status> */
4999 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5000 "0436 Adapter failed to init, "
5001 "timeout, status reg x%x, "
5002 "FW Data: A8 x%x AC x%x\n", status,
5003 readl(phba->MBslimaddr + 0xa8),
5004 readl(phba->MBslimaddr + 0xac));
5005 phba->link_state = LPFC_HBA_ERROR;
5006 return -ETIMEDOUT;
5007 }
5009 /* Check to see if any errors occurred during init */
5010 if (status & HS_FFERM) {
5011 /* ERROR: During chipset initialization */
5012 /* Adapter failed to init, chipset, status reg
5013 <status> */
5014 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5015 "0437 Adapter failed to init, "
5016 "chipset, status reg x%x, "
5017 "FW Data: A8 x%x AC x%x\n", status,
5018 readl(phba->MBslimaddr + 0xa8),
5019 readl(phba->MBslimaddr + 0xac));
5020 phba->link_state = LPFC_HBA_ERROR;
5021 return -EIO;
5022 }
5024 if (i <= 10)
5025 msleep(10);
5026 else if (i <= 100)
5027 msleep(100);
5028 else
5029 msleep(1000);
5031 if (i == 150) {
5032 /* Do post */
5033 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5034 lpfc_sli_brdrestart(phba);
5035 }
5036 /* Read the HBA Host Status Register */
5037 if (lpfc_readl(phba->HSregaddr, &status))
5038 return -EIO;
5039 }
5041 /* Check to see if any errors occurred during init */
5042 if (status & HS_FFERM) {
5043 /* ERROR: During chipset initialization */
5044 /* Adapter failed to init, chipset, status reg <status> */
5045 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5046 "0438 Adapter failed to init, chipset, "
5048 "FW Data: A8 x%x AC x%x\n", status,
5049 readl(phba->MBslimaddr + 0xa8),
5050 readl(phba->MBslimaddr + 0xac));
5051 phba->link_state = LPFC_HBA_ERROR;
5052 return -EIO;
5053 }
5055 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
5057 /* Clear all interrupt enable conditions */
5058 writel(0, phba->HCregaddr);
5059 readl(phba->HCregaddr); /* flush */
5061 /* setup host attn register */
5062 writel(0xffffffff, phba->HAregaddr);
5063 readl(phba->HAregaddr); /* flush */
5064 return 0;
5065 }
5067 /**
5068 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
5070 * This function calculates and returns the number of HBQs required to be
5071 * configured.
5072 */
5073 static int
5074 lpfc_sli_hbq_count(void)
5075 {
5076 return ARRAY_SIZE(lpfc_hbq_defs);
5077 }
5079 /**
5080 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
5082 * This function adds the number of hbq entries in every HBQ to get
5083 * the total number of hbq entries required for the HBA and returns
5084 * the total count.
5085 */
5086 static int
5087 lpfc_sli_hbq_entry_count(void)
5088 {
5089 int hbq_count = lpfc_sli_hbq_count();
5090 int count = 0;
5091 int i;
5093 for (i = 0; i < hbq_count; ++i)
5094 count += lpfc_hbq_defs[i]->entry_count;
5095 return count;
5096 }
5098 /**
5099 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
5101 * This function calculates amount of memory required for all hbq entries
5102 * to be configured and returns the total memory required.
5103 */
5104 int
5105 lpfc_sli_hbq_size(void)
5106 {
5107 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
5108 }
5110 /**
5111 * lpfc_sli_hbq_setup - configure and initialize HBQs
5112 * @phba: Pointer to HBA context object.
5114 * This function is called during the SLI initialization to configure
5115 * all the HBQs and post buffers to the HBQ. The caller is not
5116 * required to hold any locks. This function will return zero if successful
5117 * else it will return negative error code.
5118 */
5119 static int
5120 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
5121 {
5122 int hbq_count = lpfc_sli_hbq_count();
5123 LPFC_MBOXQ_t *pmb;
5124 MAILBOX_t *pmbox;
5125 uint32_t hbqno;
5126 uint32_t hbq_entry_index;
5128 /* Get a Mailbox buffer to setup mailbox
5129 * commands for HBA initialization
5130 */
5131 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5133 if (!pmb)
5134 return -ENOMEM;
5136 pmbox = &pmb->u.mb;
5138 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
5139 phba->link_state = LPFC_INIT_MBX_CMDS;
5140 phba->hbq_in_use = 1;
5142 hbq_entry_index = 0;
5143 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
5144 phba->hbqs[hbqno].next_hbqPutIdx = 0;
5145 phba->hbqs[hbqno].hbqPutIdx = 0;
5146 phba->hbqs[hbqno].local_hbqGetIdx = 0;
5147 phba->hbqs[hbqno].entry_count =
5148 lpfc_hbq_defs[hbqno]->entry_count;
5149 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
5150 hbq_entry_index, pmb);
5151 hbq_entry_index += phba->hbqs[hbqno].entry_count;
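/*
 * Each CONFIG_HBQ mailbox built above carves the next slice out of
 * the shared HBQ entry area, so the running hbq_entry_index doubles
 * as the base offset for the next HBQ.
 */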
5153 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
5154 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
5155 mbxStatus <status>, ring <num> */
5157 lpfc_printf_log(phba, KERN_ERR,
5158 LOG_SLI | LOG_VPORT,
5159 "1805 Adapter failed to init. "
5160 "Data: x%x x%x x%x\n",
5162 pmbox->mbxStatus, hbqno);
5164 phba->link_state = LPFC_HBA_ERROR;
5165 mempool_free(pmb, phba->mbox_mem_pool);
5169 phba->hbq_count = hbq_count;
5171 mempool_free(pmb, phba->mbox_mem_pool);
5173 /* Initially populate or replenish the HBQs */
5174 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5175 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
5176 return 0;
5177 }
5179 /**
5180 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
5181 * @phba: Pointer to HBA context object.
5183 * This function is called during the SLI initialization to configure
5184 * all the HBQs and post buffers to the HBQ. The caller is not
5185 * required to hold any locks. This function will return zero if successful
5186 * else it will return negative error code.
5187 */
5188 static int
5189 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5190 {
5191 phba->hbq_in_use = 1;
5192 /*
5193 * Specific case when MDS diagnostics are enabled and supported.
5194 * The receive buffer count is truncated to manage the incoming
5195 * traffic.
5196 */
5197 if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5198 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5199 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5200 else
5201 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5202 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5203 phba->hbq_count = 1;
5204 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
5205 /* Initially populate or replenish the HBQs */
5206 return 0;
5207 }
5209 /**
5210 * lpfc_sli_config_port - Issue config port mailbox command
5211 * @phba: Pointer to HBA context object.
5212 * @sli_mode: sli mode - 2/3
5214 * This function is called by the sli initialization code path
5215 * to issue config_port mailbox command. This function restarts the
5216 * HBA firmware and issues a config_port mailbox command to configure
5217 * the SLI interface in the sli mode specified by sli_mode
5218 * variable. The caller is not required to hold any locks.
5219 * The function returns 0 if successful, else returns negative error
5223 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
5226 uint32_t resetcount = 0, rc = 0, done = 0;
5228 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5230 phba->link_state = LPFC_HBA_ERROR;
5234 phba->sli_rev = sli_mode;
5235 while (resetcount < 2 && !done) {
5236 spin_lock_irq(&phba->hbalock);
5237 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5238 spin_unlock_irq(&phba->hbalock);
5239 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5240 lpfc_sli_brdrestart(phba);
5241 rc = lpfc_sli_chipset_init(phba);
5242 if (rc)
5243 break;
5245 spin_lock_irq(&phba->hbalock);
5246 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5247 spin_unlock_irq(&phba->hbalock);
5248 resetcount++;
5250 /* Call pre CONFIG_PORT mailbox command initialization. A
5251 * value of 0 means the call was successful. Any other
5252 * nonzero value is a failure, but if ERESTART is returned,
5253 * the driver may reset the HBA and try again.
5254 */
5255 rc = lpfc_config_port_prep(phba);
5256 if (rc == -ERESTART) {
5257 phba->link_state = LPFC_LINK_UNKNOWN;
5258 continue;
5259 } else if (rc)
5260 break;
5262 phba->link_state = LPFC_INIT_MBX_CMDS;
5263 lpfc_config_port(phba, pmb);
5264 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5265 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5266 LPFC_SLI3_HBQ_ENABLED |
5267 LPFC_SLI3_CRP_ENABLED |
5268 LPFC_SLI3_DSS_ENABLED);
5269 if (rc != MBX_SUCCESS) {
5270 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5271 "0442 Adapter failed to init, mbxCmd x%x "
5272 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5273 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5274 spin_lock_irq(&phba->hbalock);
5275 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5276 spin_unlock_irq(&phba->hbalock);
5277 rc = -ENXIO;
5278 } else {
5279 /* Allow asynchronous mailbox command to go through */
5280 spin_lock_irq(&phba->hbalock);
5281 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5282 spin_unlock_irq(&phba->hbalock);
5283 done = 1;
5285 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5286 (pmb->u.mb.un.varCfgPort.gasabt == 0))
5287 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5288 "3110 Port did not grant ASABT\n");
5293 goto do_prep_failed;
5295 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5296 if (!pmb->u.mb.un.varCfgPort.cMA) {
5298 goto do_prep_failed;
5300 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5301 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5302 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5303 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5304 phba->max_vpi : phba->max_vports;
5305 } else
5306 phba->max_vpi = 0;
5308 if (pmb->u.mb.un.varCfgPort.gerbm)
5309 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5310 if (pmb->u.mb.un.varCfgPort.gcrp)
5311 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5313 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5314 phba->port_gp = phba->mbox->us.s3_pgp.port;
5316 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5317 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5318 phba->cfg_enable_bg = 0;
5319 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5320 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5321 "0443 Adapter did not grant "
5326 phba->hbq_get = NULL;
5327 phba->port_gp = phba->mbox->us.s2.port;
5331 mempool_free(pmb, phba->mbox_mem_pool);
5337 * lpfc_sli_hba_setup - SLI initialization function
5338 * @phba: Pointer to HBA context object.
5340 * This function is the main SLI initialization function. This function
5341 * is called by the HBA initialization code, HBA reset code and HBA
5342 * error attention handler code. Caller is not required to hold any
5343 * locks. This function issues config_port mailbox command to configure
5344 * the SLI, setup iocb rings and HBQ rings. In the end the function
5345 * calls the config_port_post function to issue init_link mailbox
5346 * command and to start the discovery. The function will return zero
5347 * if successful, else it will return negative error code.
5348 */
5349 int
5350 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5351 {
5352 uint32_t rc;
5353 int i;
5354 int longs;
5356 /* Enable ISR already does config_port because of config_msi mbx */
5357 if (phba->hba_flag & HBA_NEEDS_CFG_PORT) {
5358 rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
5359 if (rc)
5360 return -ENXIO;
5361 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
5362 }
5363 phba->fcp_embed_io = 0; /* SLI4 FC support only */
5365 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5366 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5367 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5368 if (!rc) {
5369 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5370 "2709 This device supports "
5371 "Advanced Error Reporting (AER)\n");
5372 spin_lock_irq(&phba->hbalock);
5373 phba->hba_flag |= HBA_AER_ENABLED;
5374 spin_unlock_irq(&phba->hbalock);
5375 } else {
5376 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5377 "2708 This device does not support "
5378 "Advanced Error Reporting (AER): %d\n",
5380 phba->cfg_aer_support = 0;
5384 if (phba->sli_rev == 3) {
5385 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5386 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5387 } else {
5388 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5389 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5390 phba->sli3_options = 0;
5391 }
5393 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5394 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5395 phba->sli_rev, phba->max_vpi);
5396 rc = lpfc_sli_ring_map(phba);
5398 if (rc)
5399 goto lpfc_sli_hba_setup_error;
5401 /* Initialize VPIs. */
5402 if (phba->sli_rev == LPFC_SLI_REV3) {
5403 /*
5404 * The VPI bitmask and physical ID array are allocated
5405 * and initialized once only - at driver load. A port
5406 * reset doesn't need to reinitialize this memory.
5407 */
5408 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5409 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5410 phba->vpi_bmask = kcalloc(longs,
5411 sizeof(unsigned long),
5412 GFP_KERNEL);
5413 if (!phba->vpi_bmask) {
5414 rc = -ENOMEM;
5415 goto lpfc_sli_hba_setup_error;
5416 }
5418 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5419 sizeof(uint16_t),
5420 GFP_KERNEL);
5421 if (!phba->vpi_ids) {
5422 kfree(phba->vpi_bmask);
5423 rc = -ENOMEM;
5424 goto lpfc_sli_hba_setup_error;
5425 }
5426 for (i = 0; i < phba->max_vpi; i++)
5427 phba->vpi_ids[i] = i;
5428 }
5429 }
5431 /* Init HBQs */
5432 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5433 rc = lpfc_sli_hbq_setup(phba);
5434 if (rc)
5435 goto lpfc_sli_hba_setup_error;
5436 }
5437 spin_lock_irq(&phba->hbalock);
5438 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5439 spin_unlock_irq(&phba->hbalock);
5441 rc = lpfc_config_port_post(phba);
5442 if (rc)
5443 goto lpfc_sli_hba_setup_error;
5445 return rc;
5447 lpfc_sli_hba_setup_error:
5448 phba->link_state = LPFC_HBA_ERROR;
5449 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5450 "0445 Firmware initialization failed\n");
5455 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5456 * @phba: Pointer to HBA context object.
5458 * This function issues a dump mailbox command to read config region
5459 * 23, parses the records in the region, and populates driver
5460 * internal fields.
5461 */
5462 static int
5463 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5464 {
5465 LPFC_MBOXQ_t *mboxq;
5466 struct lpfc_dmabuf *mp;
5467 struct lpfc_mqe *mqe;
5468 uint32_t data_length;
5469 int rc;
5471 /* Program the default value of vlan_id and fc_map */
5472 phba->valid_vlan = 0;
5473 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5474 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5475 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5477 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5478 if (!mboxq)
5479 return -ENOMEM;
5481 mqe = &mboxq->u.mqe;
5482 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5483 rc = -ENOMEM;
5484 goto out_free_mboxq;
5485 }
5487 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5488 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5490 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5491 "(%d):2571 Mailbox cmd x%x Status x%x "
5492 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5493 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5494 "CQ: x%x x%x x%x x%x\n",
5495 mboxq->vport ? mboxq->vport->vpi : 0,
5496 bf_get(lpfc_mqe_command, mqe),
5497 bf_get(lpfc_mqe_status, mqe),
5498 mqe->un.mb_words[0], mqe->un.mb_words[1],
5499 mqe->un.mb_words[2], mqe->un.mb_words[3],
5500 mqe->un.mb_words[4], mqe->un.mb_words[5],
5501 mqe->un.mb_words[6], mqe->un.mb_words[7],
5502 mqe->un.mb_words[8], mqe->un.mb_words[9],
5503 mqe->un.mb_words[10], mqe->un.mb_words[11],
5504 mqe->un.mb_words[12], mqe->un.mb_words[13],
5505 mqe->un.mb_words[14], mqe->un.mb_words[15],
5506 mqe->un.mb_words[16], mqe->un.mb_words[50],
5507 mboxq->mcqe.word0,
5508 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5509 mboxq->mcqe.trailer);
5511 if (rc) {
5512 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5513 kfree(mp);
5514 rc = -EIO;
5515 goto out_free_mboxq;
5516 }
5517 data_length = mqe->un.mb_words[5];
5518 if (data_length > DMP_RGN23_SIZE) {
5519 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5520 kfree(mp);
5521 rc = -EIO;
5522 goto out_free_mboxq;
5523 }
5525 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
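/*
 * Parsing region 23 overrides the FC map and VLAN defaults programmed
 * above; on any of the error paths the defaults remain in effect.
 */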
5526 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5527 kfree(mp);
5528 rc = 0;
5530 out_free_mboxq:
5531 mempool_free(mboxq, phba->mbox_mem_pool);
5532 return rc;
5533 }
5535 /**
5536 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5537 * @phba: pointer to lpfc hba data structure.
5538 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5539 * @vpd: pointer to the memory to hold resulting port vpd data.
5540 * @vpd_size: On input, the number of bytes allocated to @vpd.
5541 * On output, the number of data bytes in @vpd.
5543 * This routine executes a READ_REV SLI4 mailbox command. In
5544 * addition, this routine gets the port vpd data.
5546 * Return codes
5547 * 0 - successful
5548 * -ENOMEM - could not allocate memory
5549 */
5550 static int
5551 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5552 uint8_t *vpd, uint32_t *vpd_size)
5553 {
5554 int rc = 0;
5555 uint32_t dma_size;
5556 struct lpfc_dmabuf *dmabuf;
5557 struct lpfc_mqe *mqe;
5559 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5560 if (!dmabuf)
5561 return -ENOMEM;
5563 /*
5564 * Get a DMA buffer for the vpd data resulting from the READ_REV
5565 * mailbox command.
5566 */
5567 dma_size = *vpd_size;
5568 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5569 &dmabuf->phys, GFP_KERNEL);
5570 if (!dmabuf->virt) {
5571 kfree(dmabuf);
5572 return -ENOMEM;
5573 }
5575 /*
5576 * The SLI4 implementation of READ_REV conflicts at word1,
5577 * bits 31:16 and SLI4 adds vpd functionality not present
5578 * in SLI3. This code corrects the conflicts.
5579 */
5580 lpfc_read_rev(phba, mboxq);
5581 mqe = &mboxq->u.mqe;
5582 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5583 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5584 mqe->un.read_rev.word1 &= 0x0000FFFF;
5585 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5586 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5588 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5589 if (rc) {
5590 dma_free_coherent(&phba->pcidev->dev, dma_size,
5591 dmabuf->virt, dmabuf->phys);
5592 kfree(dmabuf);
5593 return -EIO;
5594 }
5596 /*
5597 * The available vpd length cannot be bigger than the
5598 * DMA buffer passed to the port. Catch the less than
5599 * case and update the caller's size.
5600 */
5601 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5602 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5604 memcpy(vpd, dmabuf->virt, *vpd_size);
5606 dma_free_coherent(&phba->pcidev->dev, dma_size,
5607 dmabuf->virt, dmabuf->phys);
5608 kfree(dmabuf);
5609 return 0;
5610 }
5612 /**
5613 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5614 * @phba: pointer to lpfc hba data structure.
5616 * This routine retrieves the SLI4 device controller attributes for the
5617 * PCI function this driver is attached to.
5619 * Return codes
5620 * 0 - successful
5621 * otherwise - failed to retrieve controller attributes
5622 */
5623 static int
5624 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5625 {
5626 LPFC_MBOXQ_t *mboxq;
5627 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5628 struct lpfc_controller_attribute *cntl_attr;
5629 void *virtaddr = NULL;
5630 uint32_t alloclen, reqlen;
5631 uint32_t shdr_status, shdr_add_status;
5632 union lpfc_sli4_cfg_shdr *shdr;
5633 int rc;
5635 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5636 if (!mboxq)
5637 return -ENOMEM;
5639 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5640 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5641 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5642 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5643 LPFC_SLI4_MBX_NEMBED);
5645 if (alloclen < reqlen) {
5646 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5647 "3084 Allocated DMA memory size (%d) is "
5648 "less than the requested DMA memory size "
5649 "(%d)\n", alloclen, reqlen);
5651 goto out_free_mboxq;
5653 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5654 virtaddr = mboxq->sge_array->addr[0];
5655 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5656 shdr = &mbx_cntl_attr->cfg_shdr;
5657 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5658 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5659 if (shdr_status || shdr_add_status || rc) {
5660 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5661 "3085 Mailbox x%x (x%x/x%x) failed, "
5662 "rc:x%x, status:x%x, add_status:x%x\n",
5663 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5664 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5665 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5666 rc, shdr_status, shdr_add_status);
5667 rc = -ENXIO;
5668 goto out_free_mboxq;
5669 }
5671 cntl_attr = &mbx_cntl_attr->cntl_attr;
5672 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5673 phba->sli4_hba.lnk_info.lnk_tp =
5674 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5675 phba->sli4_hba.lnk_info.lnk_no =
5676 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5678 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
5679 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
5680 sizeof(phba->BIOSVersion));
5682 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5683 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
5684 phba->sli4_hba.lnk_info.lnk_tp,
5685 phba->sli4_hba.lnk_info.lnk_no,
5686 phba->BIOSVersion);
5687 out_free_mboxq:
5688 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5689 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5690 else
5691 mempool_free(mboxq, phba->mbox_mem_pool);
5692 return rc;
5693 }
5695 /**
5696 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5697 * @phba: pointer to lpfc hba data structure.
5699 * This routine retrieves SLI4 device physical port name this PCI function
5700 * is attached to.
5702 * Return codes
5703 * 0 - successful
5704 * otherwise - failed to retrieve physical port name
5705 */
5706 static int
5707 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5708 {
5709 LPFC_MBOXQ_t *mboxq;
5710 struct lpfc_mbx_get_port_name *get_port_name;
5711 uint32_t shdr_status, shdr_add_status;
5712 union lpfc_sli4_cfg_shdr *shdr;
5713 char cport_name = 0;
5714 int rc;
5716 /* We assume nothing at this point */
5717 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5718 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5720 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5721 if (!mboxq)
5722 return -ENOMEM;
5723 /* obtain link type and link number via READ_CONFIG */
5724 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5725 lpfc_sli4_read_config(phba);
5726 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5727 goto retrieve_ppname;
5729 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5730 rc = lpfc_sli4_get_ctl_attr(phba);
5731 if (rc)
5732 goto out_free_mboxq;
5734 retrieve_ppname:
5735 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5736 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5737 sizeof(struct lpfc_mbx_get_port_name) -
5738 sizeof(struct lpfc_sli4_cfg_mhdr),
5739 LPFC_SLI4_MBX_EMBED);
5740 get_port_name = &mboxq->u.mqe.un.get_port_name;
5741 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5742 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5743 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5744 phba->sli4_hba.lnk_info.lnk_tp);
5745 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5746 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5747 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5748 if (shdr_status || shdr_add_status || rc) {
5749 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5750 "3087 Mailbox x%x (x%x/x%x) failed: "
5751 "rc:x%x, status:x%x, add_status:x%x\n",
5752 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5753 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5754 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5755 rc, shdr_status, shdr_add_status);
5757 goto out_free_mboxq;
5758 }
5759 switch (phba->sli4_hba.lnk_info.lnk_no) {
5760 case LPFC_LINK_NUMBER_0:
5761 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5762 &get_port_name->u.response);
5763 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5764 break;
5765 case LPFC_LINK_NUMBER_1:
5766 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5767 &get_port_name->u.response);
5768 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5769 break;
5770 case LPFC_LINK_NUMBER_2:
5771 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5772 &get_port_name->u.response);
5773 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5774 break;
5775 case LPFC_LINK_NUMBER_3:
5776 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5777 &get_port_name->u.response);
5778 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5779 break;
5780 default:
5781 break;
5782 }
5784 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5785 phba->Port[0] = cport_name;
5786 phba->Port[1] = '\0';
5787 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5788 "3091 SLI get port name: %s\n", phba->Port);
5792 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5793 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5794 else
5795 mempool_free(mboxq, phba->mbox_mem_pool);
5796 return rc;
5797 }
5799 /**
5800 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5801 * @phba: pointer to lpfc hba data structure.
5803 * This routine is called to explicitly arm the SLI4 device's completion and
5804 * event queues.
5805 */
5806 static void
5807 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5808 {
5809 int qidx;
5810 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
5811 struct lpfc_sli4_hdw_queue *qp;
5812 struct lpfc_queue *eq;
5814 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
5815 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
5816 if (sli4_hba->nvmels_cq)
5817 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
5818 LPFC_QUEUE_REARM);
5820 if (sli4_hba->hdwq) {
5821 /* Loop thru all Hardware Queues */
5822 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
5823 qp = &sli4_hba->hdwq[qidx];
5824 /* ARM the corresponding CQ */
5825 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
5826 LPFC_QUEUE_REARM);
5827 }
5829 /* Loop thru all IRQ vectors */
5830 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
5831 eq = sli4_hba->hba_eq_hdl[qidx].eq;
5832 /* ARM the corresponding EQ */
5833 sli4_hba->sli4_write_eq_db(phba, eq,
5834 0, LPFC_QUEUE_REARM);
5835 }
5836 }
5838 if (phba->nvmet_support) {
5839 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5840 sli4_hba->sli4_write_cq_db(phba,
5841 sli4_hba->nvmet_cqset[qidx], 0,
5842 LPFC_QUEUE_REARM);
5843 }
5844 }
5845 }
5847 /**
5848 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5849 * @phba: Pointer to HBA context object.
5850 * @type: The resource extent type.
5851 * @extnt_count: buffer to hold port available extent count.
5852 * @extnt_size: buffer to hold element count per extent.
5854 * This function calls the port and retrieves the number of available
5855 * extents and their size for a particular extent type.
5857 * Returns: 0 if successful. Nonzero otherwise.
5858 */
5859 int
5860 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5861 uint16_t *extnt_count, uint16_t *extnt_size)
5862 {
5863 int rc = 0;
5864 uint32_t length;
5865 uint32_t mbox_tmo;
5866 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5867 LPFC_MBOXQ_t *mbox;
5869 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5870 if (!mbox)
5871 return -ENOMEM;
5873 /* Find out how many extents are available for this resource type */
5874 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5875 sizeof(struct lpfc_sli4_cfg_mhdr));
5876 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5877 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5878 length, LPFC_SLI4_MBX_EMBED);
5880 /* Send an extents count of 0 - the GET doesn't use it. */
5881 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5882 LPFC_SLI4_MBX_EMBED);
5883 if (unlikely(rc)) {
5884 rc = -EIO;
5885 goto err_exit;
5886 }
5888 if (!phba->sli4_hba.intr_enable)
5889 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5890 else {
5891 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5892 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5893 }
5895 if (unlikely(rc)) {
5896 rc = -EIO;
5897 goto err_exit;
5898 }
5899 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5900 if (bf_get(lpfc_mbox_hdr_status,
5901 &rsrc_info->header.cfg_shdr.response)) {
5902 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5903 "2930 Failed to get resource extents "
5904 "Status 0x%x Add'l Status 0x%x\n",
5905 bf_get(lpfc_mbox_hdr_status,
5906 &rsrc_info->header.cfg_shdr.response),
5907 bf_get(lpfc_mbox_hdr_add_status,
5908 &rsrc_info->header.cfg_shdr.response));
5909 rc = -EIO;
5910 goto err_exit;
5911 }
5913 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5914 &rsrc_info->u.rsp);
5915 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5916 &rsrc_info->u.rsp);
5918 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5919 "3162 Retrieved extents type-%d from port: count:%d, "
5920 "size:%d\n", type, *extnt_count, *extnt_size);
5923 mempool_free(mbox, phba->mbox_mem_pool);
5928 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5929 * @phba: Pointer to HBA context object.
5930 * @type: The extent type to check.
5932 * This function reads the current available extents from the port and checks
5933 * if the extent count or extent size has changed since the last access.
5934 * Callers use this routine post port reset to understand if there is an
5935 * extent reprovisioning requirement.
5937 * Returns:
5938 * -Error: error indicates problem.
5939 * 1: Extent count or size has changed.
5940 * 0: No changes.
5941 */
5942 static int
5943 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5944 {
5945 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5946 uint16_t size_diff, rsrc_ext_size;
5948 struct lpfc_rsrc_blks *rsrc_entry;
5949 struct list_head *rsrc_blk_list = NULL;
5953 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5960 case LPFC_RSC_TYPE_FCOE_RPI:
5961 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5963 case LPFC_RSC_TYPE_FCOE_VPI:
5964 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5966 case LPFC_RSC_TYPE_FCOE_XRI:
5967 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5969 case LPFC_RSC_TYPE_FCOE_VFI:
5970 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5976 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5978 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5982 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5989 * lpfc_sli4_cfg_post_extnts -
5990 * @phba: Pointer to HBA context object.
5991 * @extnt_cnt: number of available extents.
5992 * @type: the extent type (rpi, xri, vfi, vpi).
5993 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5994 * @mbox: pointer to the caller's allocated mailbox structure.
5996 * This function executes the extents allocation request. It also
5997 * takes care of the amount of memory needed to allocate or get the
5998 * allocated extents. It is the caller's responsibility to evaluate
5999 * the response.
6001 * Returns:
6002 * -Error: Error value describes the condition found.
6003 * 0: if successful
6004 */
6005 static int
6006 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
6007 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
6008 {
6009 int rc = 0;
6010 uint32_t req_len;
6011 uint32_t emb_len;
6012 uint32_t alloc_len, mbox_tmo;
6014 /* Calculate the total requested length of the dma memory */
6015 req_len = extnt_cnt * sizeof(uint16_t);
6017 /*
6018 * Calculate the size of an embedded mailbox. The uint32_t
6019 * accounts for the extents-specific word.
6021 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6025 * Presume the allocation and response will fit into an embedded
6026 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6028 *emb = LPFC_SLI4_MBX_EMBED;
6029 if (req_len > emb_len) {
6030 req_len = extnt_cnt * sizeof(uint16_t) +
6031 sizeof(union lpfc_sli4_cfg_shdr) +
6033 *emb = LPFC_SLI4_MBX_NEMBED;
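/*
 * Worked example (figures illustrative only): if the embedded payload room
 * computed above (emb_len) came to, say, 200 bytes, up to 100 uint16_t
 * extent ids fit in the embedded form. A request for 128 extents (256
 * bytes of ids) would overflow that, so the command is reissued
 * non-embedded with req_len grown to also cover the cfg_shdr and the
 * extents-specific word.
 */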
6036 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6037 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
6039 if (alloc_len < req_len) {
6040 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6041 "2982 Allocated DMA memory size (x%x) is "
6042 "less than the requested DMA memory "
6043 "size (x%x)\n", alloc_len, req_len);
6046 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
6050 if (!phba->sli4_hba.intr_enable)
6051 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6053 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6054 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6063 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
6064 * @phba: Pointer to HBA context object.
6065 * @type: The resource extent type to allocate.
6067 * This function allocates the number of elements for the specified resource type.
6071 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
6074 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
6075 uint16_t rsrc_id, rsrc_start, j, k;
6078 unsigned long longs;
6079 unsigned long *bmask;
6080 struct lpfc_rsrc_blks *rsrc_blks;
6083 struct lpfc_id_range *id_array = NULL;
6084 void *virtaddr = NULL;
6085 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6086 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6087 struct list_head *ext_blk_list;
6089 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6095 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
6096 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6097 "3009 No available Resource Extents "
6098 "for resource type 0x%x: Count: 0x%x, "
6099 "Size 0x%x\n", type, rsrc_cnt,
6104 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
6105 "2903 Post resource extents type-0x%x: "
6106 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
6108 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6112 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
6119 * Figure out where the response is located. Then get local pointers
6120 * to the response data. The port does not guarantee a response for the
6121 * full requested extent count, so update the local variable with the
6122 * count actually allocated by the port.
6124 if (emb == LPFC_SLI4_MBX_EMBED) {
6125 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6126 id_array = &rsrc_ext->u.rsp.id[0];
6127 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6129 virtaddr = mbox->sge_array->addr[0];
6130 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6131 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6132 id_array = &n_rsrc->id;
6135 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
6136 rsrc_id_cnt = rsrc_cnt * rsrc_size;
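/*
 * Example of the round-up above (numbers illustrative): 3 extents of
 * 64 ids each give rsrc_id_cnt = 192; with BITS_PER_LONG = 64 the
 * bitmask needs (192 + 63) / 64 = 3 longs.
 */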
6139 * Based on the resource size and count, correct the base and max resource values.
6142 length = sizeof(struct lpfc_rsrc_blks);
6144 case LPFC_RSC_TYPE_FCOE_RPI:
6145 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6146 sizeof(unsigned long),
6148 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6152 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6155 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6156 kfree(phba->sli4_hba.rpi_bmask);
6162 * The next_rpi was initialized with the maximum available
6163 * count but the port may allocate a smaller number. Catch
6164 * that case and update the next_rpi.
6166 phba->sli4_hba.next_rpi = rsrc_id_cnt;
6168 /* Initialize local ptrs for common extent processing later. */
6169 bmask = phba->sli4_hba.rpi_bmask;
6170 ids = phba->sli4_hba.rpi_ids;
6171 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6173 case LPFC_RSC_TYPE_FCOE_VPI:
6174 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6176 if (unlikely(!phba->vpi_bmask)) {
6180 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6182 if (unlikely(!phba->vpi_ids)) {
6183 kfree(phba->vpi_bmask);
6188 /* Initialize local ptrs for common extent processing later. */
6189 bmask = phba->vpi_bmask;
6190 ids = phba->vpi_ids;
6191 ext_blk_list = &phba->lpfc_vpi_blk_list;
6193 case LPFC_RSC_TYPE_FCOE_XRI:
6194 phba->sli4_hba.xri_bmask = kcalloc(longs,
6195 sizeof(unsigned long),
6197 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6201 phba->sli4_hba.max_cfg_param.xri_used = 0;
6202 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6205 if (unlikely(!phba->sli4_hba.xri_ids)) {
6206 kfree(phba->sli4_hba.xri_bmask);
6211 /* Initialize local ptrs for common extent processing later. */
6212 bmask = phba->sli4_hba.xri_bmask;
6213 ids = phba->sli4_hba.xri_ids;
6214 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6216 case LPFC_RSC_TYPE_FCOE_VFI:
6217 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6218 sizeof(unsigned long),
6220 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6224 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6227 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6228 kfree(phba->sli4_hba.vfi_bmask);
6233 /* Initialize local ptrs for common extent processing later. */
6234 bmask = phba->sli4_hba.vfi_bmask;
6235 ids = phba->sli4_hba.vfi_ids;
6236 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6239 /* Unsupported Opcode. Fail call. */
6243 ext_blk_list = NULL;
6248 * Complete initializing the extent configuration with the
6249 * allocated ids assigned to this function. The bitmask serves
6250 * as an index into the array and manages the available ids. The
6251 * array just stores the ids communicated to the port via the wqes.
6253 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6255 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6258 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6261 rsrc_blks = kzalloc(length, GFP_KERNEL);
6262 if (unlikely(!rsrc_blks)) {
6268 rsrc_blks->rsrc_start = rsrc_id;
6269 rsrc_blks->rsrc_size = rsrc_size;
6270 list_add_tail(&rsrc_blks->list, ext_blk_list);
6271 rsrc_start = rsrc_id;
6272 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6273 phba->sli4_hba.io_xri_start = rsrc_start +
6274 lpfc_sli4_get_iocb_cnt(phba);
6277 while (rsrc_id < (rsrc_start + rsrc_size)) {
6282 /* Entire word processed. Get next word. */
6287 lpfc_sli4_mbox_cmd_free(phba, mbox);
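/*
 * Illustrative sketch (not driver code): how the bmask/ids pair built above
 * is typically consumed when a single resource id is needed. The helper
 * name and the 0xFFFF sentinel are hypothetical; the real allocators live
 * elsewhere in this file.
 */
#if 0	/* example only */
static uint16_t example_alloc_rsrc_id(unsigned long *bmask, uint16_t *ids,
				      uint16_t id_count)
{
	unsigned long idx;

	/* The bitmask tracks which slots are free or in use... */
	idx = find_first_zero_bit(bmask, id_count);
	if (idx >= id_count)
		return 0xFFFF;	/* pool exhausted */
	set_bit(idx, bmask);

	/* ...while the array maps a slot to the port-assigned id. */
	return ids[idx];
}
#endif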
6294 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6295 * @phba: Pointer to HBA context object.
6296 * @type: the extent's type.
6298 * This function deallocates all extents of a particular resource type.
6299 * SLI4 does not allow for deallocating a particular extent range. It
6300 * is the caller's responsibility to release all kernel memory resources.
6303 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6306 uint32_t length, mbox_tmo = 0;
6308 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6309 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6311 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6316 * This function sends an embedded mailbox because it only sends the
6317 * resource type. All extents of this type are released by the
6320 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6321 sizeof(struct lpfc_sli4_cfg_mhdr));
6322 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6323 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6324 length, LPFC_SLI4_MBX_EMBED);
6326 /* Send an extents count of 0 - the dealloc doesn't use it. */
6327 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6328 LPFC_SLI4_MBX_EMBED);
6333 if (!phba->sli4_hba.intr_enable)
6334 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6336 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6337 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6344 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6345 if (bf_get(lpfc_mbox_hdr_status,
6346 &dealloc_rsrc->header.cfg_shdr.response)) {
6347 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6348 "2919 Failed to release resource extents "
6349 "for type %d - Status 0x%x Add'l Status 0x%x. "
6350 "Resource memory not released.\n",
6352 bf_get(lpfc_mbox_hdr_status,
6353 &dealloc_rsrc->header.cfg_shdr.response),
6354 bf_get(lpfc_mbox_hdr_add_status,
6355 &dealloc_rsrc->header.cfg_shdr.response));
6360 /* Release kernel memory resources for the specific type. */
6362 case LPFC_RSC_TYPE_FCOE_VPI:
6363 kfree(phba->vpi_bmask);
6364 kfree(phba->vpi_ids);
6365 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6366 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6367 &phba->lpfc_vpi_blk_list, list) {
6368 list_del_init(&rsrc_blk->list);
6371 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6373 case LPFC_RSC_TYPE_FCOE_XRI:
6374 kfree(phba->sli4_hba.xri_bmask);
6375 kfree(phba->sli4_hba.xri_ids);
6376 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6377 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6378 list_del_init(&rsrc_blk->list);
6382 case LPFC_RSC_TYPE_FCOE_VFI:
6383 kfree(phba->sli4_hba.vfi_bmask);
6384 kfree(phba->sli4_hba.vfi_ids);
6385 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6386 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6387 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6388 list_del_init(&rsrc_blk->list);
6392 case LPFC_RSC_TYPE_FCOE_RPI:
6393 /* RPI bitmask and physical id array are cleaned up earlier. */
6394 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6395 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6396 list_del_init(&rsrc_blk->list);
6404 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6407 mempool_free(mbox, phba->mbox_mem_pool);
6412 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6417 len = sizeof(struct lpfc_mbx_set_feature) -
6418 sizeof(struct lpfc_sli4_cfg_mhdr);
6419 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6420 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6421 LPFC_SLI4_MBX_EMBED);
6424 case LPFC_SET_UE_RECOVERY:
6425 bf_set(lpfc_mbx_set_feature_UER,
6426 &mbox->u.mqe.un.set_feature, 1);
6427 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6428 mbox->u.mqe.un.set_feature.param_len = 8;
6430 case LPFC_SET_MDS_DIAGS:
6431 bf_set(lpfc_mbx_set_feature_mds,
6432 &mbox->u.mqe.un.set_feature, 1);
6433 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6434 &mbox->u.mqe.un.set_feature, 1);
6435 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6436 mbox->u.mqe.un.set_feature.param_len = 8;
6438 case LPFC_SET_DUAL_DUMP:
6439 bf_set(lpfc_mbx_set_feature_dd,
6440 &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6441 bf_set(lpfc_mbx_set_feature_ddquery,
6442 &mbox->u.mqe.un.set_feature, 0);
6443 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6444 mbox->u.mqe.un.set_feature.param_len = 4;
6452 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6453 * @phba: Pointer to HBA context object.
6455 * Disable FW logging into host memory on the adapter. This must
6456 * be done before reading the logs from host memory.
6459 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6461 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6463 spin_lock_irq(&phba->hbalock);
6464 ras_fwlog->state = INACTIVE;
6465 spin_unlock_irq(&phba->hbalock);
6467 /* Disable FW logging to host memory */
6468 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6469 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6471 /* Wait 10ms for firmware to stop using DMA buffer */
6472 usleep_range(10 * 1000, 20 * 1000);
6476 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6477 * @phba: Pointer to HBA context object.
6479 * This function is called to free memory allocated for RAS FW logging
6480 * support in the driver.
6483 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6485 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6486 struct lpfc_dmabuf *dmabuf, *next;
6488 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6489 list_for_each_entry_safe(dmabuf, next,
6490 &ras_fwlog->fwlog_buff_list,
6492 list_del(&dmabuf->list);
6493 dma_free_coherent(&phba->pcidev->dev,
6494 LPFC_RAS_MAX_ENTRY_SIZE,
6495 dmabuf->virt, dmabuf->phys);
6500 if (ras_fwlog->lwpd.virt) {
6501 dma_free_coherent(&phba->pcidev->dev,
6502 sizeof(uint32_t) * 2,
6503 ras_fwlog->lwpd.virt,
6504 ras_fwlog->lwpd.phys);
6505 ras_fwlog->lwpd.virt = NULL;
6508 spin_lock_irq(&phba->hbalock);
6509 ras_fwlog->state = INACTIVE;
6510 spin_unlock_irq(&phba->hbalock);
6514 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW logging support
6515 * @phba: Pointer to HBA context object.
6516 * @fwlog_buff_count: Count of buffers to be created.
6518 * This routine allocates DMA memory for the Log Write Position Data
6519 * [LWPD] and for the buffers posted to the adapter for FW log updates.
6520 * The buffer count is calculated from the module param ras_fwlog_buffsize;
6521 * the size of each buffer posted to FW is 64K.
6525 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6526 uint32_t fwlog_buff_count)
6528 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6529 struct lpfc_dmabuf *dmabuf;
6532 /* Initialize List */
6533 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6535 /* Allocate memory for the LWPD */
6536 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6537 sizeof(uint32_t) * 2,
6538 &ras_fwlog->lwpd.phys,
6540 if (!ras_fwlog->lwpd.virt) {
6541 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6542 "6185 LWPD Memory Alloc Failed\n");
6547 ras_fwlog->fw_buffcount = fwlog_buff_count;
6548 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6549 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6553 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6554 "6186 Memory Alloc failed FW logging");
6558 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6559 LPFC_RAS_MAX_ENTRY_SIZE,
6560 &dmabuf->phys, GFP_KERNEL);
6561 if (!dmabuf->virt) {
6564 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6565 "6187 DMA Alloc Failed FW logging");
6568 dmabuf->buffer_tag = i;
6569 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6574 lpfc_sli4_ras_dma_free(phba);
6580 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6581 * @phba: pointer to lpfc hba data structure.
6582 * @pmb: pointer to the driver internal queue element for mailbox command.
6584 * Completion handler for driver's RAS MBX command to the device.
6587 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6590 union lpfc_sli4_cfg_shdr *shdr;
6591 uint32_t shdr_status, shdr_add_status;
6592 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6596 shdr = (union lpfc_sli4_cfg_shdr *)
6597 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6598 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6599 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6601 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6602 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6603 "6188 FW LOG mailbox "
6604 "completed with status x%x add_status x%x,"
6605 " mbx status x%x\n",
6606 shdr_status, shdr_add_status, mb->mbxStatus);
6608 ras_fwlog->ras_hwsupport = false;
6612 spin_lock_irq(&phba->hbalock);
6613 ras_fwlog->state = ACTIVE;
6614 spin_unlock_irq(&phba->hbalock);
6615 mempool_free(pmb, phba->mbox_mem_pool);
6620 /* Free RAS DMA memory */
6621 lpfc_sli4_ras_dma_free(phba);
6622 mempool_free(pmb, phba->mbox_mem_pool);
6626 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
6627 * @phba: pointer to lpfc hba data structure.
6628 * @fwlog_level: Logging verbosity level.
6629 * @fwlog_enable: Enable/Disable logging.
6631 * Initialize memory and post a mailbox command to enable FW logging in host memory.
6635 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6636 uint32_t fwlog_level,
6637 uint32_t fwlog_enable)
6639 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6640 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6641 struct lpfc_dmabuf *dmabuf;
6643 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6646 spin_lock_irq(&phba->hbalock);
6647 ras_fwlog->state = INACTIVE;
6648 spin_unlock_irq(&phba->hbalock);
6650 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6651 phba->cfg_ras_fwlog_buffsize);
6652 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
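/*
 * Worked example (the multiplier value is hypothetical): if
 * LPFC_RAS_MIN_BUFF_POST_SIZE were 512 KiB and ras_fwlog_buffsize were 2,
 * fwlog_buffsize would be 1 MiB and, at 64 KiB per
 * LPFC_RAS_MAX_ENTRY_SIZE buffer, fwlog_entry_count would be 16.
 */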
6655 * If re-enabling FW logging, reuse the previously allocated
6656 * DMA buffers when posting the MBX command.
6658 if (!ras_fwlog->lwpd.virt) {
6659 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6661 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6662 "6189 FW Log Memory Allocation Failed");
6667 /* Setup Mailbox command */
6668 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6670 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6671 "6190 RAS MBX Alloc Failed");
6676 ras_fwlog->fw_loglevel = fwlog_level;
6677 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6678 sizeof(struct lpfc_sli4_cfg_mhdr));
6680 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6681 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6682 len, LPFC_SLI4_MBX_EMBED);
6684 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6685 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6687 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6688 ras_fwlog->fw_loglevel);
6689 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6690 ras_fwlog->fw_buffcount);
6691 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6692 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6694 /* Update DMA buffer address */
6695 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6696 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6698 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6699 putPaddrLow(dmabuf->phys);
6701 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6702 putPaddrHigh(dmabuf->phys);
6705 /* Update LWPD address */
6706 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6707 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
6709 spin_lock_irq(&phba->hbalock);
6710 ras_fwlog->state = REG_INPROGRESS;
6711 spin_unlock_irq(&phba->hbalock);
6712 mbox->vport = phba->pport;
6713 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
6715 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6717 if (rc == MBX_NOT_FINISHED) {
6718 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6719 "6191 FW-Log Mailbox failed. "
6720 "status %d mbxStatus : x%x", rc,
6721 bf_get(lpfc_mqe_status, &mbox->u.mqe));
6722 mempool_free(mbox, phba->mbox_mem_pool);
6729 lpfc_sli4_ras_dma_free(phba);
6735 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
6736 * @phba: Pointer to HBA context object.
6738 * Check if RAS is supported on the adapter and initialize it.
6741 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6743 /* Check RAS FW Log needs to be enabled or not */
6744 if (lpfc_check_fwlog_support(phba))
6747 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6748 LPFC_RAS_ENABLE_LOGGING);
6752 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
6753 * @phba: Pointer to HBA context object.
6755 * This function allocates all SLI4 resource identifiers.
6758 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6760 int i, rc, error = 0;
6761 uint16_t count, base;
6762 unsigned long longs;
6764 if (!phba->sli4_hba.rpi_hdrs_in_use)
6765 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6766 if (phba->sli4_hba.extents_in_use) {
6768 * The port supports resource extents. The XRI, VPI, VFI, RPI
6769 * resource extent count must be read and allocated before
6770 * provisioning the resource id arrays.
6772 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6773 LPFC_IDX_RSRC_RDY) {
6775 * Extent-based resources are set - the driver could
6776 * be in a port reset. Figure out if any corrective
6777 * actions need to be taken.
6779 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6780 LPFC_RSC_TYPE_FCOE_VFI);
6783 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6784 LPFC_RSC_TYPE_FCOE_VPI);
6787 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6788 LPFC_RSC_TYPE_FCOE_XRI);
6791 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6792 LPFC_RSC_TYPE_FCOE_RPI);
6797 * It's possible that the number of resources
6798 * provided to this port instance changed between
6799 * resets. Detect this condition and reallocate
6800 * resources. Otherwise, there is no action.
6803 lpfc_printf_log(phba, KERN_INFO,
6804 LOG_MBOX | LOG_INIT,
6805 "2931 Detected extent resource "
6806 "change. Reallocating all "
6808 rc = lpfc_sli4_dealloc_extent(phba,
6809 LPFC_RSC_TYPE_FCOE_VFI);
6810 rc = lpfc_sli4_dealloc_extent(phba,
6811 LPFC_RSC_TYPE_FCOE_VPI);
6812 rc = lpfc_sli4_dealloc_extent(phba,
6813 LPFC_RSC_TYPE_FCOE_XRI);
6814 rc = lpfc_sli4_dealloc_extent(phba,
6815 LPFC_RSC_TYPE_FCOE_RPI);
6820 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6824 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6828 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6832 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6835 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6840 * The port does not support resource extents. The XRI, VPI,
6841 * VFI, RPI resource ids were determined from READ_CONFIG.
6842 * Just allocate the bitmasks and provision the resource id
6843 * arrays. If a port reset is active, the resources don't
6844 * need any action - just exit.
6846 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6847 LPFC_IDX_RSRC_RDY) {
6848 lpfc_sli4_dealloc_resource_identifiers(phba);
6849 lpfc_sli4_remove_rpis(phba);
6852 count = phba->sli4_hba.max_cfg_param.max_rpi;
6854 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6855 "3279 Invalid provisioning of "
6860 base = phba->sli4_hba.max_cfg_param.rpi_base;
6861 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6862 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6863 sizeof(unsigned long),
6865 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6869 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
6871 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6873 goto free_rpi_bmask;
6876 for (i = 0; i < count; i++)
6877 phba->sli4_hba.rpi_ids[i] = base + i;
6880 count = phba->sli4_hba.max_cfg_param.max_vpi;
6882 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6883 "3280 Invalid provisioning of "
6888 base = phba->sli4_hba.max_cfg_param.vpi_base;
6889 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6890 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6892 if (unlikely(!phba->vpi_bmask)) {
6896 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
6898 if (unlikely(!phba->vpi_ids)) {
6900 goto free_vpi_bmask;
6903 for (i = 0; i < count; i++)
6904 phba->vpi_ids[i] = base + i;
6907 count = phba->sli4_hba.max_cfg_param.max_xri;
6909 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6910 "3281 Invalid provisioning of "
6915 base = phba->sli4_hba.max_cfg_param.xri_base;
6916 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6917 phba->sli4_hba.xri_bmask = kcalloc(longs,
6918 sizeof(unsigned long),
6920 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6924 phba->sli4_hba.max_cfg_param.xri_used = 0;
6925 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
6927 if (unlikely(!phba->sli4_hba.xri_ids)) {
6929 goto free_xri_bmask;
6932 for (i = 0; i < count; i++)
6933 phba->sli4_hba.xri_ids[i] = base + i;
6936 count = phba->sli4_hba.max_cfg_param.max_vfi;
6938 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6939 "3282 Invalid provisioning of "
6944 base = phba->sli4_hba.max_cfg_param.vfi_base;
6945 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6946 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6947 sizeof(unsigned long),
6949 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6953 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
6955 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6957 goto free_vfi_bmask;
6960 for (i = 0; i < count; i++)
6961 phba->sli4_hba.vfi_ids[i] = base + i;
6964 * Mark all resources ready. An HBA reset doesn't need
6965 * to redo this initialization.
6967 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6973 kfree(phba->sli4_hba.vfi_bmask);
6974 phba->sli4_hba.vfi_bmask = NULL;
6976 kfree(phba->sli4_hba.xri_ids);
6977 phba->sli4_hba.xri_ids = NULL;
6979 kfree(phba->sli4_hba.xri_bmask);
6980 phba->sli4_hba.xri_bmask = NULL;
6982 kfree(phba->vpi_ids);
6983 phba->vpi_ids = NULL;
6985 kfree(phba->vpi_bmask);
6986 phba->vpi_bmask = NULL;
6988 kfree(phba->sli4_hba.rpi_ids);
6989 phba->sli4_hba.rpi_ids = NULL;
6991 kfree(phba->sli4_hba.rpi_bmask);
6992 phba->sli4_hba.rpi_bmask = NULL;
6998 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
6999 * @phba: Pointer to HBA context object.
7001 * This function deallocates all previously allocated SLI4 resource identifiers.
7005 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
7007 if (phba->sli4_hba.extents_in_use) {
7008 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7009 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7010 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7011 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7013 kfree(phba->vpi_bmask);
7014 phba->sli4_hba.max_cfg_param.vpi_used = 0;
7015 kfree(phba->vpi_ids);
7016 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7017 kfree(phba->sli4_hba.xri_bmask);
7018 kfree(phba->sli4_hba.xri_ids);
7019 kfree(phba->sli4_hba.vfi_bmask);
7020 kfree(phba->sli4_hba.vfi_ids);
7021 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7022 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7029 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
7030 * @phba: Pointer to HBA context object.
7031 * @type: The resource extent type.
7032 * @extnt_cnt: buffer to hold port extent count response
7033 * @extnt_size: buffer to hold port extent size response.
7035 * This function calls the port to read the host allocated extents
7036 * for a particular type.
7039 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
7040 uint16_t *extnt_cnt, uint16_t *extnt_size)
7044 uint16_t curr_blks = 0;
7045 uint32_t req_len, emb_len;
7046 uint32_t alloc_len, mbox_tmo;
7047 struct list_head *blk_list_head;
7048 struct lpfc_rsrc_blks *rsrc_blk;
7050 void *virtaddr = NULL;
7051 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
7052 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
7053 union lpfc_sli4_cfg_shdr *shdr;
7056 case LPFC_RSC_TYPE_FCOE_VPI:
7057 blk_list_head = &phba->lpfc_vpi_blk_list;
7059 case LPFC_RSC_TYPE_FCOE_XRI:
7060 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
7062 case LPFC_RSC_TYPE_FCOE_VFI:
7063 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
7065 case LPFC_RSC_TYPE_FCOE_RPI:
7066 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
7072 /* Count the number of extents currently allocated for this type. */
7073 list_for_each_entry(rsrc_blk, blk_list_head, list) {
7074 if (curr_blks == 0) {
7076 * The GET_ALLOCATED mailbox does not return the size,
7077 * just the count. The size should be just the size
7078 * stored in the current allocated block and all sizes
7079 * for an extent type are the same, so set the return value here.
7082 *extnt_size = rsrc_blk->rsrc_size;
7088 * Calculate the size of an embedded mailbox. The uint32_t
7089 * accounts for the extents-specific word.
7091 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
7095 * Presume the allocation and response will fit into an embedded
7096 * mailbox. If not true, reconfigure to a non-embedded mailbox.
7098 emb = LPFC_SLI4_MBX_EMBED;
7100 if (req_len > emb_len) {
7101 req_len = curr_blks * sizeof(uint16_t) +
7102 sizeof(union lpfc_sli4_cfg_shdr) +
7104 emb = LPFC_SLI4_MBX_NEMBED;
7107 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7110 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
7112 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7113 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
7115 if (alloc_len < req_len) {
7116 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7117 "2983 Allocated DMA memory size (x%x) is "
7118 "less than the requested DMA memory "
7119 "size (x%x)\n", alloc_len, req_len);
7123 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
7129 if (!phba->sli4_hba.intr_enable)
7130 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
7132 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
7133 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
7142 * Figure out where the response is located. Then get local pointers
7143 * to the response data. The port does not guarantee a response for the
7144 * full requested extent count, so update the local variable with the
7145 * count actually allocated by the port.
7147 if (emb == LPFC_SLI4_MBX_EMBED) {
7148 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7149 shdr = &rsrc_ext->header.cfg_shdr;
7150 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7152 virtaddr = mbox->sge_array->addr[0];
7153 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7154 shdr = &n_rsrc->cfg_shdr;
7155 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7158 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
7159 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7160 "2984 Failed to read allocated resources "
7161 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
7163 bf_get(lpfc_mbox_hdr_status, &shdr->response),
7164 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7169 lpfc_sli4_mbox_cmd_free(phba, mbox);
7174 * lpfc_sli4_repost_sgl_list - Repost buffer sgls to the port in blocks
7175 * @phba: pointer to lpfc hba data structure.
7176 * @sgl_list: linked link of sgl buffers to post
7177 * @cnt: number of linked list buffers
7179 * This routine walks the list of buffers that have been allocated and
7180 * reposts them to the port by using SGL block post. This is needed after a
7181 * pci_function_reset/warm_start or start. It attempts to construct blocks
7182 * of buffer sgls which contain contiguous xris and uses the non-embedded
7183 * SGL block post mailbox commands to post them to the port. For any single
7184 * buffer sgl with a non-contiguous xri, it uses the embedded SGL post
7185 * mailbox command for posting.
7187 * Returns: 0 = success, non-zero failure.
7190 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7191 struct list_head *sgl_list, int cnt)
7193 struct lpfc_sglq *sglq_entry = NULL;
7194 struct lpfc_sglq *sglq_entry_next = NULL;
7195 struct lpfc_sglq *sglq_entry_first = NULL;
7196 int status, total_cnt;
7197 int post_cnt = 0, num_posted = 0, block_cnt = 0;
7198 int last_xritag = NO_XRI;
7199 LIST_HEAD(prep_sgl_list);
7200 LIST_HEAD(blck_sgl_list);
7201 LIST_HEAD(allc_sgl_list);
7202 LIST_HEAD(post_sgl_list);
7203 LIST_HEAD(free_sgl_list);
7205 spin_lock_irq(&phba->hbalock);
7206 spin_lock(&phba->sli4_hba.sgl_list_lock);
7207 list_splice_init(sgl_list, &allc_sgl_list);
7208 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7209 spin_unlock_irq(&phba->hbalock);
7212 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7213 &allc_sgl_list, list) {
7214 list_del_init(&sglq_entry->list);
7216 if ((last_xritag != NO_XRI) &&
7217 (sglq_entry->sli4_xritag != last_xritag + 1)) {
7218 /* a hole in xri block, form a sgl posting block */
7219 list_splice_init(&prep_sgl_list, &blck_sgl_list);
7220 post_cnt = block_cnt - 1;
7221 /* prepare list for next posting block */
7222 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7225 /* prepare list for next posting block */
7226 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7227 /* enough sgls for non-embed sgl mbox command */
7228 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7229 list_splice_init(&prep_sgl_list,
7231 post_cnt = block_cnt;
7237 /* keep track of last sgl's xritag */
7238 last_xritag = sglq_entry->sli4_xritag;
7240 /* end of repost sgl list condition for buffers */
7241 if (num_posted == total_cnt) {
7242 if (post_cnt == 0) {
7243 list_splice_init(&prep_sgl_list,
7245 post_cnt = block_cnt;
7246 } else if (block_cnt == 1) {
7247 status = lpfc_sli4_post_sgl(phba,
7248 sglq_entry->phys, 0,
7249 sglq_entry->sli4_xritag);
7251 /* successful, put sgl to posted list */
7252 list_add_tail(&sglq_entry->list,
7255 /* Failure, put sgl to free list */
7256 lpfc_printf_log(phba, KERN_WARNING,
7258 "3159 Failed to post "
7259 "sgl, xritag:x%x\n",
7260 sglq_entry->sli4_xritag);
7261 list_add_tail(&sglq_entry->list,
7268 /* continue until a non-embedded page's worth of sgls */
7272 /* post the buffer list sgls as a block */
7273 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7277 /* success, put sgl list to posted sgl list */
7278 list_splice_init(&blck_sgl_list, &post_sgl_list);
7280 /* Failure, put sgl list to free sgl list */
7281 sglq_entry_first = list_first_entry(&blck_sgl_list,
7284 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7285 "3160 Failed to post sgl-list, "
7287 sglq_entry_first->sli4_xritag,
7288 (sglq_entry_first->sli4_xritag +
7290 list_splice_init(&blck_sgl_list, &free_sgl_list);
7291 total_cnt -= post_cnt;
7294 /* don't reset xritag due to hole in xri block */
7296 last_xritag = NO_XRI;
7298 /* reset sgl post count for next round of posting */
7302 /* free the sgls failed to post */
7303 lpfc_free_sgl_list(phba, &free_sgl_list);
7305 /* push sgls posted to the available list */
7306 if (!list_empty(&post_sgl_list)) {
7307 spin_lock_irq(&phba->hbalock);
7308 spin_lock(&phba->sli4_hba.sgl_list_lock);
7309 list_splice_init(&post_sgl_list, sgl_list);
7310 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7311 spin_unlock_irq(&phba->hbalock);
7313 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7314 "3161 Failure to post sgl to port.\n");
7318 /* return the number of XRIs actually posted */
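/*
 * Illustrative sketch (not driver code): the grouping rule used above,
 * reduced to its core. Consecutive xritags accumulate toward one
 * non-embedded block post; a hole in the sequence flushes the pending
 * block first. The helper name is hypothetical.
 */
#if 0	/* example only */
static bool example_xri_breaks_block(int last_xritag, int cur_xritag)
{
	/* NO_XRI means no block is being accumulated yet. */
	return last_xritag != NO_XRI && cur_xritag != last_xritag + 1;
}
#endif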
7323 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7324 * @phba: pointer to lpfc hba data structure.
7326 * This routine walks the list of nvme buffers that have been allocated and
7327 * reposts them to the port by using SGL block post. This is needed after a
7328 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7329 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7330 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7332 * Returns: 0 = success, non-zero failure.
7335 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7337 LIST_HEAD(post_nblist);
7338 int num_posted, rc = 0;
7340 /* gather all NVME buffers that need reposting onto a local list */
7341 lpfc_io_buf_flush(phba, &post_nblist);
7343 /* post the list of nvme buffer sgls to port if available */
7344 if (!list_empty(&post_nblist)) {
7345 num_posted = lpfc_sli4_post_io_sgl_list(
7346 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7347 /* failed to post any nvme buffer, return error */
7348 if (num_posted == 0)
7355 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7359 len = sizeof(struct lpfc_mbx_set_host_data) -
7360 sizeof(struct lpfc_sli4_cfg_mhdr);
7361 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7362 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7363 LPFC_SLI4_MBX_EMBED);
7365 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7366 mbox->u.mqe.un.set_host_data.param_len =
7367 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7368 snprintf(mbox->u.mqe.un.set_host_data.data,
7369 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7370 "Linux %s v"LPFC_DRIVER_VERSION,
7371 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
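/* Example of the resulting host data string (version illustrative):
 * "Linux FC v14.0.0.1" on an FC port, "Linux FCoE v14.0.0.1" on FCoE.
 */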
7375 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7376 struct lpfc_queue *drq, int count, int idx)
7379 struct lpfc_rqe hrqe;
7380 struct lpfc_rqe drqe;
7381 struct lpfc_rqb *rqbp;
7382 unsigned long flags;
7383 struct rqb_dmabuf *rqb_buffer;
7384 LIST_HEAD(rqb_buf_list);
7387 for (i = 0; i < count; i++) {
7388 spin_lock_irqsave(&phba->hbalock, flags);
7389 /* If the RQ is already full, don't bother */
7390 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
7391 spin_unlock_irqrestore(&phba->hbalock, flags);
7394 spin_unlock_irqrestore(&phba->hbalock, flags);
7396 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7399 rqb_buffer->hrq = hrq;
7400 rqb_buffer->drq = drq;
7401 rqb_buffer->idx = idx;
7402 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7405 spin_lock_irqsave(&phba->hbalock, flags);
7406 while (!list_empty(&rqb_buf_list)) {
7407 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7410 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7411 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7412 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7413 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7414 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7416 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7417 "6421 Cannot post to HRQ %d: %x %x %x "
7425 rqbp->rqb_free_buffer(phba, rqb_buffer);
7427 list_add_tail(&rqb_buffer->hbuf.list,
7428 &rqbp->rqb_buffer_list);
7429 rqbp->buffer_count++;
7432 spin_unlock_irqrestore(&phba->hbalock, flags);
7437 * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
7438 * @phba: pointer to lpfc hba data structure.
7440 * This routine initializes the per-cq idle_stat to dynamically dictate
7441 * polling decisions.
7446 static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
7449 struct lpfc_sli4_hdw_queue *hdwq;
7450 struct lpfc_queue *cq;
7451 struct lpfc_idle_stat *idle_stat;
7454 for_each_present_cpu(i) {
7455 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
7458 /* Skip if we've already handled this cq's primary CPU */
7462 idle_stat = &phba->sli4_hba.idle_stat[i];
7464 idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
7465 idle_stat->prev_wall = wall;
7467 if (phba->nvmet_support)
7468 cq->poll_mode = LPFC_QUEUE_WORK;
7470 cq->poll_mode = LPFC_IRQ_POLL;
7473 if (!phba->nvmet_support)
7474 schedule_delayed_work(&phba->idle_stat_delay_work,
7475 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
7478 static void lpfc_sli4_dip(struct lpfc_hba *phba)
7482 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7483 if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
7484 if_type == LPFC_SLI_INTF_IF_TYPE_6) {
7485 struct lpfc_register reg_data;
7487 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7491 if (bf_get(lpfc_sliport_status_dip, &reg_data))
7492 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7493 "2904 Firmware Dump Image Present"
7499 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
7500 * @phba: Pointer to HBA context object.
7502 * This function is the main SLI4 device initialization PCI function. This
7503 * function is called by the HBA initialization code, HBA reset code and
7504 * HBA error attention handler code. Caller is not required to hold any
7508 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7510 int rc, i, cnt, len, dd;
7511 LPFC_MBOXQ_t *mboxq;
7512 struct lpfc_mqe *mqe;
7515 uint32_t ftr_rsp = 0;
7516 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7517 struct lpfc_vport *vport = phba->pport;
7518 struct lpfc_dmabuf *mp;
7519 struct lpfc_rqb *rqbp;
7521 /* Perform a PCI function reset to start from clean */
7522 rc = lpfc_pci_function_reset(phba);
7526 /* Check the HBA Host Status Register for readiness */
7527 rc = lpfc_sli4_post_status_check(phba);
7531 spin_lock_irq(&phba->hbalock);
7532 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
7533 spin_unlock_irq(&phba->hbalock);
7536 lpfc_sli4_dip(phba);
7539 * Allocate a single mailbox container for initializing the port.
7542 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7546 /* Issue READ_REV to collect vpd and FW information. */
7547 vpd_size = SLI4_PAGE_SIZE;
7548 vpd = kzalloc(vpd_size, GFP_KERNEL);
7554 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
7560 mqe = &mboxq->u.mqe;
7561 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
7562 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
7563 phba->hba_flag |= HBA_FCOE_MODE;
7564 phba->fcp_embed_io = 0; /* SLI4 FC support only */
7566 phba->hba_flag &= ~HBA_FCOE_MODE;
7569 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
7571 phba->hba_flag |= HBA_FIP_SUPPORT;
7573 phba->hba_flag &= ~HBA_FIP_SUPPORT;
7575 phba->hba_flag &= ~HBA_IOQ_FLUSH;
7577 if (phba->sli_rev != LPFC_SLI_REV4) {
7578 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7579 "0376 READ_REV Error. SLI Level %d "
7580 "FCoE enabled %d\n",
7581 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
7588 * Continue initialization with default values even if driver failed
7589 * to read FCoE param config regions; only read parameters if the board is FCoE.
7592 if (phba->hba_flag & HBA_FCOE_MODE &&
7593 lpfc_sli4_read_fcoe_params(phba))
7594 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
7595 "2570 Failed to read FCoE parameters\n");
7598 * Retrieve the sli4 device physical port name; failure to do so
7599 * is considered non-fatal.
7601 rc = lpfc_sli4_retrieve_pport_name(phba);
7603 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7604 "3080 Successful retrieving SLI4 device "
7605 "physical port name: %s.\n", phba->Port);
7607 rc = lpfc_sli4_get_ctl_attr(phba);
7609 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7610 "8351 Successful retrieving SLI4 device "
7614 * Evaluate the read rev and vpd data. Populate the driver
7615 * state with the results. If this routine fails, the failure
7616 * is not fatal as the driver will use generic values.
7618 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
7619 if (unlikely(!rc)) {
7620 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7621 "0377 Error %d parsing vpd. "
7622 "Using defaults.\n", rc);
7627 /* Save information as VPD data */
7628 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
7629 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
7632 * This is because the first G7 ASIC doesn't support the standard
7633 * 0x5a NVME cmd descriptor type/subtype
7635 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7636 LPFC_SLI_INTF_IF_TYPE_6) &&
7637 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
7638 (phba->vpd.rev.smRev == 0) &&
7639 (phba->cfg_nvme_embed_cmd == 1))
7640 phba->cfg_nvme_embed_cmd = 0;
7642 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
7643 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
7645 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
7647 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
7649 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
7651 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
7652 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
7653 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
7654 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
7655 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
7656 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
7657 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7658 "(%d):0380 READ_REV Status x%x "
7659 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
7660 mboxq->vport ? mboxq->vport->vpi : 0,
7661 bf_get(lpfc_mqe_status, mqe),
7662 phba->vpd.rev.opFwName,
7663 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
7664 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
7666 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7667 LPFC_SLI_INTF_IF_TYPE_0) {
7668 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
7669 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7670 if (rc == MBX_SUCCESS) {
7671 phba->hba_flag |= HBA_RECOVERABLE_UE;
7672 /* Set 1Sec interval to detect UE */
7673 phba->eratt_poll_interval = 1;
7674 phba->sli4_hba.ue_to_sr = bf_get(
7675 lpfc_mbx_set_feature_UESR,
7676 &mboxq->u.mqe.un.set_feature);
7677 phba->sli4_hba.ue_to_rp = bf_get(
7678 lpfc_mbx_set_feature_UERP,
7679 &mboxq->u.mqe.un.set_feature);
7683 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
7684 /* Enable MDS Diagnostics only if the SLI Port supports it */
7685 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
7686 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7687 if (rc != MBX_SUCCESS)
7688 phba->mds_diags_support = 0;
7692 * Discover the port's supported feature set and match it against the
7695 lpfc_request_features(phba, mboxq);
7696 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7702 /* Disable VMID if app header is not supported */
7703 if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
7704 &mqe->un.req_ftrs))) {
7705 bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
7706 phba->cfg_vmid_app_header = 0;
7707 lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
7708 "1242 vmid feature not supported\n");
7712 * The port must support FCP initiator mode as this is the
7713 * only mode running in the host.
7715 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7716 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7717 "0378 No support for fcpi mode.\n");
7721 /* Performance Hints are ONLY for FCoE */
7722 if (phba->hba_flag & HBA_FCOE_MODE) {
7723 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7724 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7726 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
7730 * If the port cannot support the host's requested features
7731 * then turn off the global config parameters to disable the
7732 * feature in the driver. This is not a fatal error.
7734 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7735 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7736 phba->cfg_enable_bg = 0;
7737 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
7742 if (phba->max_vpi && phba->cfg_enable_npiv &&
7743 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7747 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7748 "0379 Feature Mismatch Data: x%08x %08x "
7749 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7750 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7751 phba->cfg_enable_npiv, phba->max_vpi);
7752 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7753 phba->cfg_enable_bg = 0;
7754 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7755 phba->cfg_enable_npiv = 0;
7758 /* These SLI3 features are assumed in SLI4 */
7759 spin_lock_irq(&phba->hbalock);
7760 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7761 spin_unlock_irq(&phba->hbalock);
7763 /* Always try to enable dual dump feature if we can */
7764 lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
7765 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7766 dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
7767 if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
7768 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7769 "6448 Dual Dump is enabled\n");
7771 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
7772 "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
7774 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7775 lpfc_sli_config_mbox_subsys_get(
7777 lpfc_sli_config_mbox_opcode_get(
7781 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
7782 * calls depend on these resources to complete port setup.
7784 rc = lpfc_sli4_alloc_resource_identifiers(phba);
7786 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7787 "2920 Failed to alloc Resource IDs "
7792 lpfc_set_host_data(phba, mboxq);
7794 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7796 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7797 "2134 Failed to set host os driver version %x",
7801 /* Read the port's service parameters. */
7802 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7804 phba->link_state = LPFC_HBA_ERROR;
7809 mboxq->vport = vport;
7810 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7811 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
7812 if (rc == MBX_SUCCESS) {
7813 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7818 * This memory was allocated by the lpfc_read_sparam routine. Release
7819 * it to the mbuf pool.
7821 lpfc_mbuf_free(phba, mp->virt, mp->phys);
7823 mboxq->ctx_buf = NULL;
7825 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7826 "0382 READ_SPARAM command failed "
7827 "status %d, mbxStatus x%x\n",
7828 rc, bf_get(lpfc_mqe_status, mqe));
7829 phba->link_state = LPFC_HBA_ERROR;
7834 lpfc_update_vport_wwn(vport);
7836 /* Update the fc_host data structures with new wwn. */
7837 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7838 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7840 /* Create all the SLI4 queues */
7841 rc = lpfc_sli4_queue_create(phba);
7843 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7844 "3089 Failed to allocate queues\n");
7848 /* Set up all the queues to the device */
7849 rc = lpfc_sli4_queue_setup(phba);
7851 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7852 "0381 Error %d during queue setup.\n ", rc);
7853 goto out_stop_timers;
7855 /* Initialize the driver internal SLI layer lists. */
7856 lpfc_sli4_setup(phba);
7857 lpfc_sli4_queue_init(phba);
7859 /* update host els xri-sgl sizes and mappings */
7860 rc = lpfc_sli4_els_sgl_update(phba);
7862 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7863 "1400 Failed to update xri-sgl size and "
7864 "mapping: %d\n", rc);
7865 goto out_destroy_queue;
7868 /* register the els sgl pool to the port */
7869 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7870 phba->sli4_hba.els_xri_cnt);
7871 if (unlikely(rc < 0)) {
7872 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7873 "0582 Error %d during els sgl post "
7876 goto out_destroy_queue;
7878 phba->sli4_hba.els_xri_cnt = rc;
7880 if (phba->nvmet_support) {
7881 /* update host nvmet xri-sgl sizes and mappings */
7882 rc = lpfc_sli4_nvmet_sgl_update(phba);
7884 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7885 "6308 Failed to update nvmet-sgl size "
7886 "and mapping: %d\n", rc);
7887 goto out_destroy_queue;
7890 /* register the nvmet sgl pool to the port */
7891 rc = lpfc_sli4_repost_sgl_list(
7893 &phba->sli4_hba.lpfc_nvmet_sgl_list,
7894 phba->sli4_hba.nvmet_xri_cnt);
7895 if (unlikely(rc < 0)) {
7896 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7897 "3117 Error %d during nvmet "
7900 goto out_destroy_queue;
7902 phba->sli4_hba.nvmet_xri_cnt = rc;
7904 /* We allocate an iocbq for every receive context SGL.
7905 * The additional allocation is for abort and ls handling.
7907 cnt = phba->sli4_hba.nvmet_xri_cnt +
7908 phba->sli4_hba.max_cfg_param.max_xri;
7910 /* update host common xri-sgl sizes and mappings */
7911 rc = lpfc_sli4_io_sgl_update(phba);
7913 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7914 "6082 Failed to update nvme-sgl size "
7915 "and mapping: %d\n", rc);
7916 goto out_destroy_queue;
7919 /* register the allocated common sgl pool to the port */
7920 rc = lpfc_sli4_repost_io_sgl_list(phba);
7922 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7923 "6116 Error %d during nvme sgl post "
7925 /* Some NVME buffers were moved to abort nvme list */
7926 /* A pci function reset will repost them */
7928 goto out_destroy_queue;
7930 /* Each lpfc_io_buf job structure has an iocbq element.
7931 * This cnt provides for abort, els, ct and ls requests.
7933 cnt = phba->sli4_hba.max_cfg_param.max_xri;
7936 if (!phba->sli.iocbq_lookup) {
7937 /* Initialize and populate the iocb list per host */
7938 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7939 "2821 initialize iocb list with %d entries\n",
7941 rc = lpfc_init_iocb_list(phba, cnt);
7943 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7944 "1413 Failed to init iocb list.\n");
7945 goto out_destroy_queue;
7949 if (phba->nvmet_support)
7950 lpfc_nvmet_create_targetport(phba);
7952 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7953 /* Post initial buffers to all RQs created */
7954 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7955 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7956 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7957 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7958 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
7959 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7960 rqbp->buffer_count = 0;
7962 lpfc_post_rq_buffer(
7963 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7964 phba->sli4_hba.nvmet_mrq_data[i],
7965 phba->cfg_nvmet_mrq_post, i);
7969 /* Post the rpi header region to the device. */
7970 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7972 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7973 "0393 Error %d during rpi post operation\n",
7976 goto out_free_iocblist;
7978 lpfc_sli4_node_prep(phba);
7980 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7981 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7983 * The FC Port needs to register FCFI (index 0)
7985 lpfc_reg_fcfi(phba, mboxq);
7986 mboxq->vport = phba->pport;
7987 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7988 if (rc != MBX_SUCCESS)
7989 goto out_unset_queue;
7991 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7992 &mboxq->u.mqe.un.reg_fcfi);
7994 /* We are a NVME Target mode with MRQ > 1 */
7996 /* First register the FCFI */
7997 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7998 mboxq->vport = phba->pport;
7999 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8000 if (rc != MBX_SUCCESS)
8001 goto out_unset_queue;
8003 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
8004 &mboxq->u.mqe.un.reg_fcfi_mrq);
8006 /* Next register the MRQs */
8007 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
8008 mboxq->vport = phba->pport;
8009 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8010 if (rc != MBX_SUCCESS)
8011 goto out_unset_queue;
8014 /* Check if the port is configured to be disabled */
8015 lpfc_sli_read_link_ste(phba);
8018 /* Don't post more new bufs if repost already recovered the existing IO sgls */
8021 if (phba->nvmet_support == 0) {
8022 if (phba->sli4_hba.io_xri_cnt == 0) {
8023 len = lpfc_new_io_buf(
8024 phba, phba->sli4_hba.io_xri_max);
8027 goto out_unset_queue;
8030 if (phba->cfg_xri_rebalancing)
8031 lpfc_create_multixri_pools(phba);
8034 phba->cfg_xri_rebalancing = 0;
8037 /* Allow asynchronous mailbox command to go through */
8038 spin_lock_irq(&phba->hbalock);
8039 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8040 spin_unlock_irq(&phba->hbalock);
8042 /* Post receive buffers to the device */
8043 lpfc_sli4_rb_setup(phba);
8045 /* Reset HBA FCF states after HBA reset */
8046 phba->fcf.fcf_flag = 0;
8047 phba->fcf.current_rec.flag = 0;
8049 /* Start the ELS watchdog timer */
8050 mod_timer(&vport->els_tmofunc,
8051 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
8053 /* Start heart beat timer */
8054 mod_timer(&phba->hb_tmofunc,
8055 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
8056 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
8057 phba->last_completion_time = jiffies;
8059 /* start eq_delay heartbeat */
8060 if (phba->cfg_auto_imax)
8061 queue_delayed_work(phba->wq, &phba->eq_delay_work,
8062 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
8064 /* start per phba idle_stat_delay heartbeat */
8065 lpfc_init_idle_stat_hb(phba);
8067 /* Start error attention (ERATT) polling timer */
8068 mod_timer(&phba->eratt_poll,
8069 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
8071 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
8072 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
8073 rc = pci_enable_pcie_error_reporting(phba->pcidev);
8075 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8076 "2829 This device supports "
8077 "Advanced Error Reporting (AER)\n");
8078 spin_lock_irq(&phba->hbalock);
8079 phba->hba_flag |= HBA_AER_ENABLED;
8080 spin_unlock_irq(&phba->hbalock);
8082 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8083 "2830 This device does not support "
8084 "Advanced Error Reporting (AER)\n");
8085 phba->cfg_aer_support = 0;
8091 * The port is ready, set the host's link state to LINK_DOWN
8092 * in preparation for link interrupts.
8094 spin_lock_irq(&phba->hbalock);
8095 phba->link_state = LPFC_LINK_DOWN;
8097 /* Check if physical ports are trunked */
8098 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
8099 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
8100 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
8101 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
8102 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
8103 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
8104 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
8105 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
8106 spin_unlock_irq(&phba->hbalock);
8108 /* Arm the CQs and then EQs on device */
8109 lpfc_sli4_arm_cqeq_intr(phba);
8111 /* Indicate device interrupt mode */
8112 phba->sli4_hba.intr_enable = 1;
8114 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
8115 (phba->hba_flag & LINK_DISABLED)) {
8116 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8117 "3103 Adapter Link is disabled.\n");
8118 lpfc_down_link(phba, mboxq);
8119 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8120 if (rc != MBX_SUCCESS) {
8121 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8122 "3104 Adapter failed to issue "
8123 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
8124 goto out_io_buff_free;
8126 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
8127 /* don't perform init_link on SLI4 FC port loopback test */
8128 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
8129 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
8131 goto out_io_buff_free;
8134 mempool_free(mboxq, phba->mbox_mem_pool);
8137 /* Free allocated IO Buffers */
8140 /* Unset all the queues set up in this routine when erroring out */
8141 lpfc_sli4_queue_unset(phba);
8143 lpfc_free_iocb_list(phba);
8145 lpfc_sli4_queue_destroy(phba);
8147 lpfc_stop_hba_timers(phba);
8149 mempool_free(mboxq, phba->mbox_mem_pool);
8154 * lpfc_mbox_timeout - Timeout call back function for mbox timer
8155 * @t: Context to fetch pointer to hba structure from.
8157 * This is the callback function for the mailbox timer. The mailbox
8158 * timer is armed when a new mailbox command is issued and the timer
8159 * is deleted when the mailbox completes. The function is called by
8160 * the kernel timer code when a mailbox does not complete within
8161 * expected time. This function wakes up the worker thread to
8162 * process the mailbox timeout and returns. All the processing is
8163 * done by the worker thread function lpfc_mbox_timeout_handler.
8166 lpfc_mbox_timeout(struct timer_list *t)
8168 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
8169 unsigned long iflag;
8170 uint32_t tmo_posted;
8172 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
8173 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
8175 phba->pport->work_port_events |= WORKER_MBOX_TMO;
8176 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
8179 lpfc_worker_wake_up(phba);
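/*
 * Illustrative sketch (not driver code): the mbox timer pairs with
 * command issuance roughly as follows, using the per-command timeout
 * from lpfc_mbox_tmo_val():
 *
 *	mod_timer(&psli->mbox_tmo, jiffies +
 *		  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, pmbox)));
 *	... mailbox completes and the timer is deleted ...
 *
 * Only when no completion arrives in time does lpfc_mbox_timeout() fire
 * and hand the recovery work to the worker thread.
 */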
8184 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions are pending
8186 * @phba: Pointer to HBA context object.
8188 * This function checks if any mailbox completions are present on the mailbox completion queue.
8192 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
8196 struct lpfc_queue *mcq;
8197 struct lpfc_mcqe *mcqe;
8198 bool pending_completions = false;
8201 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8204 /* Check for completions on mailbox completion queue */
8206 mcq = phba->sli4_hba.mbx_cq;
8207 idx = mcq->hba_index;
8208 qe_valid = mcq->qe_valid;
8209 while (bf_get_le32(lpfc_cqe_valid,
8210 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
8211 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
8212 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
8213 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
8214 pending_completions = true;
8217 idx = (idx + 1) % mcq->entry_count;
8218 if (mcq->hba_index == idx)
8221 /* if the index wrapped around, toggle the valid bit */
8222 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
8223 qe_valid = (qe_valid) ? 0 : 1;
8225 return pending_completions;
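/*
 * Note on the scan above (an illustrative restatement, not extra driver
 * logic): on ports with the cqav capability the CQE valid bit alternates
 * on every wrap of the ring, so a consumer walks the queue in this shape:
 *
 *	idx = q->hba_index; qe_valid = q->qe_valid;
 *	while (valid bit of entry(q, idx) == qe_valid) {
 *		idx = (idx + 1) % q->entry_count;
 *		if (cqav && !idx)
 *			qe_valid = !qe_valid;	(toggle on wrap)
 *	}
 */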
8230 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
8232 * @phba: Pointer to HBA context object.
8234 * For sli4, it is possible to miss an interrupt. As such mbox completions
8235 * may be missed, causing erroneous mailbox timeouts to occur. This function
8236 * checks to see if mbox completions are on the mailbox completion queue
8237 * and will process all the completions associated with the eq for the
8238 * mailbox completion queue.
8241 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
8243 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
8245 struct lpfc_queue *fpeq = NULL;
8246 struct lpfc_queue *eq;
8249 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8252 /* Find the EQ associated with the mbox CQ */
8253 if (sli4_hba->hdwq) {
8254 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
8255 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
8256 if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
8265 /* Turn off interrupts from this EQ */
8267 sli4_hba->sli4_eq_clr_intr(fpeq);
8269 /* Check to see if a mbox completion is pending */
8271 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
8274 * If a mbox completion is pending, process all the events on the EQ
8275 * associated with the mbox completion queue (this could include
8276 * mailbox commands, async events, els commands, receive queue data, etc.).
8281 /* process and rearm the EQ */
8282 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
8284 /* Always clear and re-arm the EQ */
8285 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
8287 return mbox_pending;
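/*
 * Typical use (see lpfc_mbox_timeout_handler below): call this helper
 * before declaring a mailbox timeout, so a merely missed interrupt is
 * handled instead of resetting the HBA. A sketch:
 *
 *	lpfc_sli4_process_missed_mbox_completions(phba);
 *	if (!phba->sli.mbox_active)
 *		return;		(the command had in fact completed)
 */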
8292 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
8293 * @phba: Pointer to HBA context object.
8295 * This function is called from the worker thread when a mailbox command times out.
8296 * The caller is not required to hold any locks. This function will reset the
8297 * HBA and recover all the pending commands.
8300 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
8302 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
8303 MAILBOX_t *mb = NULL;
8305 struct lpfc_sli *psli = &phba->sli;
8307 /* If the mailbox completed, process the completion */
8308 lpfc_sli4_process_missed_mbox_completions(phba);
8310 if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
8315 /* Check the pmbox pointer first. There is a race condition
8316 * between the mbox timeout handler getting executed in the
8317 * worklist and the mailbox actually completing. When this
8318 * race condition occurs, the mbox_active will be NULL.
8320 spin_lock_irq(&phba->hbalock);
8321 if (pmbox == NULL) {
8322 lpfc_printf_log(phba, KERN_WARNING,
8324 "0353 Active Mailbox cleared - mailbox timeout "
8326 spin_unlock_irq(&phba->hbalock);
8330 /* Mbox cmd <mbxCommand> timeout */
8331 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8332 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
8334 phba->pport->port_state,
8336 phba->sli.mbox_active);
8337 spin_unlock_irq(&phba->hbalock);
8339 /* Setting state unknown so lpfc_sli_abort_iocb_ring
8340 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
8341 * it to fail all outstanding SCSI IO.
8343 spin_lock_irq(&phba->pport->work_port_lock);
8344 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8345 spin_unlock_irq(&phba->pport->work_port_lock);
8346 spin_lock_irq(&phba->hbalock);
8347 phba->link_state = LPFC_LINK_UNKNOWN;
8348 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8349 spin_unlock_irq(&phba->hbalock);
8351 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8352 "0345 Resetting board due to mailbox timeout\n");
8354 /* Reset the HBA device */
8355 lpfc_reset_hba(phba);
8359 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
8360 * @phba: Pointer to HBA context object.
8361 * @pmbox: Pointer to mailbox object.
8362 * @flag: Flag indicating how the mailbox needs to be processed.
8364 * This function is called by discovery code and HBA management code
8365 * to submit a mailbox command to firmware with SLI-3 interface spec. This
8366 * function gets the hbalock to protect the data structures.
8367 * The mailbox command can be submitted in polling mode, in which case
8368 * this function will wait in a polling loop for the completion of the mailbox.
8370 * If the mailbox is submitted in no_wait mode (not polling) the
8371 * function will submit the command and return immediately without waiting
8372 * for the mailbox completion. The no_wait is supported only when HBA
8373 * is in SLI2/SLI3 mode - interrupts are enabled.
8374 * The SLI interface allows only one mailbox pending at a time. If the
8375 * mailbox is issued in polling mode and there is already a mailbox
8376 * pending, then the function will return an error. If the mailbox is issued
8377 * in NO_WAIT mode and there is a mailbox pending already, the function
8378 * will return MBX_BUSY after queuing the mailbox into the mailbox queue.
8379 * The sli layer owns the mailbox object until the completion of the mailbox
8380 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
8381 * return codes, the caller owns the mailbox command after the function returns.
8385 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
8389 struct lpfc_sli *psli = &phba->sli;
8390 uint32_t status, evtctr;
8391 uint32_t ha_copy, hc_copy;
8393 unsigned long timeout;
8394 unsigned long drvr_flag = 0;
8395 uint32_t word0, ldata;
8396 void __iomem *to_slim;
8397 int processing_queue = 0;
8399 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8401 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8402 /* processing mbox queue from intr_handler */
8403 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8404 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8407 processing_queue = 1;
8408 pmbox = lpfc_mbox_get(phba);
8410 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8415 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
8416 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
8418 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8419 lpfc_printf_log(phba, KERN_ERR,
8420 LOG_MBOX | LOG_VPORT,
8421 "1806 Mbox x%x failed. No vport\n",
8422 pmbox->u.mb.mbxCommand);
8424 goto out_not_finished;
8428 /* If the PCI channel is in offline state, do not post mbox. */
8429 if (unlikely(pci_channel_offline(phba->pcidev))) {
8430 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8431 goto out_not_finished;
8434 /* If HBA has a deferred error attention, fail the mailbox command. */
8435 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8436 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8437 goto out_not_finished;
8443 status = MBX_SUCCESS;
8445 if (phba->link_state == LPFC_HBA_ERROR) {
8446 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8448 /* Mbox command <mbxCommand> cannot issue */
8449 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8450 "(%d):0311 Mailbox command x%x cannot "
8451 "issue Data: x%x x%x\n",
8452 pmbox->vport ? pmbox->vport->vpi : 0,
8453 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8454 goto out_not_finished;
8457 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
8458 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
8459 !(hc_copy & HC_MBINT_ENA)) {
8460 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8461 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8462 "(%d):2528 Mailbox command x%x cannot "
8463 "issue Data: x%x x%x\n",
8464 pmbox->vport ? pmbox->vport->vpi : 0,
8465 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8466 goto out_not_finished;
8470 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8471 /* Polling for a mbox command when another one is already active
8472 * is not allowed in SLI. Also, the driver must have established
8473 * SLI2 mode to queue and process multiple mbox commands.
8476 if (flag & MBX_POLL) {
8477 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8479 /* Mbox command <mbxCommand> cannot issue */
8480 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8481 "(%d):2529 Mailbox command x%x "
8482 "cannot issue Data: x%x x%x\n",
8483 pmbox->vport ? pmbox->vport->vpi : 0,
8484 pmbox->u.mb.mbxCommand,
8485 psli->sli_flag, flag);
8486 goto out_not_finished;
8489 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
8490 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8491 /* Mbox command <mbxCommand> cannot issue */
8492 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8493 "(%d):2530 Mailbox command x%x "
8494 "cannot issue Data: x%x x%x\n",
8495 pmbox->vport ? pmbox->vport->vpi : 0,
8496 pmbox->u.mb.mbxCommand,
8497 psli->sli_flag, flag);
8498 goto out_not_finished;
8501 /* Another mailbox command is still being processed, queue this
8502 * command to be processed later.
8504 lpfc_mbox_put(phba, pmbox);
8506 /* Mbox cmd issue - BUSY */
8507 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8508 "(%d):0308 Mbox cmd issue - BUSY Data: "
8509 "x%x x%x x%x x%x\n",
8510 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
8512 phba->pport ? phba->pport->port_state : 0xff,
8513 psli->sli_flag, flag);
8515 psli->slistat.mbox_busy++;
8516 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8519 lpfc_debugfs_disc_trc(pmbox->vport,
8520 LPFC_DISC_TRC_MBOX_VPORT,
8521 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
8522 (uint32_t)mbx->mbxCommand,
8523 mbx->un.varWords[0], mbx->un.varWords[1]);
8526 lpfc_debugfs_disc_trc(phba->pport,
8528 "MBOX Bsy: cmd:x%x mb:x%x x%x",
8529 (uint32_t)mbx->mbxCommand,
8530 mbx->un.varWords[0], mbx->un.varWords[1]);
8536 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8538 /* If we are not polling, we MUST be in SLI2 mode */
8539 if (flag != MBX_POLL) {
8540 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
8541 (mbx->mbxCommand != MBX_KILL_BOARD)) {
8542 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8543 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8544 /* Mbox command <mbxCommand> cannot issue */
8545 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8546 "(%d):2531 Mailbox command x%x "
8547 "cannot issue Data: x%x x%x\n",
8548 pmbox->vport ? pmbox->vport->vpi : 0,
8549 pmbox->u.mb.mbxCommand,
8550 psli->sli_flag, flag);
8551 goto out_not_finished;
8553 /* timeout active mbox command */
8554 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8556 mod_timer(&psli->mbox_tmo, jiffies + timeout);
8559 /* Mailbox cmd <cmd> issue */
8560 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8561 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
8563 pmbox->vport ? pmbox->vport->vpi : 0,
8565 phba->pport ? phba->pport->port_state : 0xff,
8566 psli->sli_flag, flag);
8568 if (mbx->mbxCommand != MBX_HEARTBEAT) {
8570 lpfc_debugfs_disc_trc(pmbox->vport,
8571 LPFC_DISC_TRC_MBOX_VPORT,
8572 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8573 (uint32_t)mbx->mbxCommand,
8574 mbx->un.varWords[0], mbx->un.varWords[1]);
8577 lpfc_debugfs_disc_trc(phba->pport,
8579 "MBOX Send: cmd:x%x mb:x%x x%x",
8580 (uint32_t)mbx->mbxCommand,
8581 mbx->un.varWords[0], mbx->un.varWords[1]);
8585 psli->slistat.mbox_cmd++;
8586 evtctr = psli->slistat.mbox_event;
8588 /* next set own bit for the adapter and copy over command word */
8589 mbx->mbxOwner = OWN_CHIP;
8591 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8592 /* Populate mbox extension offset word. */
8593 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
8594 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8595 = (uint8_t *)phba->mbox_ext
8596 - (uint8_t *)phba->mbox;
8599 /* Copy the mailbox extension data */
8600 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
8601 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
8602 (uint8_t *)phba->mbox_ext,
8603 pmbox->in_ext_byte_len);
8605 /* Copy command data to host SLIM area */
8606 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
8608 /* Populate mbox extension offset word. */
8609 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
8610 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8611 = MAILBOX_HBA_EXT_OFFSET;
8613 /* Copy the mailbox extension data */
8614 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
8615 lpfc_memcpy_to_slim(phba->MBslimaddr +
8616 MAILBOX_HBA_EXT_OFFSET,
8617 pmbox->ctx_buf, pmbox->in_ext_byte_len);
8619 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8620 /* copy command data into host mbox for cmpl */
8621 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
8624 /* First copy mbox command data to HBA SLIM, skip past first word */
8626 to_slim = phba->MBslimaddr + sizeof (uint32_t);
8627 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
8628 MAILBOX_CMD_SIZE - sizeof (uint32_t));
8630 /* Next copy over first word, with mbxOwner set */
8631 ldata = *((uint32_t *)mbx);
8632 to_slim = phba->MBslimaddr;
8633 writel(ldata, to_slim);
8634 readl(to_slim); /* flush */
8636 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8637 /* switch over to host mailbox */
8638 psli->sli_flag |= LPFC_SLI_ACTIVE;
8645 /* Set up reference to mailbox command */
8646 psli->mbox_active = pmbox;
8647 /* Interrupt board to do it */
8648 writel(CA_MBATT, phba->CAregaddr);
8649 readl(phba->CAregaddr); /* flush */
8650 /* Don't wait for it to finish, just return */
8654 /* Set up null reference to mailbox command */
8655 psli->mbox_active = NULL;
8656 /* Interrupt board to do it */
8657 writel(CA_MBATT, phba->CAregaddr);
8658 readl(phba->CAregaddr); /* flush */
8660 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8661 /* First read mbox status word */
8662 word0 = *((uint32_t *)phba->mbox);
8663 word0 = le32_to_cpu(word0);
8665 /* First read mbox status word */
8666 if (lpfc_readl(phba->MBslimaddr, &word0)) {
8667 spin_unlock_irqrestore(&phba->hbalock,
8669 goto out_not_finished;
8673 /* Read the HBA Host Attention Register */
8674 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8675 spin_unlock_irqrestore(&phba->hbalock,
8677 goto out_not_finished;
8679 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8682 /* Wait for command to complete */
8683 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
8684 (!(ha_copy & HA_MBATT) &&
8685 (phba->link_state > LPFC_WARM_START))) {
8686 if (time_after(jiffies, timeout)) {
8687 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8688 spin_unlock_irqrestore(&phba->hbalock,
8690 goto out_not_finished;
8693 /* Check if we took a mbox interrupt while we were polling */
8695 if (((word0 & OWN_CHIP) != OWN_CHIP)
8696 && (evtctr != psli->slistat.mbox_event))
8700 spin_unlock_irqrestore(&phba->hbalock,
8703 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8706 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8707 /* First copy command data */
8708 word0 = *((uint32_t *)phba->mbox);
8709 word0 = le32_to_cpu(word0);
8710 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
8713 /* Check real SLIM for any errors */
8714 slimword0 = readl(phba->MBslimaddr);
8715 slimmb = (MAILBOX_t *)&slimword0;
8716 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
8717 && slimmb->mbxStatus) {
8724 /* First copy command data */
8725 word0 = readl(phba->MBslimaddr);
8727 /* Read the HBA Host Attention Register */
8728 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8729 spin_unlock_irqrestore(&phba->hbalock,
8731 goto out_not_finished;
8735 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8736 /* copy results back to user */
8737 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
8739 /* Copy the mailbox extension data */
8740 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8741 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
8743 pmbox->out_ext_byte_len);
8746 /* First copy command data */
8747 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
8749 /* Copy the mailbox extension data */
8750 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8751 lpfc_memcpy_from_slim(
8754 MAILBOX_HBA_EXT_OFFSET,
8755 pmbox->out_ext_byte_len);
8759 writel(HA_MBATT, phba->HAregaddr);
8760 readl(phba->HAregaddr); /* flush */
8762 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8763 status = mbx->mbxStatus;
8766 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8770 if (processing_queue) {
8771 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
8772 lpfc_mbox_cmpl_put(phba, pmbox);
8774 return MBX_NOT_FINISHED;
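/*
 * Illustrative caller pattern (a sketch; compare the DOWN_LINK issue in
 * the port bring-up path earlier in this file):
 *
 *	lpfc_down_link(phba, mboxq);
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *	if (rc != MBX_SUCCESS)
 *		... log the failure and recover ...
 *	mempool_free(mboxq, phba->mbox_mem_pool);
 */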
8778 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
8779 * @phba: Pointer to HBA context object.
8781 * The function blocks the posting of SLI4 asynchronous mailbox commands from
8782 * the driver internal pending mailbox queue. It will then try to wait out the
8783 * possible outstanding mailbox command before returning.
8786 * 0 - the outstanding mailbox command completed; otherwise, the wait for
8787 * the outstanding mailbox command timed out.
8790 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
8792 struct lpfc_sli *psli = &phba->sli;
8794 unsigned long timeout = 0;
8796 /* Mark the asynchronous mailbox command posting as blocked */
8797 spin_lock_irq(&phba->hbalock);
8798 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8799 /* Determine how long we might wait for the active mailbox
8800 * command to be gracefully completed by firmware.
8802 if (phba->sli.mbox_active)
8803 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8804 phba->sli.mbox_active) *
8806 spin_unlock_irq(&phba->hbalock);
8808 /* Make sure the mailbox is really active */
8810 lpfc_sli4_process_missed_mbox_completions(phba);
8812 /* Wait for the outstanding mailbox command to complete */
8813 while (phba->sli.mbox_active) {
8814 /* Check active mailbox complete status every 2ms */
8816 if (time_after(jiffies, timeout)) {
8817 /* Timed out; mark the outstanding cmd as not complete */
8823 /* Cannot cleanly block async mailbox command, fail it */
8825 spin_lock_irq(&phba->hbalock);
8826 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8827 spin_unlock_irq(&phba->hbalock);
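/*
 * This helper is paired with lpfc_sli4_async_mbox_unblock() below; the
 * synchronous path in lpfc_sli_issue_mbox_s4() uses the pair roughly
 * like this (sketch):
 *
 *	rc = lpfc_sli4_async_mbox_block(phba);
 *	if (!rc) {
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *		...
 *		lpfc_sli4_async_mbox_unblock(phba);
 *	}
 */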
8833 * lpfc_sli4_async_mbox_unblock - Unblock posting of SLI4 async mailbox commands
8834 * @phba: Pointer to HBA context object.
8836 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
8837 * commands from the driver internal pending mailbox queue. It makes sure
8838 * that there is no outstanding mailbox command before resuming posting
8839 * asynchronous mailbox commands. If, for any reason, there is an outstanding
8840 * mailbox command, it will try to wait it out before resuming asynchronous
8841 * mailbox command posting.
8844 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8846 struct lpfc_sli *psli = &phba->sli;
8848 spin_lock_irq(&phba->hbalock);
8849 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8850 /* Asynchronous mailbox posting is not blocked, do nothing */
8851 spin_unlock_irq(&phba->hbalock);
8855 /* The outstanding synchronous mailbox command is guaranteed to be done,
8856 * whether it succeeded or timed out; after a timeout the outstanding
8857 * command is always removed, so just unblock posting of async
8858 * mailbox commands and resume.
8860 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8861 spin_unlock_irq(&phba->hbalock);
8863 /* wake up worker thread to post asynchronous mailbox command */
8864 lpfc_worker_wake_up(phba);
8868 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8869 * @phba: Pointer to HBA context object.
8870 * @mboxq: Pointer to mailbox object.
8872 * The function waits for the bootstrap mailbox register ready bit from the
8873 * port for twice the regular mailbox command timeout value.
8875 * 0 - no timeout on waiting for bootstrap mailbox register ready.
8876 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
8879 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8882 unsigned long timeout;
8883 struct lpfc_register bmbx_reg;
8885 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8889 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8890 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8894 if (time_after(jiffies, timeout))
8895 return MBXERR_ERROR;
8896 } while (!db_ready);
8902 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
8903 * @phba: Pointer to HBA context object.
8904 * @mboxq: Pointer to mailbox object.
8906 * The function posts a mailbox to the port. The mailbox is expected
8907 * to be completely filled in and ready for the port to operate on it.
8908 * This routine executes a synchronous completion operation on the
8909 * mailbox by polling for its completion.
8911 * The caller must not be holding any locks when calling this routine.
8914 * MBX_SUCCESS - mailbox posted successfully
8915 * Any of the MBX error values.
8918 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8920 int rc = MBX_SUCCESS;
8921 unsigned long iflag;
8922 uint32_t mcqe_status;
8924 struct lpfc_sli *psli = &phba->sli;
8925 struct lpfc_mqe *mb = &mboxq->u.mqe;
8926 struct lpfc_bmbx_create *mbox_rgn;
8927 struct dma_address *dma_address;
8930 * Only one mailbox can be active to the bootstrap mailbox region
8931 * at a time and there is no queueing provided.
8933 spin_lock_irqsave(&phba->hbalock, iflag);
8934 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8935 spin_unlock_irqrestore(&phba->hbalock, iflag);
8936 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8937 "(%d):2532 Mailbox command x%x (x%x/x%x) "
8938 "cannot issue Data: x%x x%x\n",
8939 mboxq->vport ? mboxq->vport->vpi : 0,
8940 mboxq->u.mb.mbxCommand,
8941 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8942 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8943 psli->sli_flag, MBX_POLL);
8944 return MBXERR_ERROR;
8946 /* The driver grabs the token and owns it until release */
8947 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8948 phba->sli.mbox_active = mboxq;
8949 spin_unlock_irqrestore(&phba->hbalock, iflag);
8951 /* wait for the bootstrap mbox register to become ready */
8952 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8956 * Initialize the bootstrap memory region to avoid stale data areas
8957 * in the mailbox post. Then copy the caller's mailbox contents to
8958 * the bmbx mailbox region.
8960 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8961 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
8962 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8963 sizeof(struct lpfc_mqe));
8965 /* Post the high mailbox dma address to the port and wait for ready. */
8966 dma_address = &phba->sli4_hba.bmbx.dma_address;
8967 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8969 /* wait for the bootstrap mbox register ready bit after the hi-address write */
8970 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8974 /* Post the low mailbox dma address to the port. */
8975 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
8977 /* wait for the bootstrap mbox register ready bit after the low-address write */
8978 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8983 * Read the CQ to ensure the mailbox has completed.
8984 * If so, update the mailbox status so that the upper layers
8985 * can complete the request normally.
8987 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8988 sizeof(struct lpfc_mqe));
8989 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8990 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8991 sizeof(struct lpfc_mcqe));
8992 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8994 * When the CQE status indicates a failure and the mailbox status
8995 * indicates success then copy the CQE status into the mailbox status
8996 * (and prefix it with x4000).
8998 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
8999 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
9000 bf_set(lpfc_mqe_status, mb,
9001 (LPFC_MBX_ERROR_RANGE | mcqe_status));
9004 lpfc_sli4_swap_str(phba, mboxq);
9006 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9007 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
9008 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
9009 " x%x x%x CQ: x%x x%x x%x x%x\n",
9010 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9011 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9012 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9013 bf_get(lpfc_mqe_status, mb),
9014 mb->un.mb_words[0], mb->un.mb_words[1],
9015 mb->un.mb_words[2], mb->un.mb_words[3],
9016 mb->un.mb_words[4], mb->un.mb_words[5],
9017 mb->un.mb_words[6], mb->un.mb_words[7],
9018 mb->un.mb_words[8], mb->un.mb_words[9],
9019 mb->un.mb_words[10], mb->un.mb_words[11],
9020 mb->un.mb_words[12], mboxq->mcqe.word0,
9021 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
9022 mboxq->mcqe.trailer);
9024 /* We are holding the token; no lock needed when releasing it */
9025 spin_lock_irqsave(&phba->hbalock, iflag);
9026 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9027 phba->sli.mbox_active = NULL;
9028 spin_unlock_irqrestore(&phba->hbalock, iflag);
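/*
 * Worked example for the status merge above (assuming
 * LPFC_MBX_ERROR_RANGE is the 0x4000 prefix named in the comment): an
 * MCQE status of 0x2 reported against an MQE status of MBX_SUCCESS is
 * rewritten as 0x4002, so upper layers observe the CQE failure rather
 * than a bogus success.
 */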
9033 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
9034 * @phba: Pointer to HBA context object.
9035 * @mboxq: Pointer to mailbox object.
9036 * @flag: Flag indicating how the mailbox needs to be processed.
9038 * This function is called by discovery code and HBA management code to submit
9039 * a mailbox command to firmware with SLI-4 interface spec.
9041 * Return codes: the caller owns the mailbox command after the function returns.
9045 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
9048 struct lpfc_sli *psli = &phba->sli;
9049 unsigned long iflags;
9053 /* Dump the mailbox command at issue time, if idiag dumping is set up */
9053 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
9055 rc = lpfc_mbox_dev_check(phba);
9057 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9058 "(%d):2544 Mailbox command x%x (x%x/x%x) "
9059 "cannot issue Data: x%x x%x\n",
9060 mboxq->vport ? mboxq->vport->vpi : 0,
9061 mboxq->u.mb.mbxCommand,
9062 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9063 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9064 psli->sli_flag, flag);
9065 goto out_not_finished;
9068 /* Detect polling mode and jump to a handler */
9069 if (!phba->sli4_hba.intr_enable) {
9070 if (flag == MBX_POLL)
9071 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9074 if (rc != MBX_SUCCESS)
9075 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
9076 "(%d):2541 Mailbox command x%x "
9077 "(x%x/x%x) failure: "
9078 "mqe_sta: x%x mcqe_sta: x%x/x%x "
9080 mboxq->vport ? mboxq->vport->vpi : 0,
9081 mboxq->u.mb.mbxCommand,
9082 lpfc_sli_config_mbox_subsys_get(phba,
9084 lpfc_sli_config_mbox_opcode_get(phba,
9086 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9087 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9088 bf_get(lpfc_mcqe_ext_status,
9090 psli->sli_flag, flag);
9092 } else if (flag == MBX_POLL) {
9093 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
9094 "(%d):2542 Try to issue mailbox command "
9095 "x%x (x%x/x%x) synchronously ahead of async "
9096 "mailbox command queue: x%x x%x\n",
9097 mboxq->vport ? mboxq->vport->vpi : 0,
9098 mboxq->u.mb.mbxCommand,
9099 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9100 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9101 psli->sli_flag, flag);
9102 /* Try to block the asynchronous mailbox posting */
9103 rc = lpfc_sli4_async_mbox_block(phba);
9105 /* Successfully blocked, now issue sync mbox cmd */
9106 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9107 if (rc != MBX_SUCCESS)
9108 lpfc_printf_log(phba, KERN_WARNING,
9110 "(%d):2597 Sync Mailbox command "
9111 "x%x (x%x/x%x) failure: "
9112 "mqe_sta: x%x mcqe_sta: x%x/x%x "
9114 mboxq->vport ? mboxq->vport->vpi : 0,
9115 mboxq->u.mb.mbxCommand,
9116 lpfc_sli_config_mbox_subsys_get(phba,
9118 lpfc_sli_config_mbox_opcode_get(phba,
9120 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9121 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9122 bf_get(lpfc_mcqe_ext_status,
9124 psli->sli_flag, flag);
9125 /* Unblock the async mailbox posting afterward */
9126 lpfc_sli4_async_mbox_unblock(phba);
9131 /* Now, interrupt mode asynchronous mailbox command */
9132 rc = lpfc_mbox_cmd_check(phba, mboxq);
9134 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9135 "(%d):2543 Mailbox command x%x (x%x/x%x) "
9136 "cannot issue Data: x%x x%x\n",
9137 mboxq->vport ? mboxq->vport->vpi : 0,
9138 mboxq->u.mb.mbxCommand,
9139 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9140 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9141 psli->sli_flag, flag);
9142 goto out_not_finished;
9145 /* Put the mailbox command into the driver internal FIFO */
9146 psli->slistat.mbox_busy++;
9147 spin_lock_irqsave(&phba->hbalock, iflags);
9148 lpfc_mbox_put(phba, mboxq);
9149 spin_unlock_irqrestore(&phba->hbalock, iflags);
9150 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9151 "(%d):0354 Mbox cmd issue - Enqueue Data: "
9152 "x%x (x%x/x%x) x%x x%x x%x\n",
9153 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
9154 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
9155 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9156 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9157 phba->pport->port_state,
9158 psli->sli_flag, MBX_NOWAIT);
9159 /* Wake up worker thread to transport mailbox command from head */
9160 lpfc_worker_wake_up(phba);
9165 return MBX_NOT_FINISHED;
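/*
 * Illustrative async caller pattern (a sketch; my_mbox_cmpl is a
 * hypothetical completion handler, not a driver symbol):
 *
 *	mboxq->vport = vport;
 *	mboxq->mbox_cmpl = my_mbox_cmpl;
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 *	if (rc == MBX_NOT_FINISHED)
 *		mempool_free(mboxq, phba->mbox_mem_pool);
 *
 * MBX_BUSY and MBX_SUCCESS leave ownership with the SLI layer until the
 * completion handler runs; other return codes hand the mailbox back to
 * the caller, mirroring the ownership rules documented for the s3
 * variant above.
 */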
9169 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
9170 * @phba: Pointer to HBA context object.
9172 * This function is called by the worker thread to send a mailbox command to
9173 * SLI4 HBA firmware.
9177 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
9179 struct lpfc_sli *psli = &phba->sli;
9180 LPFC_MBOXQ_t *mboxq;
9181 int rc = MBX_SUCCESS;
9182 unsigned long iflags;
9183 struct lpfc_mqe *mqe;
9186 /* Check interrupt mode before posting an async mailbox command */
9187 if (unlikely(!phba->sli4_hba.intr_enable))
9188 return MBX_NOT_FINISHED;
9190 /* Check for mailbox command service token */
9191 spin_lock_irqsave(&phba->hbalock, iflags);
9192 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9193 spin_unlock_irqrestore(&phba->hbalock, iflags);
9194 return MBX_NOT_FINISHED;
9196 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9197 spin_unlock_irqrestore(&phba->hbalock, iflags);
9198 return MBX_NOT_FINISHED;
9200 if (unlikely(phba->sli.mbox_active)) {
9201 spin_unlock_irqrestore(&phba->hbalock, iflags);
9202 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9203 "0384 There is pending active mailbox cmd\n");
9204 return MBX_NOT_FINISHED;
9206 /* Take the mailbox command service token */
9207 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9209 /* Get the next mailbox command from head of queue */
9210 mboxq = lpfc_mbox_get(phba);
9212 /* If no more mailbox commands are waiting to be posted, we're done */
9214 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9215 spin_unlock_irqrestore(&phba->hbalock, iflags);
9218 phba->sli.mbox_active = mboxq;
9219 spin_unlock_irqrestore(&phba->hbalock, iflags);
9221 /* Check device readiness for posting mailbox command */
9222 rc = lpfc_mbox_dev_check(phba);
9224 /* The driver cleanup routine will clean up the pending mailbox */
9225 goto out_not_finished;
9227 /* Prepare the mbox command to be posted */
9228 mqe = &mboxq->u.mqe;
9229 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
9231 /* Start timer for the mbox_tmo and log some mailbox post messages */
9232 mod_timer(&psli->mbox_tmo, (jiffies +
9233 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
9235 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9236 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
9238 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9239 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9240 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9241 phba->pport->port_state, psli->sli_flag);
9243 if (mbx_cmnd != MBX_HEARTBEAT) {
9245 lpfc_debugfs_disc_trc(mboxq->vport,
9246 LPFC_DISC_TRC_MBOX_VPORT,
9247 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9248 mbx_cmnd, mqe->un.mb_words[0],
9249 mqe->un.mb_words[1]);
9251 lpfc_debugfs_disc_trc(phba->pport,
9253 "MBOX Send: cmd:x%x mb:x%x x%x",
9254 mbx_cmnd, mqe->un.mb_words[0],
9255 mqe->un.mb_words[1]);
9258 psli->slistat.mbox_cmd++;
9260 /* Post the mailbox command to the port */
9261 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
9262 if (rc != MBX_SUCCESS) {
9263 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9264 "(%d):2533 Mailbox command x%x (x%x/x%x) "
9265 "cannot issue Data: x%x x%x\n",
9266 mboxq->vport ? mboxq->vport->vpi : 0,
9267 mboxq->u.mb.mbxCommand,
9268 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9269 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9270 psli->sli_flag, MBX_NOWAIT);
9271 goto out_not_finished;
9277 spin_lock_irqsave(&phba->hbalock, iflags);
9278 if (phba->sli.mbox_active) {
9279 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
9280 __lpfc_mbox_cmpl_put(phba, mboxq);
9281 /* Release the token */
9282 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9283 phba->sli.mbox_active = NULL;
9285 spin_unlock_irqrestore(&phba->hbalock, iflags);
9287 return MBX_NOT_FINISHED;
9291 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
9292 * @phba: Pointer to HBA context object.
9293 * @pmbox: Pointer to mailbox object.
9294 * @flag: Flag indicating how the mailbox needs to be processed.
9296 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine via
9297 * the API jump table function pointer in the lpfc_hba struct.
9299 * Return codes: the caller owns the mailbox command after the function returns.
9303 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
9305 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
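/*
 * After lpfc_mbox_api_table_setup() below has run, this wrapper
 * resolves to lpfc_sli_issue_mbox_s3 (LPFC_PCI_DEV_LP) or
 * lpfc_sli_issue_mbox_s4 (LPFC_PCI_DEV_OC), so callers never select an
 * SLI revision directly.
 */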
9309 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
9310 * @phba: The hba struct for which this call is being executed.
9311 * @dev_grp: The HBA PCI-Device group number.
9313 * This routine sets up the mbox interface API function jump table in the @phba struct.
9315 * Returns: 0 - success, -ENODEV - failure.
9318 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9322 case LPFC_PCI_DEV_LP:
9323 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
9324 phba->lpfc_sli_handle_slow_ring_event =
9325 lpfc_sli_handle_slow_ring_event_s3;
9326 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
9327 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
9328 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
9330 case LPFC_PCI_DEV_OC:
9331 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
9332 phba->lpfc_sli_handle_slow_ring_event =
9333 lpfc_sli_handle_slow_ring_event_s4;
9334 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
9335 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
9336 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
9339 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9340 "1420 Invalid HBA PCI-device group: 0x%x\n",
9348 * __lpfc_sli_ringtx_put - Add an iocb to the txq
9349 * @phba: Pointer to HBA context object.
9350 * @pring: Pointer to driver SLI ring object.
9351 * @piocb: Pointer to the newly added command iocb.
9353 * This function is called with hbalock held for SLI3 ports or
9354 * the ring lock held for SLI4 ports to add a command
9355 * iocb to the txq when the SLI layer cannot submit the command iocb to the firmware.
9359 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9360 struct lpfc_iocbq *piocb)
9362 if (phba->sli_rev == LPFC_SLI_REV4)
9363 lockdep_assert_held(&pring->ring_lock);
9365 lockdep_assert_held(&phba->hbalock);
9366 /* Insert the caller's iocb in the txq tail for later processing. */
9367 list_add_tail(&piocb->list, &pring->txq);
9371 * lpfc_sli_next_iocb - Get the next iocb in the txq
9372 * @phba: Pointer to HBA context object.
9373 * @pring: Pointer to driver SLI ring object.
9374 * @piocb: Pointer to address of newly added command iocb.
9376 * This function is called with hbalock held before a new
9377 * iocb is submitted to the firmware. This function checks the
9378 * txq and flushes any iocbs queued there to the firmware before
9379 * submitting new iocbs to the firmware.
9380 * If there are iocbs in the txq which need to be submitted
9381 * to firmware, lpfc_sli_next_iocb returns the first element
9382 * of the txq after dequeuing it from txq.
9383 * If there is no iocb in the txq then the function will return
9384 * *piocb and *piocb is set to NULL. The caller needs to check
9385 * *piocb to determine whether there are more commands in the txq.
9387 static struct lpfc_iocbq *
9388 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9389 struct lpfc_iocbq **piocb)
9391 struct lpfc_iocbq *nextiocb;
9393 lockdep_assert_held(&phba->hbalock);
9395 nextiocb = lpfc_sli_ringtx_get(phba, pring);
9405 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless version of lpfc_sli_issue_iocb
9406 * @phba: Pointer to HBA context object.
9407 * @ring_number: SLI ring number to issue iocb on.
9408 * @piocb: Pointer to command iocb.
9409 * @flag: Flag indicating if this command can be put into txq.
9411 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
9412 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
9413 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
9414 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
9415 * this function allows only iocbs for posting buffers. This function finds
9416 * next available slot in the command ring and posts the command to the
9417 * available slot and writes the port attention register to request that the
9418 * HBA start processing the new iocb. If there is no slot available in the ring
9419 * and flag & SLI_IOCB_RET_IOCB is not set, the new iocb is added to the txq;
9420 * otherwise the function returns IOCB_BUSY.
9422 * This function is called with hbalock held. The function will return success
9423 * after it successfully submits the iocb to the firmware or after adding it to the txq.
9427 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
9428 struct lpfc_iocbq *piocb, uint32_t flag)
9430 struct lpfc_iocbq *nextiocb;
9432 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
9434 lockdep_assert_held(&phba->hbalock);
9436 if (piocb->iocb_cmpl && (!piocb->vport) &&
9437 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
9438 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
9439 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9440 "1807 IOCB x%x failed. No vport\n",
9441 piocb->iocb.ulpCommand);
9447 /* If the PCI channel is in offline state, do not post iocbs. */
9448 if (unlikely(pci_channel_offline(phba->pcidev)))
9451 /* If HBA has a deferred error attention, fail the iocb. */
9452 if (unlikely(phba->hba_flag & DEFER_ERATT))
9456 * We should never get an IOCB if we are in a < LINK_DOWN state
9458 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
9462 * Check to see if we are blocking IOCB processing because of an
9463 * outstanding event.
9465 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
9468 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
9470 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
9471 * can be issued if the link is not up.
9473 switch (piocb->iocb.ulpCommand) {
9474 case CMD_GEN_REQUEST64_CR:
9475 case CMD_GEN_REQUEST64_CX:
9476 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9477 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
9478 FC_RCTL_DD_UNSOL_CMD) ||
9479 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9480 MENLO_TRANSPORT_TYPE))
9484 case CMD_QUE_RING_BUF_CN:
9485 case CMD_QUE_RING_BUF64_CN:
9487 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
9488 * completion, iocb_cmpl MUST be 0.
9490 if (piocb->iocb_cmpl)
9491 piocb->iocb_cmpl = NULL;
9493 case CMD_CREATE_XRI_CR:
9494 case CMD_CLOSE_XRI_CN:
9495 case CMD_CLOSE_XRI_CX:
9502 * For FCP commands, we must be in a state where we can process link attention events.
9505 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
9506 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
9510 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9511 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9512 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9515 lpfc_sli_update_ring(phba, pring);
9517 lpfc_sli_update_full_ring(phba, pring);
9520 return IOCB_SUCCESS;
9525 pring->stats.iocb_cmd_delay++;
9529 if (!(flag & SLI_IOCB_RET_IOCB)) {
9530 __lpfc_sli_ringtx_put(phba, pring, piocb);
9531 return IOCB_SUCCESS;
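/*
 * Illustrative return triage for callers (a sketch):
 *
 *	rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
 *	switch (rc) {
 *	case IOCB_SUCCESS:	(submitted, or parked on the txq)
 *	case IOCB_BUSY:		(no ring slot and SLI_IOCB_RET_IOCB set)
 *	case IOCB_ERROR:	(link/ring state rejected the command)
 *	}
 */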
9538 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
9539 * @phba: Pointer to HBA context object.
9540 * @piocbq: Pointer to command iocb.
9541 * @sglq: Pointer to the scatter gather queue object.
9543 * This routine converts the bpl or bde that is in the IOCB
9544 * to a sgl list for the sli4 hardware. The physical address
9545 * of the bpl/bde is converted back to a virtual address.
9546 * If the IOCB contains a BPL then the list of BDE's is
9547 * converted to sli4_sge's. If the IOCB contains a single
9548 * BDE then it is converted to a single sli_sge.
9549 * The IOCB is still in cpu endianness so the contents of
9550 * the bpl can be used without byte swapping.
9552 * Returns valid XRI = Success, NO_XRI = Failure.
9555 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
9556 struct lpfc_sglq *sglq)
9558 uint16_t xritag = NO_XRI;
9559 struct ulp_bde64 *bpl = NULL;
9560 struct ulp_bde64 bde;
9561 struct sli4_sge *sgl = NULL;
9562 struct lpfc_dmabuf *dmabuf;
9566 uint32_t offset = 0; /* accumulated offset in the sg request list */
9567 int inbound = 0; /* number of sg reply entries inbound from firmware */
9569 if (!piocbq || !sglq)
9572 sgl = (struct sli4_sge *)sglq->sgl;
9573 icmd = &piocbq->iocb;
9574 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
9575 return sglq->sli4_xritag;
9576 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9577 numBdes = icmd->un.genreq64.bdl.bdeSize /
9578 sizeof(struct ulp_bde64);
9579 /* The addrHigh and addrLow fields within the IOCB
9580 * have not been byteswapped yet so there is no
9581 * need to swap them back.
9583 if (piocbq->context3)
9584 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
9588 bpl = (struct ulp_bde64 *)dmabuf->virt;
9592 for (i = 0; i < numBdes; i++) {
9593 /* Should already be byte swapped. */
9594 sgl->addr_hi = bpl->addrHigh;
9595 sgl->addr_lo = bpl->addrLow;
9597 sgl->word2 = le32_to_cpu(sgl->word2);
9598 if ((i+1) == numBdes)
9599 bf_set(lpfc_sli4_sge_last, sgl, 1);
9601 bf_set(lpfc_sli4_sge_last, sgl, 0);
9602 /* swap the size field back to the cpu so we
9603 * can assign it to the sgl.
9605 bde.tus.w = le32_to_cpu(bpl->tus.w);
9606 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
9607 /* The offsets in the sgl need to be accumulated
9608 * separately for the request and reply lists.
9609 * The request is always first, the reply follows.
9611 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
9612 /* add up the reply sg entries */
9613 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
9615 /* first inbound? reset the offset */
9618 bf_set(lpfc_sli4_sge_offset, sgl, offset);
9619 bf_set(lpfc_sli4_sge_type, sgl,
9620 LPFC_SGE_TYPE_DATA);
9621 offset += bde.tus.f.bdeSize;
9623 sgl->word2 = cpu_to_le32(sgl->word2);
9627 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
9628 /* The addrHigh and addrLow fields of the BDE have not
9629 * been byteswapped yet so they need to be swapped
9630 * before putting them in the sgl.
9633 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
9635 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
9636 sgl->word2 = le32_to_cpu(sgl->word2);
9637 bf_set(lpfc_sli4_sge_last, sgl, 1);
9638 sgl->word2 = cpu_to_le32(sgl->word2);
9640 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
9642 return sglq->sli4_xritag;
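/*
 * Worked example for the conversion above (assuming the usual 12-byte
 * struct ulp_bde64): a request whose bdl.bdeSize is 48 carries
 * 48 / sizeof(struct ulp_bde64) == 4 BDEs, so four sli4_sge entries are
 * written and lpfc_sli4_sge_last is set only on the fourth.
 */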
9646 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
9647 * @phba: Pointer to HBA context object.
9648 * @iocbq: Pointer to command iocb.
9649 * @wqe: Pointer to the work queue entry.
9651 * This routine converts the iocb command to its Work Queue Entry
9652 * equivalent. The wqe pointer should not have any fields set when
9653 * this routine is called because it will memcpy over them.
9654 * This routine does not set the CQ_ID or the WQEC bits in the wqe.
9657 * Returns: 0 = Success, IOCB_ERROR = Failure.
9660 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9661 union lpfc_wqe128 *wqe)
9663 uint32_t xmit_len = 0, total_len = 0;
9667 uint8_t command_type = ELS_COMMAND_NON_FIP;
9670 uint16_t abrt_iotag;
9671 struct lpfc_iocbq *abrtiocbq;
9672 struct ulp_bde64 *bpl = NULL;
9673 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
9675 struct ulp_bde64 bde;
9676 struct lpfc_nodelist *ndlp;
9680 fip = phba->hba_flag & HBA_FIP_SUPPORT;
9681 /* The fcp commands will set command type */
9682 if (iocbq->iocb_flag & LPFC_IO_FCP)
9683 command_type = FCP_COMMAND;
9684 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
9685 command_type = ELS_COMMAND_FIP;
9687 command_type = ELS_COMMAND_NON_FIP;
9689 if (phba->fcp_embed_io)
9690 memset(wqe, 0, sizeof(union lpfc_wqe128));
9691 /* Some of the fields are in the right position already */
9692 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
9693 /* The ct field has moved so reset */
9694 wqe->generic.wqe_com.word7 = 0;
9695 wqe->generic.wqe_com.word10 = 0;
9697 abort_tag = (uint32_t) iocbq->iotag;
9698 xritag = iocbq->sli4_xritag;
9699 /* words0-2 bpl convert bde */
9700 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9701 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9702 sizeof(struct ulp_bde64);
9703 bpl = (struct ulp_bde64 *)
9704 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9708 /* Should already be byte swapped. */
9709 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
9710 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
9711 /* swap the size field back to the cpu so we
9712 * can assign it to the sgl.
9714 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
9715 xmit_len = wqe->generic.bde.tus.f.bdeSize;
9717 for (i = 0; i < numBdes; i++) {
9718 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9719 total_len += bde.tus.f.bdeSize;
9722 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
9724 iocbq->iocb.ulpIoTag = iocbq->iotag;
9725 cmnd = iocbq->iocb.ulpCommand;
9727 switch (iocbq->iocb.ulpCommand) {
9728 case CMD_ELS_REQUEST64_CR:
9729 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9730 ndlp = iocbq->context_un.ndlp;
9732 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9733 if (!iocbq->iocb.ulpLe) {
9734 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9735 "2007 Only Limited Edition cmd Format"
9736 " supported 0x%x\n",
9737 iocbq->iocb.ulpCommand);
9741 wqe->els_req.payload_len = xmit_len;
9742 /* Els_request64 has a TMO */
9743 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9744 iocbq->iocb.ulpTimeout);
9745 /* Need a VF for word 4; set the vf bit */
9746 bf_set(els_req64_vf, &wqe->els_req, 0);
9747 /* And a VFID for word 12 */
9748 bf_set(els_req64_vfid, &wqe->els_req, 0);
9749 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9750 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9751 iocbq->iocb.ulpContext);
9752 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9753 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
9754 /* CCP CCPE PV PRI in word10 were set in the memcpy */
9755 if (command_type == ELS_COMMAND_FIP)
9756 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9757 >> LPFC_FIP_ELS_ID_SHIFT);
9758 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9759 iocbq->context2)->virt);
9760 if_type = bf_get(lpfc_sli_intf_if_type,
9761 &phba->sli4_hba.sli_intf);
9762 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9763 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
9764 *pcmd == ELS_CMD_SCR ||
9765 *pcmd == ELS_CMD_RDF ||
9766 *pcmd == ELS_CMD_RSCN_XMT ||
9767 *pcmd == ELS_CMD_FDISC ||
9768 *pcmd == ELS_CMD_LOGO ||
9769 *pcmd == ELS_CMD_QFPA ||
9770 *pcmd == ELS_CMD_UVEM ||
9771 *pcmd == ELS_CMD_PLOGI)) {
9772 bf_set(els_req64_sp, &wqe->els_req, 1);
9773 bf_set(els_req64_sid, &wqe->els_req,
9774 iocbq->vport->fc_myDID);
9775 if ((*pcmd == ELS_CMD_FLOGI) &&
9776 !(phba->fc_topology ==
9777 LPFC_TOPOLOGY_LOOP))
9778 bf_set(els_req64_sid, &wqe->els_req, 0);
9779 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9780 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9781 phba->vpi_ids[iocbq->vport->vpi]);
9782 } else if (pcmd && iocbq->context1) {
9783 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9784 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9785 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9788 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9789 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9790 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9791 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9792 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9793 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9794 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9795 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
9796 wqe->els_req.max_response_payload_len = total_len - xmit_len;
9798 case CMD_XMIT_SEQUENCE64_CX:
9799 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9800 iocbq->iocb.un.ulpWord[3]);
9801 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
9802 iocbq->iocb.unsli3.rcvsli3.ox_id);
9803 /* The entire sequence is transmitted for this IOCB */
9804 xmit_len = total_len;
9805 cmnd = CMD_XMIT_SEQUENCE64_CR;
9806 if (phba->link_flag & LS_LOOPBACK_MODE)
9807 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
9809 case CMD_XMIT_SEQUENCE64_CR:
9810 /* word3 iocb=io_tag32 wqe=reserved */
9811 wqe->xmit_sequence.rsvd3 = 0;
9812 /* word4 relative_offset memcpy */
9813 /* word5 r_ctl/df_ctl memcpy */
9814 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9815 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9816 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9817 LPFC_WQE_IOD_WRITE);
9818 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9819 LPFC_WQE_LENLOC_WORD12);
9820 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
9821 wqe->xmit_sequence.xmit_len = xmit_len;
9822 command_type = OTHER_COMMAND;
9824 case CMD_XMIT_BCAST64_CN:
9825 /* word3 iocb=iotag32 wqe=seq_payload_len */
9826 wqe->xmit_bcast64.seq_payload_len = xmit_len;
9827 /* word4 iocb=rsvd wqe=rsvd */
9828 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
9829 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
9830 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
9831 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9832 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9833 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9834 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9835 LPFC_WQE_LENLOC_WORD3);
9836 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
9838 case CMD_FCP_IWRITE64_CR:
9839 command_type = FCP_COMMAND_DATA_OUT;
9840 /* word3 iocb=iotag wqe=payload_offset_len */
9841 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9842 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9843 xmit_len + sizeof(struct fcp_rsp));
9844 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9846 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9847 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9848 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9849 iocbq->iocb.ulpFCP2Rcvy);
9850 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9851 /* Always open the exchange */
9852 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9853 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9854 LPFC_WQE_LENLOC_WORD4);
9855 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
9856 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
9857 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9858 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
9859 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9860 if (iocbq->priority) {
9861 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9862 (iocbq->priority << 1));
9864 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9865 (phba->cfg_XLanePriority << 1));
9868 /* Note, word 10 is already initialized to 0 */
9870 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9871 if (phba->cfg_enable_pbde)
9872 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9874 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9876 if (phba->fcp_embed_io) {
9877 struct lpfc_io_buf *lpfc_cmd;
9878 struct sli4_sge *sgl;
9879 struct fcp_cmnd *fcp_cmnd;
9882 /* 128 byte wqe support here */
9884 lpfc_cmd = iocbq->context1;
9885 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9886 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9888 /* Word 0-2 - FCP_CMND */
9889 wqe->generic.bde.tus.f.bdeFlags =
9890 BUFF_TYPE_BDE_IMMED;
9891 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9892 wqe->generic.bde.addrHigh = 0;
9893 wqe->generic.bde.addrLow = 88; /* Word 22 */
9895 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9896 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
9898 /* Word 22-29 FCP CMND Payload */
9899 ptr = &wqe->words[22];
9900 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
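/*
 * Layout note for the embedded-I/O path above: addrLow == 88 points the
 * immediate BDE at byte offset 88 of the WQE, i.e. 88 / 4 == word 22,
 * exactly where the FCP_CMND payload is copied (words 22-29 of the
 * 128-byte WQE). The IREAD and ICMND cases below embed the payload the
 * same way.
 */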
9903 case CMD_FCP_IREAD64_CR:
9904 /* word3 iocb=iotag wqe=payload_offset_len */
9905 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9906 bf_set(payload_offset_len, &wqe->fcp_iread,
9907 xmit_len + sizeof(struct fcp_rsp));
9908 bf_set(cmd_buff_len, &wqe->fcp_iread,
9910 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9911 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9912 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9913 iocbq->iocb.ulpFCP2Rcvy);
9914 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
9915 /* Always open the exchange */
9916 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9917 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9918 LPFC_WQE_LENLOC_WORD4);
9919 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
9920 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
9921 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9922 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
9923 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9924 if (iocbq->priority) {
9925 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9926 (iocbq->priority << 1));
9928 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9929 (phba->cfg_XLanePriority << 1));
9932 /* Note, word 10 is already initialized to 0 */
9934 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9935 if (phba->cfg_enable_pbde)
9936 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9938 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9940 if (phba->fcp_embed_io) {
9941 struct lpfc_io_buf *lpfc_cmd;
9942 struct sli4_sge *sgl;
9943 struct fcp_cmnd *fcp_cmnd;
9946 /* 128 byte wqe support here */
9948 lpfc_cmd = iocbq->context1;
9949 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9950 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9952 /* Word 0-2 - FCP_CMND */
9953 wqe->generic.bde.tus.f.bdeFlags =
9954 BUFF_TYPE_BDE_IMMED;
9955 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9956 wqe->generic.bde.addrHigh = 0;
9957 wqe->generic.bde.addrLow = 88; /* Word 22 */
9959 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9960 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
9962 /* Word 22-29 FCP CMND Payload */
9963 ptr = &wqe->words[22];
9964 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9967 case CMD_FCP_ICMND64_CR:
9968 /* word3 iocb=iotag wqe=payload_offset_len */
9969 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9970 bf_set(payload_offset_len, &wqe->fcp_icmd,
9971 xmit_len + sizeof(struct fcp_rsp));
9972 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9974 /* word3 iocb=IO_TAG wqe=reserved */
9975 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
9976 /* Always open the exchange */
9977 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9978 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9979 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9980 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9981 LPFC_WQE_LENLOC_NONE);
9982 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9983 iocbq->iocb.ulpFCP2Rcvy);
9984 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9985 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
9986 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9987 if (iocbq->priority) {
9988 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9989 (iocbq->priority << 1));
9990 } else {
9991 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9992 (phba->cfg_XLanePriority << 1));
9993 }
9994 }
9995 /* Note, word 10 is already initialized to 0 */
9997 if (phba->fcp_embed_io) {
9998 struct lpfc_io_buf *lpfc_cmd;
9999 struct sli4_sge *sgl;
10000 struct fcp_cmnd *fcp_cmnd;
10003 /* 128 byte wqe support here */
10005 lpfc_cmd = iocbq->context1;
10006 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10007 fcp_cmnd = lpfc_cmd->fcp_cmnd;
10009 /* Word 0-2 - FCP_CMND */
10010 wqe->generic.bde.tus.f.bdeFlags =
10011 BUFF_TYPE_BDE_IMMED;
10012 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10013 wqe->generic.bde.addrHigh = 0;
10014 wqe->generic.bde.addrLow = 88; /* Word 22 */
10016 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
10017 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
10019 /* Word 22-29 FCP CMND Payload */
10020 ptr = &wqe->words[22];
10021 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10024 case CMD_GEN_REQUEST64_CR:
10025 /* For this command calculate the xmit length of the
10026 * request bde.
10027 */
10029 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
10030 sizeof(struct ulp_bde64);
10031 for (i = 0; i < numBdes; i++) {
10032 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
10033 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
10034 break;
10035 xmit_len += bde.tus.f.bdeSize;
10037 /* word3 iocb=IO_TAG wqe=request_payload_len */
10038 wqe->gen_req.request_payload_len = xmit_len;
10039 /* word4 iocb=parameter wqe=relative_offset memcpy */
10040 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
10041 /* word6 context tag copied in memcpy */
10042 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
10043 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
10044 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10045 "2015 Invalid CT %x command 0x%x\n",
10046 ct, iocbq->iocb.ulpCommand);
10047 return IOCB_ERROR;
10048 }
10049 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
10050 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
10051 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
10052 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
10053 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
10054 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
10055 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
10056 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
10057 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
10058 command_type = OTHER_COMMAND;
10060 case CMD_XMIT_ELS_RSP64_CX:
10061 ndlp = (struct lpfc_nodelist *)iocbq->context1;
10062 /* words0-2 BDE memcpy */
10063 /* word3 iocb=iotag32 wqe=response_payload_len */
10064 wqe->xmit_els_rsp.response_payload_len = xmit_len;
10066 wqe->xmit_els_rsp.word4 = 0;
10067 /* word5 iocb=rsvd wqe=did */
10068 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
10069 iocbq->iocb.un.xseq64.xmit_els_remoteID);
10071 if_type = bf_get(lpfc_sli_intf_if_type,
10072 &phba->sli4_hba.sli_intf);
10073 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10074 if (iocbq->vport->fc_flag & FC_PT2PT) {
10075 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
10076 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
10077 iocbq->vport->fc_myDID);
10078 if (iocbq->vport->fc_myDID == Fabric_DID) {
10079 bf_set(wqe_els_did,
10080 &wqe->xmit_els_rsp.wqe_dest, 0);
10084 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
10085 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
10086 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
10087 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
10088 iocbq->iocb.unsli3.rcvsli3.ox_id);
10089 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
10090 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
10091 phba->vpi_ids[iocbq->vport->vpi]);
10092 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
10093 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
10094 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
10095 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
10096 LPFC_WQE_LENLOC_WORD3);
10097 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
10098 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
10099 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
10100 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
10101 iocbq->context2)->virt);
10102 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
10103 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
10104 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
10105 iocbq->vport->fc_myDID);
10106 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
10107 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
10108 phba->vpi_ids[phba->pport->vpi]);
10110 command_type = OTHER_COMMAND;
10112 case CMD_CLOSE_XRI_CN:
10113 case CMD_ABORT_XRI_CN:
10114 case CMD_ABORT_XRI_CX:
10115 /* words 0-2 memcpy should be 0 (reserved) */
10116 /* port will send abts */
10117 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
10118 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
10119 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
10120 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
10124 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
10125 /*
10126 * The link is down, or the command was ELS_FIP
10127 * so the fw does not need to send abts
10128 * on the wire.
10129 */
10130 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
10131 else
10132 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
10133 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
10134 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
10135 wqe->abort_cmd.rsrvd5 = 0;
10136 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
10137 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
10138 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
10140 * The abort handler will send us CMD_ABORT_XRI_CN or
10141 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
10143 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
10144 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
10145 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
10146 LPFC_WQE_LENLOC_NONE);
10147 cmnd = CMD_ABORT_XRI_CX;
10148 command_type = OTHER_COMMAND;
10151 case CMD_XMIT_BLS_RSP64_CX:
10152 ndlp = (struct lpfc_nodelist *)iocbq->context1;
10153 /* As BLS ABTS RSP WQE is very different from other WQEs,
10154 * we re-construct this WQE here based on information in
10155 * iocbq from scratch.
10157 memset(wqe, 0, sizeof(*wqe));
10158 /* OX_ID is invariant regardless of who sent the ABTS to the CT exchange */
10159 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
10160 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
10161 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
10162 LPFC_ABTS_UNSOL_INT) {
10163 /* ABTS sent by initiator to CT exchange, the
10164 * RX_ID field will be filled with the newly
10165 * allocated responder XRI.
10167 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10168 iocbq->sli4_xritag);
10170 /* ABTS sent by responder to CT exchange, the
10171 * RX_ID field will be filled with the responder
10174 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10175 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
10177 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
10178 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
10181 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
10182 ndlp->nlp_DID);
10183 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
10184 iocbq->iocb.ulpContext);
10185 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
10186 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
10187 phba->vpi_ids[phba->pport->vpi]);
10188 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
10189 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
10190 LPFC_WQE_LENLOC_NONE);
10191 /* Overwrite the pre-set command type with OTHER_COMMAND */
10192 command_type = OTHER_COMMAND;
10193 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
10194 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
10195 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
10196 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
10197 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
10198 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
10199 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
10203 case CMD_SEND_FRAME:
10204 bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
10205 bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
10206 bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
10207 bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
10208 bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
10209 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10210 bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
10211 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
10212 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10213 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10214 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10216 case CMD_XRI_ABORTED_CX:
10217 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
10218 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
10219 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
10220 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
10221 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
10223 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10224 "2014 Invalid command 0x%x\n",
10225 iocbq->iocb.ulpCommand);
10229 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
10230 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
10231 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
10232 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
10233 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
10234 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
10235 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
10236 LPFC_IO_DIF_INSERT);
10237 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10238 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10239 wqe->generic.wqe_com.abort_tag = abort_tag;
10240 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
10241 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
10242 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
10243 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
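/* Sketch (illustrative only, not from the original source): a caller
 * converts an iocb and posts the resulting WQE roughly as
 *
 *	union lpfc_wqe128 wqe;
 *
 *	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
 *		return IOCB_ERROR;
 *	if (lpfc_sli4_wq_put(wq, &wqe))
 *		return IOCB_ERROR;
 *
 * which mirrors the use in __lpfc_sli_issue_iocb_s4() below.
 */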
10248 * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
10249 * @phba: Pointer to HBA context object.
10250 * @ring_number: SLI ring number to issue wqe on.
10251 * @piocb: Pointer to command iocb.
10252 * @flag: Flag indicating if this command can be put into txq.
10254 * __lpfc_sli_issue_fcp_io_s3 is a wrapper function that invokes the lockless
10255 * version to send an iocb command to an HBA with the SLI-3 interface spec.
10257 * This function takes the hbalock before invoking the lockless version.
10258 * The function returns success after it successfully submits the iocb to the
10259 * firmware or after adding it to the txq.
10262 __lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
10263 struct lpfc_iocbq *piocb, uint32_t flag)
10265 unsigned long iflags;
10268 spin_lock_irqsave(&phba->hbalock, iflags);
10269 rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
10270 spin_unlock_irqrestore(&phba->hbalock, iflags);
10271 return rc;
10276 * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
10277 * @phba: Pointer to HBA context object.
10278 * @ring_number: SLI ring number to issue wqe on.
10279 * @piocb: Pointer to command iocb.
10280 * @flag: Flag indicating if this command can be put into txq.
10282 * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue
10283 * a wqe command to an HBA with the SLI-4 interface spec.
10285 * This function is a lockless version. The function returns success after it
10286 * successfully submits the wqe to the firmware or after adding it to the
10287 * txq.
10290 __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
10291 struct lpfc_iocbq *piocb, uint32_t flag)
10294 struct lpfc_io_buf *lpfc_cmd =
10295 (struct lpfc_io_buf *)piocb->context1;
10296 union lpfc_wqe128 *wqe = &piocb->wqe;
10297 struct sli4_sge *sgl;
10299 /* 128 byte wqe support here */
10300 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10302 if (phba->fcp_embed_io) {
10303 struct fcp_cmnd *fcp_cmnd;
10306 fcp_cmnd = lpfc_cmd->fcp_cmnd;
10308 /* Word 0-2 - FCP_CMND */
10309 wqe->generic.bde.tus.f.bdeFlags =
10310 BUFF_TYPE_BDE_IMMED;
10311 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10312 wqe->generic.bde.addrHigh = 0;
10313 wqe->generic.bde.addrLow = 88; /* Word 22 */
10315 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10316 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
10318 /* Word 22-29 FCP CMND Payload */
10319 ptr = &wqe->words[22];
10320 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10321 } else {
10322 /* Word 0-2 - Inline BDE */
10323 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
10324 wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd);
10325 wqe->generic.bde.addrHigh = sgl->addr_hi;
10326 wqe->generic.bde.addrLow = sgl->addr_lo;
10329 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10330 bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
10333 /* add the VMID tags as per switch response */
10334 if (unlikely(piocb->iocb_flag & LPFC_IO_VMID)) {
10335 if (phba->pport->vmid_priority_tagging) {
10336 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
10337 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
10338 (piocb->vmid_tag.cs_ctl_vmid));
10339 } else {
10340 bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
10341 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10342 wqe->words[31] = piocb->vmid_tag.app_id;
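/* Illustrative note (not from the original source): two VMID schemes are
 * handled above. With priority tagging the CS_CTL VMID travels in the
 * CCP field (CCPE set); otherwise an application ID is placed in WQE
 * word 31, with wqe_appid/wqe_wqes set so the extended WQE carries it.
 */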
10345 rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
10346 return rc;
10350 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
10351 * @phba: Pointer to HBA context object.
10352 * @ring_number: SLI ring number to issue iocb on.
10353 * @piocb: Pointer to command iocb.
10354 * @flag: Flag indicating if this command can be put into txq.
10356 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
10357 * an iocb command to an HBA with SLI-4 interface spec.
10359 * This function is called with ringlock held. The function returns success
10360 * after it successfully submits the iocb to the firmware or after adding it
10361 * to the txq.
10364 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
10365 struct lpfc_iocbq *piocb, uint32_t flag)
10367 struct lpfc_sglq *sglq;
10368 union lpfc_wqe128 wqe;
10369 struct lpfc_queue *wq;
10370 struct lpfc_sli_ring *pring;
10373 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
10374 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10375 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
10376 } else {
10377 wq = phba->sli4_hba.els_wq;
10378 }
10380 /* Get corresponding ring */
10381 pring = wq->pring;
10383 /*
10384 * The WQE can be either 64 or 128 bytes.
10385 */
10387 lockdep_assert_held(&pring->ring_lock);
10389 if (piocb->sli4_xritag == NO_XRI) {
10390 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
10391 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
10392 sglq = NULL;
10393 else {
10394 if (!list_empty(&pring->txq)) {
10395 if (!(flag & SLI_IOCB_RET_IOCB)) {
10396 __lpfc_sli_ringtx_put(phba,
10397 pring, piocb);
10398 return IOCB_SUCCESS;
10399 } else {
10400 return IOCB_BUSY;
10401 }
10402 } else {
10403 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
10404 if (!sglq) {
10405 if (!(flag & SLI_IOCB_RET_IOCB)) {
10406 __lpfc_sli_ringtx_put(phba,
10407 pring, piocb);
10409 return IOCB_SUCCESS;
10410 } else
10411 return IOCB_BUSY;
10412 }
10413 }
10414 }
10415 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
10416 /* These IO's already have an XRI and a mapped sgl. */
10421 * This is a continuation of a command (CX), so this
10422 * sglq is on the active list
10423 */
10424 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
10425 if (!sglq)
10426 return IOCB_ERROR;
10430 piocb->sli4_lxritag = sglq->sli4_lxritag;
10431 piocb->sli4_xritag = sglq->sli4_xritag;
10432 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
10433 return IOCB_ERROR;
10436 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
10437 return IOCB_ERROR;
10439 if (lpfc_sli4_wq_put(wq, &wqe))
10440 return IOCB_ERROR;
10441 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
10443 return 0;
10447 * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
10449 * This routine wraps the actual fcp i/o function for issuing a WQE (SLI-4)
10450 * or an IOCB (SLI-3) via the corresponding function
10451 * pointer from the lpfc_hba struct.
10453 * Return codes:
10454 * IOCB_ERROR - Error
10455 * IOCB_SUCCESS - Success
10459 lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
10460 struct lpfc_iocbq *piocb, uint32_t flag)
10462 return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
10466 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
10468 * This routine wraps the actual lockless version via the iocb issue function
10469 * pointer from the lpfc_hba struct.
10471 * Return codes:
10472 * IOCB_ERROR - Error
10473 * IOCB_SUCCESS - Success
10477 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10478 struct lpfc_iocbq *piocb, uint32_t flag)
10480 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10484 * lpfc_sli_api_table_setup - Set up sli api function jump table
10485 * @phba: The hba struct for which this call is being executed.
10486 * @dev_grp: The HBA PCI-Device group number.
10488 * This routine sets up the SLI interface API function jump table in @phba
10489 * struct.
10490 * Returns: 0 - success, -ENODEV - failure.
10493 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10497 case LPFC_PCI_DEV_LP:
10498 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10499 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10500 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
10502 case LPFC_PCI_DEV_OC:
10503 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10504 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10505 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
10508 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10509 "1419 Invalid HBA PCI-device group: 0x%x\n",
10513 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
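/* Sketch (illustrative only, not from the original source): once the jump
 * table is bound, callers stay SLI-revision agnostic, e.g.
 *
 *	if (lpfc_sli_api_table_setup(phba, dev_grp))
 *		return -ENODEV;
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb, 0);
 *
 * dispatches to the _s3 or _s4 variant selected above.
 */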
10518 * lpfc_sli4_calc_ring - Calculates which ring to use
10519 * @phba: Pointer to HBA context object.
10520 * @piocb: Pointer to command iocb.
10522 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
10523 * hba_wqidx, thus we need to calculate the corresponding ring.
10524 * Since ABORTS must go on the same WQ as the command they are
10525 * aborting, we use the command's hba_wqidx.
10527 struct lpfc_sli_ring *
10528 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10530 struct lpfc_io_buf *lpfc_cmd;
10532 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10533 if (unlikely(!phba->sli4_hba.hdwq))
10534 return NULL;
10535 /*
10536 * for abort iocb hba_wqidx should already
10537 * be setup based on what work queue we used.
10539 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10540 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10541 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10543 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
10545 if (unlikely(!phba->sli4_hba.els_wq))
10546 return NULL;
10547 piocb->hba_wqidx = 0;
10548 return phba->sli4_hba.els_wq->pring;
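/* Example (illustrative only, not from the original source): an abort is
 * routed to the WQ of the command being aborted by copying its index
 * before calling this routine:
 *
 *	abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
 *	pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
 *
 * as done in lpfc_sli_issue_abort_iotag() below.
 */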
10553 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
10554 * @phba: Pointer to HBA context object.
10555 * @ring_number: Ring number
10556 * @piocb: Pointer to command iocb.
10557 * @flag: Flag indicating if this command can be put into txq.
10559 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
10560 * function. This function gets the hbalock and calls
10561 * __lpfc_sli_issue_iocb function and will return the error returned
10562 * by __lpfc_sli_issue_iocb function. This wrapper is used by
10563 * functions which do not hold hbalock.
10566 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10567 struct lpfc_iocbq *piocb, uint32_t flag)
10569 struct lpfc_sli_ring *pring;
10570 struct lpfc_queue *eq;
10571 unsigned long iflags;
10574 if (phba->sli_rev == LPFC_SLI_REV4) {
10575 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
10577 pring = lpfc_sli4_calc_ring(phba, piocb);
10578 if (unlikely(pring == NULL))
10579 return IOCB_ERROR;
10581 spin_lock_irqsave(&pring->ring_lock, iflags);
10582 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10583 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10585 lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
10587 /* For now, SLI2/3 will still use hbalock */
10588 spin_lock_irqsave(&phba->hbalock, iflags);
10589 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10590 spin_unlock_irqrestore(&phba->hbalock, iflags);
10591 }
10592 return rc;
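/* Example (illustrative only, not from the original source): callers that
 * do not hold hbalock submit through this wrapper and must clean up on
 * failure themselves, e.g.
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *	if (rc == IOCB_ERROR)
 *		lpfc_els_free_iocb(phba, elsiocb);
 */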
10596 * lpfc_extra_ring_setup - Extra ring setup function
10597 * @phba: Pointer to HBA context object.
10599 * This function is called while the driver attaches to the
10600 * HBA to set up the extra ring. The extra ring is used
10601 * only when the driver needs to support target mode functionality
10602 * or IP over FC functionality.
10604 * This function is called with no lock held. SLI3 only.
10607 lpfc_extra_ring_setup( struct lpfc_hba *phba)
10609 struct lpfc_sli *psli;
10610 struct lpfc_sli_ring *pring;
10614 /* Adjust cmd/rsp ring iocb entries more evenly */
10616 /* Take some away from the FCP ring */
10617 pring = &psli->sli3_ring[LPFC_FCP_RING];
10618 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10619 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10620 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10621 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10623 /* and give them to the extra ring */
10624 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
10626 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10627 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10628 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10629 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10631 /* Setup default profile for this ring */
10632 pring->iotag_max = 4096;
10633 pring->num_mask = 1;
10634 pring->prt[0].profile = 0; /* Mask 0 */
10635 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10636 pring->prt[0].type = phba->cfg_multi_ring_type;
10637 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
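/* Note (illustrative, not from the original source): the cmd/rsp entry
 * counts added here are exactly the R1XTRA and R3XTRA amounts removed
 * from the FCP ring above, so the total SLIM footprint computed in
 * lpfc_sli_setup() is unchanged.
 */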
10642 lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
10643 struct lpfc_nodelist *ndlp)
10645 unsigned long iflags;
10646 struct lpfc_work_evt *evtp = &ndlp->recovery_evt;
10648 spin_lock_irqsave(&phba->hbalock, iflags);
10649 if (!list_empty(&evtp->evt_listp)) {
10650 spin_unlock_irqrestore(&phba->hbalock, iflags);
10654 /* Incrementing the reference count until the queued work is done. */
10655 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
10656 if (!evtp->evt_arg1) {
10657 spin_unlock_irqrestore(&phba->hbalock, iflags);
10660 evtp->evt = LPFC_EVT_RECOVER_PORT;
10661 list_add_tail(&evtp->evt_listp, &phba->work_list);
10662 spin_unlock_irqrestore(&phba->hbalock, iflags);
10664 lpfc_worker_wake_up(phba);
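/* Note (illustrative, not from the original source): the lpfc_nlp_get()
 * reference taken above pins the node until the worker thread has
 * processed the queued LPFC_EVT_RECOVER_PORT event and drops it.
 */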
10667 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
10668 * @phba: Pointer to HBA context object.
10669 * @iocbq: Pointer to iocb object.
10671 * The async_event handler calls this routine when it receives
10672 * an ASYNC_STATUS_CN event from the port. The port generates
10673 * this event when an Abort Sequence request to an rport fails
10674 * twice in succession. The abort could be originated by the
10675 * driver or by the port. The ABTS could have been for an ELS
10676 * or FCP IO. The port only generates this event when an ABTS
10677 * fails to complete after one retry.
10680 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10681 struct lpfc_iocbq *iocbq)
10683 struct lpfc_nodelist *ndlp = NULL;
10684 uint16_t rpi = 0, vpi = 0;
10685 struct lpfc_vport *vport = NULL;
10687 /* The rpi in the ulpContext is vport-sensitive. */
10688 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10689 rpi = iocbq->iocb.ulpContext;
10691 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10692 "3092 Port generated ABTS async event "
10693 "on vpi %d rpi %d status 0x%x\n",
10694 vpi, rpi, iocbq->iocb.ulpStatus);
10696 vport = lpfc_find_vport_by_vpid(phba, vpi);
10697 if (!vport)
10698 goto err_exit;
10699 ndlp = lpfc_findnode_rpi(vport, rpi);
10700 if (!ndlp)
10701 goto err_exit;
10703 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10704 lpfc_sli_abts_recover_port(vport, ndlp);
10705 return;
10707 err_exit:
10708 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10709 "3095 Event Context not found, no "
10710 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
10711 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
10712 vpi, rpi);
10715 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
10716 * @phba: pointer to HBA context object.
10717 * @ndlp: nodelist pointer for the impacted rport.
10718 * @axri: pointer to the wcqe containing the failed exchange.
10720 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
10721 * port. The port generates this event when an abort exchange request to an
10722 * rport fails twice in succession with no reply. The abort could be originated
10723 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
10726 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10727 struct lpfc_nodelist *ndlp,
10728 struct sli4_wcqe_xri_aborted *axri)
10730 uint32_t ext_status = 0;
10732 if (!ndlp) {
10733 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10734 "3115 Node Context not found, driver "
10735 "ignoring abts err event\n");
10736 return;
10737 }
10739 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10740 "3116 Port generated FCP XRI ABORT event on "
10741 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
10742 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
10743 bf_get(lpfc_wcqe_xa_xri, axri),
10744 bf_get(lpfc_wcqe_xa_status, axri),
10745 axri->parameter);
10747 /*
10748 * Catch the ABTS protocol failure case. Older OCe FW releases returned
10749 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
10750 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
10752 ext_status = axri->parameter & IOERR_PARAM_MASK;
10753 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10754 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
10755 lpfc_sli_post_recovery_event(phba, ndlp);
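/* Illustrative note (not from the original source): ext_status keeps only
 * the IOERR_PARAM_MASK bits of the WCQE parameter, so a LOCAL_REJECT
 * status paired with either IOERR_SEQUENCE_TIMEOUT (newer firmware) or 0
 * (older OCe firmware) is treated as the ABTS protocol failure that
 * queues port recovery.
 */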
10759 * lpfc_sli_async_event_handler - ASYNC iocb handler function
10760 * @phba: Pointer to HBA context object.
10761 * @pring: Pointer to driver SLI ring object.
10762 * @iocbq: Pointer to iocb object.
10764 * This function is called by the slow ring event handler
10765 * function when there is an ASYNC event iocb in the ring.
10766 * This function is called with no lock held.
10767 * Currently this function handles only temperature related
10768 * ASYNC events. The function decodes the temperature sensor
10769 * event message and posts events for the management applications.
10772 lpfc_sli_async_event_handler(struct lpfc_hba * phba,
10773 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
10777 struct temp_event temp_event_data;
10778 struct Scsi_Host *shost;
10781 icmd = &iocbq->iocb;
10782 evt_code = icmd->un.asyncstat.evt_code;
10784 switch (evt_code) {
10785 case ASYNC_TEMP_WARN:
10786 case ASYNC_TEMP_SAFE:
10787 temp_event_data.data = (uint32_t) icmd->ulpContext;
10788 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
10789 if (evt_code == ASYNC_TEMP_WARN) {
10790 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
10791 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10792 "0347 Adapter is very hot, please take "
10793 "corrective action. temperature : %d Celsius\n",
10794 (uint32_t) icmd->ulpContext);
10796 temp_event_data.event_code = LPFC_NORMAL_TEMP;
10797 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10798 "0340 Adapter temperature is OK now. "
10799 "temperature : %d Celsius\n",
10800 (uint32_t) icmd->ulpContext);
10803 /* Send temperature change event to applications */
10804 shost = lpfc_shost_from_vport(phba->pport);
10805 fc_host_post_vendor_event(shost, fc_get_event_number(),
10806 sizeof(temp_event_data), (char *) &temp_event_data,
10807 LPFC_NL_VENDOR_ID);
10809 case ASYNC_STATUS_CN:
10810 lpfc_sli_abts_err_handler(phba, iocbq);
10813 iocb_w = (uint32_t *) icmd;
10814 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10815 "0346 Ring %d handler: unexpected ASYNC_STATUS"
10817 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
10818 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
10819 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
10820 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
10821 pring->ringno, icmd->un.asyncstat.evt_code,
10822 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
10823 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
10824 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
10825 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
10833 * lpfc_sli4_setup - SLI ring setup function
10834 * @phba: Pointer to HBA context object.
10836 * lpfc_sli4_setup sets up rings of the SLI interface with
10837 * number of iocbs per ring and iotags. This function is
10838 * called while the driver attaches to the HBA and before the
10839 * interrupts are enabled. So there is no need for locking.
10841 * This function always returns 0.
10844 lpfc_sli4_setup(struct lpfc_hba *phba)
10846 struct lpfc_sli_ring *pring;
10848 pring = phba->sli4_hba.els_wq->pring;
10849 pring->num_mask = LPFC_MAX_RING_MASK;
10850 pring->prt[0].profile = 0; /* Mask 0 */
10851 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10852 pring->prt[0].type = FC_TYPE_ELS;
10853 pring->prt[0].lpfc_sli_rcv_unsol_event =
10854 lpfc_els_unsol_event;
10855 pring->prt[1].profile = 0; /* Mask 1 */
10856 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10857 pring->prt[1].type = FC_TYPE_ELS;
10858 pring->prt[1].lpfc_sli_rcv_unsol_event =
10859 lpfc_els_unsol_event;
10860 pring->prt[2].profile = 0; /* Mask 2 */
10861 /* NameServer Inquiry */
10862 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10864 pring->prt[2].type = FC_TYPE_CT;
10865 pring->prt[2].lpfc_sli_rcv_unsol_event =
10866 lpfc_ct_unsol_event;
10867 pring->prt[3].profile = 0; /* Mask 3 */
10868 /* NameServer response */
10869 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10871 pring->prt[3].type = FC_TYPE_CT;
10872 pring->prt[3].lpfc_sli_rcv_unsol_event =
10873 lpfc_ct_unsol_event;
10878 * lpfc_sli_setup - SLI ring setup function
10879 * @phba: Pointer to HBA context object.
10881 * lpfc_sli_setup sets up rings of the SLI interface with
10882 * number of iocbs per ring and iotags. This function is
10883 * called while the driver attaches to the HBA and before the
10884 * interrupts are enabled. So there is no need for locking.
10886 * This function always returns 0. SLI3 only.
10889 lpfc_sli_setup(struct lpfc_hba *phba)
10891 int i, totiocbsize = 0;
10892 struct lpfc_sli *psli = &phba->sli;
10893 struct lpfc_sli_ring *pring;
10895 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
10896 psli->sli_flag = 0;
10898 psli->iocbq_lookup = NULL;
10899 psli->iocbq_lookup_len = 0;
10900 psli->last_iotag = 0;
10902 for (i = 0; i < psli->num_rings; i++) {
10903 pring = &psli->sli3_ring[i];
10905 case LPFC_FCP_RING: /* ring 0 - FCP */
10906 /* numCiocb and numRiocb are used in config_port */
10907 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10908 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10909 pring->sli.sli3.numCiocb +=
10910 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10911 pring->sli.sli3.numRiocb +=
10912 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10913 pring->sli.sli3.numCiocb +=
10914 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10915 pring->sli.sli3.numRiocb +=
10916 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10917 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10918 SLI3_IOCB_CMD_SIZE :
10919 SLI2_IOCB_CMD_SIZE;
10920 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10921 SLI3_IOCB_RSP_SIZE :
10922 SLI2_IOCB_RSP_SIZE;
10923 pring->iotag_ctr = 0;
10925 (phba->cfg_hba_queue_depth * 2);
10926 pring->fast_iotag = pring->iotag_max;
10927 pring->num_mask = 0;
10929 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
10930 /* numCiocb and numRiocb are used in config_port */
10931 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10932 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10933 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10934 SLI3_IOCB_CMD_SIZE :
10935 SLI2_IOCB_CMD_SIZE;
10936 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10937 SLI3_IOCB_RSP_SIZE :
10938 SLI2_IOCB_RSP_SIZE;
10939 pring->iotag_max = phba->cfg_hba_queue_depth;
10940 pring->num_mask = 0;
10942 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
10943 /* numCiocb and numRiocb are used in config_port */
10944 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10945 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10946 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10947 SLI3_IOCB_CMD_SIZE :
10948 SLI2_IOCB_CMD_SIZE;
10949 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10950 SLI3_IOCB_RSP_SIZE :
10951 SLI2_IOCB_RSP_SIZE;
10952 pring->fast_iotag = 0;
10953 pring->iotag_ctr = 0;
10954 pring->iotag_max = 4096;
10955 pring->lpfc_sli_rcv_async_status =
10956 lpfc_sli_async_event_handler;
10957 pring->num_mask = LPFC_MAX_RING_MASK;
10958 pring->prt[0].profile = 0; /* Mask 0 */
10959 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10960 pring->prt[0].type = FC_TYPE_ELS;
10961 pring->prt[0].lpfc_sli_rcv_unsol_event =
10962 lpfc_els_unsol_event;
10963 pring->prt[1].profile = 0; /* Mask 1 */
10964 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10965 pring->prt[1].type = FC_TYPE_ELS;
10966 pring->prt[1].lpfc_sli_rcv_unsol_event =
10967 lpfc_els_unsol_event;
10968 pring->prt[2].profile = 0; /* Mask 2 */
10969 /* NameServer Inquiry */
10970 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10972 pring->prt[2].type = FC_TYPE_CT;
10973 pring->prt[2].lpfc_sli_rcv_unsol_event =
10974 lpfc_ct_unsol_event;
10975 pring->prt[3].profile = 0; /* Mask 3 */
10976 /* NameServer response */
10977 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10979 pring->prt[3].type = FC_TYPE_CT;
10980 pring->prt[3].lpfc_sli_rcv_unsol_event =
10981 lpfc_ct_unsol_event;
10984 totiocbsize += (pring->sli.sli3.numCiocb *
10985 pring->sli.sli3.sizeCiocb) +
10986 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
10988 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
10989 /* Too many cmd / rsp ring entries in SLI2 SLIM */
10990 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10991 "SLI2 SLIM Data: x%x x%lx\n",
10992 phba->brd_no, totiocbsize,
10993 (unsigned long) MAX_SLIM_IOCB_SIZE);
10995 if (phba->cfg_multi_ring_support == 2)
10996 lpfc_extra_ring_setup(phba);
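/* Note (illustrative, not from the original source): totiocbsize
 * accumulated above is the sum of numCiocb * sizeCiocb +
 * numRiocb * sizeRiocb over all rings, and must fit within the SLI2
 * SLIM region (MAX_SLIM_IOCB_SIZE) checked above.
 */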
11002 * lpfc_sli4_queue_init - Queue initialization function
11003 * @phba: Pointer to HBA context object.
11005 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
11006 * ring. This function also initializes ring indices of each ring.
11007 * This function is called during the initialization of the SLI
11008 * interface of an HBA.
11009 * This function is called with no lock held and always returns
11010 * 1.
11013 lpfc_sli4_queue_init(struct lpfc_hba *phba)
11015 struct lpfc_sli *psli;
11016 struct lpfc_sli_ring *pring;
11020 spin_lock_irq(&phba->hbalock);
11021 INIT_LIST_HEAD(&psli->mboxq);
11022 INIT_LIST_HEAD(&psli->mboxq_cmpl);
11023 /* Initialize list headers for txq and txcmplq as double linked lists */
11024 for (i = 0; i < phba->cfg_hdw_queue; i++) {
11025 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
11027 pring->ringno = LPFC_FCP_RING;
11028 pring->txcmplq_cnt = 0;
11029 INIT_LIST_HEAD(&pring->txq);
11030 INIT_LIST_HEAD(&pring->txcmplq);
11031 INIT_LIST_HEAD(&pring->iocb_continueq);
11032 spin_lock_init(&pring->ring_lock);
11034 pring = phba->sli4_hba.els_wq->pring;
11036 pring->ringno = LPFC_ELS_RING;
11037 pring->txcmplq_cnt = 0;
11038 INIT_LIST_HEAD(&pring->txq);
11039 INIT_LIST_HEAD(&pring->txcmplq);
11040 INIT_LIST_HEAD(&pring->iocb_continueq);
11041 spin_lock_init(&pring->ring_lock);
11043 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11044 pring = phba->sli4_hba.nvmels_wq->pring;
11046 pring->ringno = LPFC_ELS_RING;
11047 pring->txcmplq_cnt = 0;
11048 INIT_LIST_HEAD(&pring->txq);
11049 INIT_LIST_HEAD(&pring->txcmplq);
11050 INIT_LIST_HEAD(&pring->iocb_continueq);
11051 spin_lock_init(&pring->ring_lock);
11054 spin_unlock_irq(&phba->hbalock);
11058 * lpfc_sli_queue_init - Queue initialization function
11059 * @phba: Pointer to HBA context object.
11061 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
11062 * ring. This function also initializes ring indices of each ring.
11063 * This function is called during the initialization of the SLI
11064 * interface of an HBA.
11065 * This function is called with no lock held and always returns
11066 * 1.
11069 lpfc_sli_queue_init(struct lpfc_hba *phba)
11071 struct lpfc_sli *psli;
11072 struct lpfc_sli_ring *pring;
11076 spin_lock_irq(&phba->hbalock);
11077 INIT_LIST_HEAD(&psli->mboxq);
11078 INIT_LIST_HEAD(&psli->mboxq_cmpl);
11079 /* Initialize list headers for txq and txcmplq as double linked lists */
11080 for (i = 0; i < psli->num_rings; i++) {
11081 pring = &psli->sli3_ring[i];
11083 pring->sli.sli3.next_cmdidx = 0;
11084 pring->sli.sli3.local_getidx = 0;
11085 pring->sli.sli3.cmdidx = 0;
11086 INIT_LIST_HEAD(&pring->iocb_continueq);
11087 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
11088 INIT_LIST_HEAD(&pring->postbufq);
11090 INIT_LIST_HEAD(&pring->txq);
11091 INIT_LIST_HEAD(&pring->txcmplq);
11092 spin_lock_init(&pring->ring_lock);
11094 spin_unlock_irq(&phba->hbalock);
11098 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
11099 * @phba: Pointer to HBA context object.
11101 * This routine flushes the mailbox command subsystem. It will unconditionally
11102 * flush all the mailbox commands in the three possible stages in the mailbox
11103 * command sub-system: pending mailbox command queue; the outstanding mailbox
11104 * command; and completed mailbox command queue. It is the caller's responsibility
11105 * to make sure that the driver is in the proper state to flush the mailbox
11106 * command sub-system. Namely, the posting of mailbox commands into the
11107 * pending mailbox command queue from the various clients must be stopped;
11108 * either the HBA is in a state that it will never work on the outstanding
11109 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
11110 * mailbox command has been completed.
11113 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
11115 LIST_HEAD(completions);
11116 struct lpfc_sli *psli = &phba->sli;
11118 unsigned long iflag;
11120 /* Disable softirqs, including timers from obtaining phba->hbalock */
11121 local_bh_disable();
11123 /* Flush all the mailbox commands in the mbox system */
11124 spin_lock_irqsave(&phba->hbalock, iflag);
11126 /* The pending mailbox command queue */
11127 list_splice_init(&phba->sli.mboxq, &completions);
11128 /* The outstanding active mailbox command */
11129 if (psli->mbox_active) {
11130 list_add_tail(&psli->mbox_active->list, &completions);
11131 psli->mbox_active = NULL;
11132 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11134 /* The completed mailbox command queue */
11135 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
11136 spin_unlock_irqrestore(&phba->hbalock, iflag);
11138 /* Enable softirqs again, done with phba->hbalock */
11141 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
11142 while (!list_empty(&completions)) {
11143 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
11144 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
11145 if (pmb->mbox_cmpl)
11146 pmb->mbox_cmpl(phba, pmb);
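/* Note (illustrative, not from the original source): completions run
 * outside hbalock here, with mbxStatus forced to MBX_NOT_FINISHED so
 * waiters observe a flush rather than a normal mailbox completion.
 */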
11151 * lpfc_sli_host_down - Vport cleanup function
11152 * @vport: Pointer to virtual port object.
11154 * lpfc_sli_host_down is called to clean up the resources
11155 * associated with a vport before destroying virtual
11156 * port data structures.
11157 * This function does the following operations:
11158 * - Free discovery resources associated with this virtual
11159 * port.
11160 * - Free iocbs associated with this virtual port in
11161 * the txq.
11162 * - Send abort for all iocb commands associated with this
11163 * vport in txcmplq.
11165 * This function is called with no lock held and always returns 1.
11168 lpfc_sli_host_down(struct lpfc_vport *vport)
11170 LIST_HEAD(completions);
11171 struct lpfc_hba *phba = vport->phba;
11172 struct lpfc_sli *psli = &phba->sli;
11173 struct lpfc_queue *qp = NULL;
11174 struct lpfc_sli_ring *pring;
11175 struct lpfc_iocbq *iocb, *next_iocb;
11177 unsigned long flags = 0;
11178 uint16_t prev_pring_flag;
11180 lpfc_cleanup_discovery_resources(vport);
11182 spin_lock_irqsave(&phba->hbalock, flags);
11185 * Error everything on the txq since these iocbs
11186 * have not been given to the FW yet.
11187 * Also issue ABTS for everything on the txcmplq
11189 if (phba->sli_rev != LPFC_SLI_REV4) {
11190 for (i = 0; i < psli->num_rings; i++) {
11191 pring = &psli->sli3_ring[i];
11192 prev_pring_flag = pring->flag;
11193 /* Only slow rings */
11194 if (pring->ringno == LPFC_ELS_RING) {
11195 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11196 /* Set the lpfc data pending flag */
11197 set_bit(LPFC_DATA_READY, &phba->data_flags);
11199 list_for_each_entry_safe(iocb, next_iocb,
11200 &pring->txq, list) {
11201 if (iocb->vport != vport)
11203 list_move_tail(&iocb->list, &completions);
11205 list_for_each_entry_safe(iocb, next_iocb,
11206 &pring->txcmplq, list) {
11207 if (iocb->vport != vport)
11209 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11212 pring->flag = prev_pring_flag;
11215 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11219 if (pring == phba->sli4_hba.els_wq->pring) {
11220 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11221 /* Set the lpfc data pending flag */
11222 set_bit(LPFC_DATA_READY, &phba->data_flags);
11224 prev_pring_flag = pring->flag;
11225 spin_lock(&pring->ring_lock);
11226 list_for_each_entry_safe(iocb, next_iocb,
11227 &pring->txq, list) {
11228 if (iocb->vport != vport)
11230 list_move_tail(&iocb->list, &completions);
11232 spin_unlock(&pring->ring_lock);
11233 list_for_each_entry_safe(iocb, next_iocb,
11234 &pring->txcmplq, list) {
11235 if (iocb->vport != vport)
11237 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11240 pring->flag = prev_pring_flag;
11243 spin_unlock_irqrestore(&phba->hbalock, flags);
11245 /* Make sure HBA is alive */
11246 lpfc_issue_hb_tmo(phba);
11248 /* Cancel all the IOCBs from the completions list */
11249 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11255 * lpfc_sli_hba_down - Resource cleanup function for the HBA
11256 * @phba: Pointer to HBA context object.
11258 * This function cleans up all iocb, buffers, mailbox commands
11259 * while shutting down the HBA. This function is called with no
11260 * lock held and always returns 1.
11261 * This function does the following to cleanup driver resources:
11262 * - Free discovery resources for each virtual port
11263 * - Cleanup any pending fabric iocbs
11264 * - Iterate through the iocb txq and free each entry
11265 * in the list.
11266 * - Free up any buffer posted to the HBA
11267 * - Free mailbox commands in the mailbox queue.
11270 lpfc_sli_hba_down(struct lpfc_hba *phba)
11272 LIST_HEAD(completions);
11273 struct lpfc_sli *psli = &phba->sli;
11274 struct lpfc_queue *qp = NULL;
11275 struct lpfc_sli_ring *pring;
11276 struct lpfc_dmabuf *buf_ptr;
11277 unsigned long flags = 0;
11280 /* Shutdown the mailbox command sub-system */
11281 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
11283 lpfc_hba_down_prep(phba);
11285 /* Disable softirqs, including timers from obtaining phba->hbalock */
11286 local_bh_disable();
11288 lpfc_fabric_abort_hba(phba);
11290 spin_lock_irqsave(&phba->hbalock, flags);
11293 * Error everything on the txq since these iocbs
11294 * have not been given to the FW yet.
11296 if (phba->sli_rev != LPFC_SLI_REV4) {
11297 for (i = 0; i < psli->num_rings; i++) {
11298 pring = &psli->sli3_ring[i];
11299 /* Only slow rings */
11300 if (pring->ringno == LPFC_ELS_RING) {
11301 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11302 /* Set the lpfc data pending flag */
11303 set_bit(LPFC_DATA_READY, &phba->data_flags);
11305 list_splice_init(&pring->txq, &completions);
11308 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11312 spin_lock(&pring->ring_lock);
11313 list_splice_init(&pring->txq, &completions);
11314 spin_unlock(&pring->ring_lock);
11315 if (pring == phba->sli4_hba.els_wq->pring) {
11316 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11317 /* Set the lpfc data pending flag */
11318 set_bit(LPFC_DATA_READY, &phba->data_flags);
11322 spin_unlock_irqrestore(&phba->hbalock, flags);
11324 /* Cancel all the IOCBs from the completions list */
11325 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11328 spin_lock_irqsave(&phba->hbalock, flags);
11329 list_splice_init(&phba->elsbuf, &completions);
11330 phba->elsbuf_cnt = 0;
11331 phba->elsbuf_prev_cnt = 0;
11332 spin_unlock_irqrestore(&phba->hbalock, flags);
11334 while (!list_empty(&completions)) {
11335 list_remove_head(&completions, buf_ptr,
11336 struct lpfc_dmabuf, list);
11337 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
11341 /* Enable softirqs again, done with phba->hbalock */
11344 /* Return any active mbox cmds */
11345 del_timer_sync(&psli->mbox_tmo);
11347 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
11348 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
11349 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
11355 * lpfc_sli_pcimem_bcopy - SLI memory copy function
11356 * @srcp: Source memory pointer.
11357 * @destp: Destination memory pointer.
11358 * @cnt: Number of bytes to copy; must be a multiple of sizeof(uint32_t).
11360 * This function is used for copying data between driver memory
11361 * and the SLI memory. This function also changes the endianness
11362 * of each word if native endianness is different from SLI
11363 * endianness. This function can be called with or without
11364 * lock.
11367 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
11369 uint32_t *src = srcp;
11370 uint32_t *dest = destp;
11374 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
11376 ldata = le32_to_cpu(ldata);
11385 * lpfc_sli_bemem_bcopy - SLI memory copy function
11386 * @srcp: Source memory pointer.
11387 * @destp: Destination memory pointer.
11388 * @cnt: Number of bytes to copy; must be a multiple of sizeof(uint32_t).
11390 * This function is used for copying data between a data structure
11391 * with big endian representation to local endianness.
11392 * This function can be called with or without lock.
11395 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
11397 uint32_t *src = srcp;
11398 uint32_t *dest = destp;
11402 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
11404 ldata = be32_to_cpu(ldata);
11412 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
11413 * @phba: Pointer to HBA context object.
11414 * @pring: Pointer to driver SLI ring object.
11415 * @mp: Pointer to driver buffer object.
11417 * This function is called with no lock held.
11418 * It always returns zero after adding the buffer to the postbufq
11419 * buffer list.
11422 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11423 struct lpfc_dmabuf *mp)
11425 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
11427 spin_lock_irq(&phba->hbalock);
11428 list_add_tail(&mp->list, &pring->postbufq);
11429 pring->postbufq_cnt++;
11430 spin_unlock_irq(&phba->hbalock);
11435 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
11436 * @phba: Pointer to HBA context object.
11438 * When HBQ is enabled, buffers are searched based on tags. This function
11439 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
11440 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
11441 * does not conflict with tags of buffer posted for unsolicited events.
11442 * The function returns the allocated tag. The function is called with
11443 * no lock held.
11446 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
11448 spin_lock_irq(&phba->hbalock);
11449 phba->buffer_tag_count++;
11451 * Always set the QUE_BUFTAG_BIT to distinguish this tag
11452 * from a tag assigned by HBQ.
11454 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
11455 spin_unlock_irq(&phba->hbalock);
11456 return phba->buffer_tag_count;
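/* Example (illustrative only, not from the original source): a buffer
 * posted with CMD_QUE_XRI64_CX records the tag returned here (with
 * QUE_BUFTAG_BIT set), and the matching CMD_IOCB_RET_XRI64_CX completion
 * is resolved back to the buffer via lpfc_sli_ring_taggedbuf_get() below.
 */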
11460 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
11461 * @phba: Pointer to HBA context object.
11462 * @pring: Pointer to driver SLI ring object.
11463 * @tag: Buffer tag.
11465 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
11466 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
11467 * iocb is posted to the response ring with the tag of the buffer.
11468 * This function searches the pring->postbufq list using the tag
11469 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
11470 * iocb. If the buffer is found then the lpfc_dmabuf object of the
11471 * buffer is returned to the caller, else NULL is returned.
11472 * This function is called with no lock held.
11474 struct lpfc_dmabuf *
11475 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11478 struct lpfc_dmabuf *mp, *next_mp;
11479 struct list_head *slp = &pring->postbufq;
11481 /* Search postbufq, from the beginning, looking for a match on tag */
11482 spin_lock_irq(&phba->hbalock);
11483 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11484 if (mp->buffer_tag == tag) {
11485 list_del_init(&mp->list);
11486 pring->postbufq_cnt--;
11487 spin_unlock_irq(&phba->hbalock);
11488 return mp;
11492 spin_unlock_irq(&phba->hbalock);
11493 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11494 "0402 Cannot find virtual addr for buffer tag on "
11495 "ring %d Data x%lx x%px x%px x%x\n",
11496 pring->ringno, (unsigned long) tag,
11497 slp->next, slp->prev, pring->postbufq_cnt);
11498 return NULL;
11503 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
11504 * @phba: Pointer to HBA context object.
11505 * @pring: Pointer to driver SLI ring object.
11506 * @phys: DMA address of the buffer.
11508 * This function searches the buffer list using the dma_address
11509 * of an unsolicited event to find the driver's lpfc_dmabuf object
11510 * corresponding to the dma_address. The function returns the
11511 * lpfc_dmabuf object if a buffer is found, else it returns NULL.
11512 * This function is called by the ct and els unsolicited event
11513 * handlers to get the buffer associated with the unsolicited
11516 * This function is called with no lock held.
11518 struct lpfc_dmabuf *
11519 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11522 struct lpfc_dmabuf *mp, *next_mp;
11523 struct list_head *slp = &pring->postbufq;
11525 /* Search postbufq, from the beginning, looking for a match on phys */
11526 spin_lock_irq(&phba->hbalock);
11527 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11528 if (mp->phys == phys) {
11529 list_del_init(&mp->list);
11530 pring->postbufq_cnt--;
11531 spin_unlock_irq(&phba->hbalock);
11532 return mp;
11536 spin_unlock_irq(&phba->hbalock);
11537 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11538 "0410 Cannot find virtual addr for mapped buf on "
11539 "ring %d Data x%llx x%px x%px x%x\n",
11540 pring->ringno, (unsigned long long)phys,
11541 slp->next, slp->prev, pring->postbufq_cnt);
11542 return NULL;
11546 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
11547 * @phba: Pointer to HBA context object.
11548 * @cmdiocb: Pointer to driver command iocb object.
11549 * @rspiocb: Pointer to driver response iocb object.
11551 * This function is the completion handler for the abort iocbs for
11552 * ELS commands. This function is called from the ELS ring event
11553 * handler with no lock held. This function frees memory resources
11554 * associated with the abort iocb.
11557 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11558 struct lpfc_iocbq *rspiocb)
11560 IOCB_t *irsp = &rspiocb->iocb;
11561 uint16_t abort_iotag, abort_context;
11562 struct lpfc_iocbq *abort_iocb = NULL;
11564 if (irsp->ulpStatus) {
11567 * Assume that the port already completed and returned, or
11568 * will return the iocb. Just log the message.
11570 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
11571 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
11573 spin_lock_irq(&phba->hbalock);
11574 if (phba->sli_rev < LPFC_SLI_REV4) {
11575 if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
11576 irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11577 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
11578 spin_unlock_irq(&phba->hbalock);
11581 if (abort_iotag != 0 &&
11582 abort_iotag <= phba->sli.last_iotag)
11584 phba->sli.iocbq_lookup[abort_iotag];
11586 /* For sli4 the abort_tag is the XRI,
11587 * so the abort routine puts the iotag of the iocb
11588 * being aborted in the context field of the abort
11591 abort_iocb = phba->sli.iocbq_lookup[abort_context];
11593 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
11594 "0327 Cannot abort els iocb x%px "
11595 "with tag %x context %x, abort status %x, "
11597 abort_iocb, abort_iotag, abort_context,
11598 irsp->ulpStatus, irsp->un.ulpWord[4]);
11600 spin_unlock_irq(&phba->hbalock);
11603 lpfc_sli_release_iocbq(phba, cmdiocb);
11608 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
11609 * @phba: Pointer to HBA context object.
11610 * @cmdiocb: Pointer to driver command iocb object.
11611 * @rspiocb: Pointer to driver response iocb object.
11613 * The function is called from SLI ring event handler with no
11614 * lock held. This function is the completion handler for ELS commands
11615 * which are aborted. The function frees memory resources used for
11616 * the aborted ELS commands.
11619 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11620 struct lpfc_iocbq *rspiocb)
11622 IOCB_t *irsp = &rspiocb->iocb;
11624 /* ELS cmd tag <ulpIoTag> completes */
11625 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11626 "0139 Ignoring ELS cmd tag x%x completion Data: "
11628 irsp->ulpIoTag, irsp->ulpStatus,
11629 irsp->un.ulpWord[4], irsp->ulpTimeout);
11630 lpfc_nlp_put((struct lpfc_nodelist *)cmdiocb->context1);
11631 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11632 lpfc_ct_free_iocb(phba, cmdiocb);
11634 lpfc_els_free_iocb(phba, cmdiocb);
11638 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
11639 * @phba: Pointer to HBA context object.
11640 * @pring: Pointer to driver SLI ring object.
11641 * @cmdiocb: Pointer to driver command iocb object.
11642 * @cmpl: completion function.
11644 * This function issues an abort iocb for the provided command iocb. In case
11645 * of unloading, the abort iocb will not be issued to commands on the ELS
11646 * ring. Instead, the completion callback of those commands is changed
11647 * so that nothing happens when they finish. This function is called with
11648 * hbalock held and no ring_lock held (SLI4). The function returns IOCB_SUCCESS
11649 * when the command iocb is an abort request.
11653 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11654 struct lpfc_iocbq *cmdiocb, void *cmpl)
11656 struct lpfc_vport *vport = cmdiocb->vport;
11657 struct lpfc_iocbq *abtsiocbp;
11658 IOCB_t *icmd = NULL;
11659 IOCB_t *iabt = NULL;
11660 int retval = IOCB_ERROR;
11661 unsigned long iflags;
11662 struct lpfc_nodelist *ndlp;
11665 * There are certain command types we don't want to abort. And we
11666 * don't want to abort commands that are already in the process of
11667 * being aborted.
11668 */
11669 icmd = &cmdiocb->iocb;
11670 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11671 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11672 cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED)
11673 return IOCB_ABORTING;
11675 if (!pring) {
11676 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11677 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11678 else
11679 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11680 return retval;
11681 }
11684 * If we're unloading, don't abort iocb on the ELS ring, but change
11685 * the callback so that nothing happens when it finishes.
11687 if ((vport->load_flag & FC_UNLOADING) &&
11688 pring->ringno == LPFC_ELS_RING) {
11689 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11690 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11691 else
11692 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11693 return retval;
11694 }
11696 /* issue ABTS for this IOCB based on iotag */
11697 abtsiocbp = __lpfc_sli_get_iocbq(phba);
11698 if (abtsiocbp == NULL)
11699 return IOCB_NORESOURCE;
11701 /* This signals the response to set the correct status
11702 * before calling the completion handler
11704 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11706 iabt = &abtsiocbp->iocb;
11707 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11708 iabt->un.acxri.abortContextTag = icmd->ulpContext;
11709 if (phba->sli_rev == LPFC_SLI_REV4) {
11710 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
11711 if (pring->ringno == LPFC_ELS_RING)
11712 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
11714 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
11715 if (pring->ringno == LPFC_ELS_RING) {
11716 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11717 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11721 iabt->ulpClass = icmd->ulpClass;
11723 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11724 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
11725 if (cmdiocb->iocb_flag & LPFC_IO_FCP) {
11726 abtsiocbp->iocb_flag |= LPFC_IO_FCP;
11727 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
11729 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11730 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
11732 if (phba->link_state >= LPFC_LINK_UP)
11733 iabt->ulpCommand = CMD_ABORT_XRI_CN;
11734 else
11735 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
11738 abtsiocbp->iocb_cmpl = cmpl;
11740 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
11741 abtsiocbp->vport = vport;
11743 if (phba->sli_rev == LPFC_SLI_REV4) {
11744 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11745 if (unlikely(pring == NULL))
11746 goto abort_iotag_exit;
11747 /* Note: both hbalock and ring_lock need to be held here */
11748 spin_lock_irqsave(&pring->ring_lock, iflags);
11749 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11751 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11753 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11759 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11760 "0339 Abort xri x%x, original iotag x%x, "
11761 "abort cmd iotag x%x retval x%x\n",
11762 iabt->un.acxri.abortIoTag,
11763 iabt->un.acxri.abortContextTag,
11764 abtsiocbp->iotag, retval);
11767 cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
11768 __lpfc_sli_release_iocbq(phba, abtsiocbp);
11772 * The caller of this routine should check for IOCB_ERROR
11773 * and handle it properly. This routine no longer removes
11774 * the iocb from the txcmplq or calls compl in case of IOCB_ERROR.
11780 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
11781 * @phba: pointer to lpfc HBA data structure.
11783 * This routine will abort all pending and outstanding iocbs to an HBA.
11786 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11788 struct lpfc_sli *psli = &phba->sli;
11789 struct lpfc_sli_ring *pring;
11790 struct lpfc_queue *qp = NULL;
11793 if (phba->sli_rev != LPFC_SLI_REV4) {
11794 for (i = 0; i < psli->num_rings; i++) {
11795 pring = &psli->sli3_ring[i];
11796 lpfc_sli_abort_iocb_ring(phba, pring);
11800 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11804 lpfc_sli_abort_iocb_ring(phba, pring);
11809 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
11810 * @iocbq: Pointer to driver iocb object.
11811 * @vport: Pointer to driver virtual port object.
11812 * @tgt_id: SCSI ID of the target.
11813 * @lun_id: LUN ID of the scsi device.
11814 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
11816 * This function acts as an iocb filter for functions which abort or count
11817 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
11818 * 0 if the filtering criteria are met for the given iocb and will return
11819 * 1 if the filtering criteria are not met.
11820 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
11821 * given iocb is for the SCSI device specified by vport, tgt_id and
11822 * lun_id parameters.
11823 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
11824 * given iocb is for the SCSI target specified by vport and tgt_id
11825 * parameters.
11826 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
11827 * given iocb is for the SCSI host associated with the given vport.
11828 * This function is called with no locks held.
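*
* Typical use is as a filter inside an iotag-table walk, as in
* lpfc_sli_sum_iocb() and lpfc_sli_abort_iocb() below (sketch):
*
*	for (i = 1; i <= phba->sli.last_iotag; i++) {
*		iocbq = phba->sli.iocbq_lookup[i];
*		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id,
*					       lun_id, LPFC_CTX_LUN))
*			continue;
*		... act on the matching FCP iocb ...
*	}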
11831 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11832 uint16_t tgt_id, uint64_t lun_id,
11833 lpfc_ctx_cmd ctx_cmd)
11835 struct lpfc_io_buf *lpfc_cmd;
11836 IOCB_t *icmd = NULL;
11839 if (!iocbq || iocbq->vport != vport)
11842 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11843 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) ||
11844 iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11847 icmd = &iocbq->iocb;
11848 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11849 icmd->ulpCommand == CMD_CLOSE_XRI_CN)
11852 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11854 if (lpfc_cmd->pCmd == NULL)
11859 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11860 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11861 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
11865 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11866 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
11869 case LPFC_CTX_HOST:
11873 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
11874 __func__, ctx_cmd);
11882 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
11883 * @vport: Pointer to virtual port.
11884 * @tgt_id: SCSI ID of the target.
11885 * @lun_id: LUN ID of the scsi device.
11886 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11888 * This function returns the number of FCP commands pending for the vport.
11889 * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
11890 * commands pending on the vport associated with the SCSI device specified
11891 * by the tgt_id and lun_id parameters.
11892 * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
11893 * commands pending on the vport associated with the SCSI target specified
11894 * by the tgt_id parameter.
11895 * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
11896 * commands pending on the vport.
11897 * This function returns the number of iocbs which satisfy the filter.
11898 * This function is called without any lock held.
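*
* Example sketch: polling the count of FCP commands still outstanding on one
* LUN, e.g. while waiting for a LUN reset to drain (the backoff policy is
* illustrative only):
*
*	while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN))
*		msleep(20);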
11901 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11902 lpfc_ctx_cmd ctx_cmd)
11904 struct lpfc_hba *phba = vport->phba;
11905 struct lpfc_iocbq *iocbq;
11908 spin_lock_irq(&phba->hbalock);
11909 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11910 iocbq = phba->sli.iocbq_lookup[i];
11912 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11916 spin_unlock_irq(&phba->hbalock);
11922 * lpfc_sli4_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
11923 * @phba: Pointer to HBA context object
11924 * @cmdiocb: Pointer to command iocb object.
11925 * @wcqe: pointer to the complete wcqe
11927 * This function is called when an aborted FCP iocb completes. This
11928 * function is called by the ring event handler with no lock held.
11929 * This function frees the iocb. It is called for sli-4 adapters.
11932 lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11933 struct lpfc_wcqe_complete *wcqe)
11935 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11936 "3017 ABORT_XRI_CN completing on rpi x%x "
11937 "original iotag x%x, abort cmd iotag x%x "
11938 "status 0x%x, reason 0x%x\n",
11939 cmdiocb->iocb.un.acxri.abortContextTag,
11940 cmdiocb->iocb.un.acxri.abortIoTag,
11942 (bf_get(lpfc_wcqe_c_status, wcqe)
11943 & LPFC_IOCB_STATUS_MASK),
11945 lpfc_sli_release_iocbq(phba, cmdiocb);
11949 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
11950 * @phba: Pointer to HBA context object
11951 * @cmdiocb: Pointer to command iocb object.
11952 * @rspiocb: Pointer to response iocb object.
11954 * This function is called when an aborted FCP iocb completes. This
11955 * function is called by the ring event handler with no lock held.
11956 * This function frees the iocb.
11959 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11960 struct lpfc_iocbq *rspiocb)
11962 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11963 "3096 ABORT_XRI_CN completing on rpi x%x "
11964 "original iotag x%x, abort cmd iotag x%x "
11965 "status 0x%x, reason 0x%x\n",
11966 cmdiocb->iocb.un.acxri.abortContextTag,
11967 cmdiocb->iocb.un.acxri.abortIoTag,
11968 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11969 rspiocb->iocb.un.ulpWord[4]);
11970 lpfc_sli_release_iocbq(phba, cmdiocb);
11975 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
11976 * @vport: Pointer to virtual port.
11977 * @tgt_id: SCSI ID of the target.
11978 * @lun_id: LUN ID of the scsi device.
11979 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11981 * This function sends an abort command for every SCSI command
11982 * associated with the given virtual port pending on the ring
11983 * filtered by the lpfc_sli_validate_fcp_iocb function.
11984 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
11985 * FCP iocbs associated with the lun specified by the tgt_id and lun_id
11986 * parameters.
11987 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
11988 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11989 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
11990 * FCP iocbs associated with virtual port.
11991 * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING]; for SLI4,
11992 * lpfc_sli4_calc_ring is used.
11993 * This function returns the number of iocbs it failed to abort.
11994 * This function is called with no locks held.
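*
* Example sketch (target-level cleanup; lun_id is ignored for LPFC_CTX_TGT,
* so 0 is passed):
*
*	errcnt = lpfc_sli_abort_iocb(vport, tgt_id, 0, LPFC_CTX_TGT);
*	if (errcnt)
*		... some aborts could not be issued; caller must escalate ...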
11997 lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
11998 lpfc_ctx_cmd abort_cmd)
12000 struct lpfc_hba *phba = vport->phba;
12001 struct lpfc_sli_ring *pring = NULL;
12002 struct lpfc_iocbq *iocbq;
12003 int errcnt = 0, ret_val = 0;
12004 unsigned long iflags;
12006 void *fcp_cmpl = NULL;
12008 /* all I/Os are in the process of being flushed */
12009 if (phba->hba_flag & HBA_IOQ_FLUSH)
12012 for (i = 1; i <= phba->sli.last_iotag; i++) {
12013 iocbq = phba->sli.iocbq_lookup[i];
12015 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12019 spin_lock_irqsave(&phba->hbalock, iflags);
12020 if (phba->sli_rev == LPFC_SLI_REV3) {
12021 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12022 fcp_cmpl = lpfc_sli_abort_fcp_cmpl;
12023 } else if (phba->sli_rev == LPFC_SLI_REV4) {
12024 pring = lpfc_sli4_calc_ring(phba, iocbq);
12025 fcp_cmpl = lpfc_sli4_abort_fcp_cmpl;
12027 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
12029 spin_unlock_irqrestore(&phba->hbalock, iflags);
12030 if (ret_val != IOCB_SUCCESS)
12038 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
12039 * @vport: Pointer to virtual port.
12040 * @pring: Pointer to driver SLI ring object.
12041 * @tgt_id: SCSI ID of the target.
12042 * @lun_id: LUN ID of the scsi device.
12043 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12045 * This function sends an abort command for every SCSI command
12046 * associated with the given virtual port pending on the ring
12047 * filtered by the lpfc_sli_validate_fcp_iocb function.
12048 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
12049 * FCP iocbs associated with the lun specified by the tgt_id and lun_id
12050 * parameters.
12051 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
12052 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
12053 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
12054 * FCP iocbs associated with virtual port.
12055 * This function returns the number of iocbs it aborted.
12056 * This function is called with no locks held right after a taskmgmt
12057 * command is sent.
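*
* Example sketch, modeled on the SCSI-layer callers (assumption): after a
* LUN reset TMF completes, close out any I/O still outstanding to that LUN:
*
*	cnt = lpfc_sli_abort_taskmgmt(vport,
*			&phba->sli.sli3_ring[LPFC_FCP_RING],
*			tgt_id, lun_id, LPFC_CTX_LUN);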
12060 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
12061 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
12063 struct lpfc_hba *phba = vport->phba;
12064 struct lpfc_io_buf *lpfc_cmd;
12065 struct lpfc_iocbq *abtsiocbq;
12066 struct lpfc_nodelist *ndlp;
12067 struct lpfc_iocbq *iocbq;
12069 int sum, i, ret_val;
12070 unsigned long iflags;
12071 struct lpfc_sli_ring *pring_s4 = NULL;
12073 spin_lock_irqsave(&phba->hbalock, iflags);
12075 /* all I/Os are in the process of being flushed */
12076 if (phba->hba_flag & HBA_IOQ_FLUSH) {
12077 spin_unlock_irqrestore(&phba->hbalock, iflags);
12082 for (i = 1; i <= phba->sli.last_iotag; i++) {
12083 iocbq = phba->sli.iocbq_lookup[i];
12085 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12089 /* Guard against IO completion being called at same time */
12090 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12091 spin_lock(&lpfc_cmd->buf_lock);
12093 if (!lpfc_cmd->pCmd) {
12094 spin_unlock(&lpfc_cmd->buf_lock);
12098 if (phba->sli_rev == LPFC_SLI_REV4) {
12100 phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
12102 spin_unlock(&lpfc_cmd->buf_lock);
12105 /* Note: both hbalock and ring_lock must be held here */
12106 spin_lock(&pring_s4->ring_lock);
12110 * If the iocbq is already being aborted, don't take a second
12111 * action, but do count it.
12113 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
12114 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
12115 if (phba->sli_rev == LPFC_SLI_REV4)
12116 spin_unlock(&pring_s4->ring_lock);
12117 spin_unlock(&lpfc_cmd->buf_lock);
12121 /* issue ABTS for this IOCB based on iotag */
12122 abtsiocbq = __lpfc_sli_get_iocbq(phba);
12124 if (phba->sli_rev == LPFC_SLI_REV4)
12125 spin_unlock(&pring_s4->ring_lock);
12126 spin_unlock(&lpfc_cmd->buf_lock);
12130 icmd = &iocbq->iocb;
12131 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
12132 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
12133 if (phba->sli_rev == LPFC_SLI_REV4)
12134 abtsiocbq->iocb.un.acxri.abortIoTag =
12135 iocbq->sli4_xritag;
12137 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
12138 abtsiocbq->iocb.ulpLe = 1;
12139 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
12140 abtsiocbq->vport = vport;
12142 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
12143 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
12144 if (iocbq->iocb_flag & LPFC_IO_FCP)
12145 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
12146 if (iocbq->iocb_flag & LPFC_IO_FOF)
12147 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
12149 ndlp = lpfc_cmd->rdata->pnode;
12151 if (lpfc_is_link_up(phba) &&
12152 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
12153 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
12155 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
12157 /* Setup callback routine and issue the command. */
12158 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
12161 * Indicate the IO is being aborted by the driver and set
12162 * the caller's flag in the aborted IO.
12164 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
12166 if (phba->sli_rev == LPFC_SLI_REV4) {
12167 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
12169 spin_unlock(&pring_s4->ring_lock);
12171 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
12175 spin_unlock(&lpfc_cmd->buf_lock);
12177 if (ret_val == IOCB_ERROR)
12178 __lpfc_sli_release_iocbq(phba, abtsiocbq);
12182 spin_unlock_irqrestore(&phba->hbalock, iflags);
12187 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
12188 * @phba: Pointer to HBA context object.
12189 * @cmdiocbq: Pointer to command iocb.
12190 * @rspiocbq: Pointer to response iocb.
12192 * This function is the completion handler for iocbs issued using
12193 * lpfc_sli_issue_iocb_wait function. This function is called by the
12194 * ring event handler function without any lock held. This function
12195 * can be called from both worker thread context and interrupt
12196 * context. This function can also be called from another thread which
12197 * cleans up the SLI layer objects.
12198 * This function copies the contents of the response iocb to the
12199 * response iocb memory object provided by the caller of
12200 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
12201 * sleeps for the iocb completion.
12204 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
12205 struct lpfc_iocbq *cmdiocbq,
12206 struct lpfc_iocbq *rspiocbq)
12208 wait_queue_head_t *pdone_q;
12209 unsigned long iflags;
12210 struct lpfc_io_buf *lpfc_cmd;
12212 spin_lock_irqsave(&phba->hbalock, iflags);
12213 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
12216 * A time out has occurred for the iocb. If a time out
12217 * completion handler has been supplied, call it. Otherwise,
12218 * just free the iocbq.
12221 spin_unlock_irqrestore(&phba->hbalock, iflags);
12222 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
12223 cmdiocbq->wait_iocb_cmpl = NULL;
12224 if (cmdiocbq->iocb_cmpl)
12225 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
12227 lpfc_sli_release_iocbq(phba, cmdiocbq);
12231 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
12232 if (cmdiocbq->context2 && rspiocbq)
12233 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
12234 &rspiocbq->iocb, sizeof(IOCB_t));
12236 /* Set the exchange busy flag for task management commands */
12237 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
12238 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
12239 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
12241 if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
12242 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
12244 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
12247 pdone_q = cmdiocbq->context_un.wait_queue;
12250 spin_unlock_irqrestore(&phba->hbalock, iflags);
12255 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
12256 * @phba: Pointer to HBA context object.
12257 * @piocbq: Pointer to command iocb.
12258 * @flag: Flag to test.
12260 * This routine grabs the hbalock and then tests the iocb_flag to
12261 * see if the passed in flag is set.
12263 * 1 if flag is set.
12264 * 0 if flag is not set.
12267 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
12268 struct lpfc_iocbq *piocbq, uint32_t flag)
12270 unsigned long iflags;
12273 spin_lock_irqsave(&phba->hbalock, iflags);
12274 ret = piocbq->iocb_flag & flag;
12275 spin_unlock_irqrestore(&phba->hbalock, iflags);
12281 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
12282 * @phba: Pointer to HBA context object.
12283 * @ring_number: Ring number
12284 * @piocb: Pointer to command iocb.
12285 * @prspiocbq: Pointer to response iocb.
12286 * @timeout: Timeout in number of seconds.
12288 * This function issues the iocb to firmware and waits for the
12289 * iocb to complete. The iocb_cmpl field of the iocb shall be used
12290 * to handle iocbs which time out. If the field is NULL, the
12291 * function shall free the iocbq structure. If more clean up is
12292 * needed, the caller is expected to provide a completion function
12293 * that will provide the needed clean up. If the iocb command is
12294 * not completed within timeout seconds, the function will either
12295 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
12296 * completion function set in the iocb_cmpl field and then return
12297 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
12298 * resources if this function returns IOCB_TIMEDOUT.
12299 * The function waits for the iocb completion using a
12300 * non-interruptible wait.
12301 * This function will sleep while waiting for iocb completion.
12302 * So, this function should not be called from any context which
12303 * does not allow sleeping. Due to the same reason, this function
12304 * cannot be called with interrupt disabled.
12305 * This function assumes that the iocb completions occur while
12306 * this function sleeps. So, this function cannot be called from
12307 * the thread which processes iocb completion for this ring.
12308 * This function clears the iocb_flag of the iocb object before
12309 * issuing the iocb and the iocb completion handler sets this
12310 * flag and wakes this thread when the iocb completes.
12311 * The contents of the response iocb will be copied to prspiocbq
12312 * by the completion handler when the command completes.
12313 * This function returns IOCB_SUCCESS on success.
12314 * This function is called with no lock held.
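*
* Caller sketch (hypothetical: @piocb is a fully built command iocb and
* @rsp is a spare iocbq that receives the response contents; the 30 second
* timeout is illustrative):
*
*	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, piocb, rsp, 30);
*	switch (rc) {
*	case IOCB_SUCCESS:
*		... inspect rsp->iocb.ulpStatus for the command result ...
*		break;
*	case IOCB_TIMEDOUT:
*		... do not free piocb; the completion path owns it now ...
*		break;
*	default:
*		... IOCB_ERROR/IOCB_BUSY: the iocb was never issued ...
*	}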
12317 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
12318 uint32_t ring_number,
12319 struct lpfc_iocbq *piocb,
12320 struct lpfc_iocbq *prspiocbq,
12323 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
12324 long timeleft, timeout_req = 0;
12325 int retval = IOCB_SUCCESS;
12327 struct lpfc_iocbq *iocb;
12329 int txcmplq_cnt = 0;
12330 struct lpfc_sli_ring *pring;
12331 unsigned long iflags;
12332 bool iocb_completed = true;
12334 if (phba->sli_rev >= LPFC_SLI_REV4)
12335 pring = lpfc_sli4_calc_ring(phba, piocb);
12337 pring = &phba->sli.sli3_ring[ring_number];
12339 * If the caller has provided a response iocbq buffer, then context2
12340 * must be NULL or it's an error.
12343 if (piocb->context2)
12345 piocb->context2 = prspiocbq;
12348 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
12349 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
12350 piocb->context_un.wait_queue = &done_q;
12351 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
12353 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12354 if (lpfc_readl(phba->HCregaddr, &creg_val))
12356 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
12357 writel(creg_val, phba->HCregaddr);
12358 readl(phba->HCregaddr); /* flush */
12361 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
12362 SLI_IOCB_RET_IOCB);
12363 if (retval == IOCB_SUCCESS) {
12364 timeout_req = msecs_to_jiffies(timeout * 1000);
12365 timeleft = wait_event_timeout(done_q,
12366 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
12368 spin_lock_irqsave(&phba->hbalock, iflags);
12369 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
12372 * IOCB timed out. Inform the wake iocb wait
12373 * completion function and set local status
12376 iocb_completed = false;
12377 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
12379 spin_unlock_irqrestore(&phba->hbalock, iflags);
12380 if (iocb_completed) {
12381 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12382 "0331 IOCB wake signaled\n");
12383 /* Note: we are not indicating if the IOCB has a success
12384 * status or not - that's for the caller to check.
12385 * IOCB_SUCCESS means just that the command was sent and
12386 * completed. Not that it completed successfully.
12388 } else if (timeleft == 0) {
12389 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12390 "0338 IOCB wait timeout error - no "
12391 "wake response Data x%x\n", timeout);
12392 retval = IOCB_TIMEDOUT;
12394 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12395 "0330 IOCB wake NOT set, "
12397 timeout, (timeleft / jiffies));
12398 retval = IOCB_TIMEDOUT;
12400 } else if (retval == IOCB_BUSY) {
12401 if (phba->cfg_log_verbose & LOG_SLI) {
12402 list_for_each_entry(iocb, &pring->txq, list) {
12405 list_for_each_entry(iocb, &pring->txcmplq, list) {
12408 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12409 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
12410 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
12414 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12415 "0332 IOCB wait issue failed, Data x%x\n",
12417 retval = IOCB_ERROR;
12420 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12421 if (lpfc_readl(phba->HCregaddr, &creg_val))
12423 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
12424 writel(creg_val, phba->HCregaddr);
12425 readl(phba->HCregaddr); /* flush */
12429 piocb->context2 = NULL;
12431 piocb->context_un.wait_queue = NULL;
12432 piocb->iocb_cmpl = NULL;
12437 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
12438 * @phba: Pointer to HBA context object.
12439 * @pmboxq: Pointer to driver mailbox object.
12440 * @timeout: Timeout in number of seconds.
12442 * This function issues the mailbox to firmware and waits for the
12443 * mailbox command to complete. If the mailbox command is not
12444 * completed within timeout seconds, it returns MBX_TIMEOUT.
12445 * The function waits for the mailbox completion with a timeout
12446 * rather than an interruptible wait. If the wait times out before
12447 * the mailbox command completes, MBX_TIMEOUT is returned. The caller
12448 * should not free the mailbox resources if this function returns
12449 * MBX_TIMEOUT.
12450 * This function will sleep while waiting for mailbox completion.
12451 * So, this function should not be called from any context which
12452 * does not allow sleeping. Due to the same reason, this function
12453 * cannot be called with interrupt disabled.
12454 * This function assumes that the mailbox completion occurs while
12455 * this function sleeps. So, this function cannot be called from
12456 * the worker thread which processes mailbox completion.
12457 * This function is called in the context of HBA management
12458 * applications.
12459 * This function returns MBX_SUCCESS when successful.
12460 * This function is called with no lock held.
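*
* Caller sketch (hypothetical: the mailbox command itself is built by one of
* the lpfc_mbox helpers before issuing):
*
*	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
*	if (!pmboxq)
*		return -ENOMEM;
*	... build the mailbox command in pmboxq here ...
*	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
*	if (rc == MBX_SUCCESS)
*		... consume the completed mailbox in pmboxq->u.mb ...
*	if (rc != MBX_TIMEOUT)
*		mempool_free(pmboxq, phba->mbox_mem_pool);
*
* On MBX_TIMEOUT the mailbox is intentionally not freed here; the default
* completion handler releases it when the command finally completes.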
12463 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
12466 struct completion mbox_done;
12468 unsigned long flag;
12470 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
12471 /* setup wake call as the mailbox completion callback */
12472 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
12474 /* setup context3 field to pass the completion pointer to the wake function */
12475 init_completion(&mbox_done);
12476 pmboxq->context3 = &mbox_done;
12477 /* now issue the command */
12478 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
12479 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
12480 wait_for_completion_timeout(&mbox_done,
12481 msecs_to_jiffies(timeout * 1000));
12483 spin_lock_irqsave(&phba->hbalock, flag);
12484 pmboxq->context3 = NULL;
12486 * if the LPFC_MBX_WAKE flag is set, the mailbox completed;
12487 * otherwise do not free the resources.
12489 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
12490 retval = MBX_SUCCESS;
12492 retval = MBX_TIMEOUT;
12493 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12495 spin_unlock_irqrestore(&phba->hbalock, flag);
12501 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
12502 * @phba: Pointer to HBA context.
12503 * @mbx_action: Mailbox shutdown options.
12505 * This function is called to shut down the driver's mailbox sub-system.
12506 * It first marks the mailbox sub-system as in a blocked state to prevent
12507 * asynchronous mailbox commands from being issued off the pending mailbox
12508 * command queue. If the mailbox command sub-system shutdown is due to
12509 * HBA error conditions such as EEH or ERATT, this routine shall invoke
12510 * the mailbox sub-system flush routine to forcefully bring down the
12511 * mailbox sub-system. Otherwise, if it is due to a normal condition (such
12512 * as with offline or HBA function reset), this routine will wait for the
12513 * outstanding mailbox command to complete before invoking the mailbox
12514 * sub-system flush routine to gracefully bring down the mailbox sub-system.
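*
* Example sketch (in_error_path is a hypothetical condition; real callers
* pick the action from how the shutdown was triggered):
*
*	if (in_error_path)
*		lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_NO_WAIT);
*	else
*		lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);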
12517 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
12519 struct lpfc_sli *psli = &phba->sli;
12520 unsigned long timeout;
12522 if (mbx_action == LPFC_MBX_NO_WAIT) {
12523 /* delay 100ms for port state */
12525 lpfc_sli_mbox_sys_flush(phba);
12528 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
12530 /* Disable softirqs, including timers from obtaining phba->hbalock */
12531 local_bh_disable();
12533 spin_lock_irq(&phba->hbalock);
12534 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
12536 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
12537 /* Determine how long we might wait for the active mailbox
12538 * command to be gracefully completed by firmware.
12540 if (phba->sli.mbox_active)
12541 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
12542 phba->sli.mbox_active) *
12544 spin_unlock_irq(&phba->hbalock);
12546 /* Enable softirqs again, done with phba->hbalock */
12549 while (phba->sli.mbox_active) {
12550 /* Check active mailbox complete status every 2ms */
12552 if (time_after(jiffies, timeout))
12553 /* Timeout, let the mailbox flush routine
12554 * forcefully release the active mailbox command
12559 spin_unlock_irq(&phba->hbalock);
12561 /* Enable softirqs again, done with phba->hbalock */
12565 lpfc_sli_mbox_sys_flush(phba);
12569 * lpfc_sli_eratt_read - read sli-3 error attention events
12570 * @phba: Pointer to HBA context.
12572 * This function is called to read the SLI3 device error attention registers
12573 * for possible error attention events. The caller must hold the hbalock
12574 * with spin_lock_irq().
12576 * This function returns 1 when there is Error Attention in the Host Attention
12577 * Register and returns 0 otherwise.
12580 lpfc_sli_eratt_read(struct lpfc_hba *phba)
12584 /* Read chip Host Attention (HA) register */
12585 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12588 if (ha_copy & HA_ERATT) {
12589 /* Read host status register to retrieve error event */
12590 if (lpfc_sli_read_hs(phba))
12593 /* Check if a deferred error condition is active */
12594 if ((HS_FFER1 & phba->work_hs) &&
12595 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12596 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
12597 phba->hba_flag |= DEFER_ERATT;
12598 /* Clear all interrupt enable conditions */
12599 writel(0, phba->HCregaddr);
12600 readl(phba->HCregaddr);
12603 /* Set the driver HA work bitmap */
12604 phba->work_ha |= HA_ERATT;
12605 /* Indicate polling handles this ERATT */
12606 phba->hba_flag |= HBA_ERATT_HANDLED;
12612 /* Set the driver HS work bitmap */
12613 phba->work_hs |= UNPLUG_ERR;
12614 /* Set the driver HA work bitmap */
12615 phba->work_ha |= HA_ERATT;
12616 /* Indicate polling handles this ERATT */
12617 phba->hba_flag |= HBA_ERATT_HANDLED;
12622 * lpfc_sli4_eratt_read - read sli-4 error attention events
12623 * @phba: Pointer to HBA context.
12625 * This function is called to read the SLI4 device error attention registers
12626 * for possible error attention events. The caller must hold the hbalock
12627 * with spin_lock_irq().
12629 * This function returns 1 when there is Error Attention in the Host Attention
12630 * Register and returns 0 otherwise.
12633 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
12635 uint32_t uerr_sta_hi, uerr_sta_lo;
12636 uint32_t if_type, portsmphr;
12637 struct lpfc_register portstat_reg;
12640 * For now, use the SLI4 device internal unrecoverable error
12641 * registers for error attention. This can be changed later.
12643 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12645 case LPFC_SLI_INTF_IF_TYPE_0:
12646 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
12648 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
12650 phba->work_hs |= UNPLUG_ERR;
12651 phba->work_ha |= HA_ERATT;
12652 phba->hba_flag |= HBA_ERATT_HANDLED;
12655 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
12656 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
12657 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12658 "1423 HBA Unrecoverable error: "
12659 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
12660 "ue_mask_lo_reg=0x%x, "
12661 "ue_mask_hi_reg=0x%x\n",
12662 uerr_sta_lo, uerr_sta_hi,
12663 phba->sli4_hba.ue_mask_lo,
12664 phba->sli4_hba.ue_mask_hi);
12665 phba->work_status[0] = uerr_sta_lo;
12666 phba->work_status[1] = uerr_sta_hi;
12667 phba->work_ha |= HA_ERATT;
12668 phba->hba_flag |= HBA_ERATT_HANDLED;
12672 case LPFC_SLI_INTF_IF_TYPE_2:
12673 case LPFC_SLI_INTF_IF_TYPE_6:
12674 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
12675 &portstat_reg.word0) ||
12676 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
12678 phba->work_hs |= UNPLUG_ERR;
12679 phba->work_ha |= HA_ERATT;
12680 phba->hba_flag |= HBA_ERATT_HANDLED;
12683 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
12684 phba->work_status[0] =
12685 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
12686 phba->work_status[1] =
12687 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
12688 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12689 "2885 Port Status Event: "
12690 "port status reg 0x%x, "
12691 "port smphr reg 0x%x, "
12692 "error 1=0x%x, error 2=0x%x\n",
12693 portstat_reg.word0,
12695 phba->work_status[0],
12696 phba->work_status[1]);
12697 phba->work_ha |= HA_ERATT;
12698 phba->hba_flag |= HBA_ERATT_HANDLED;
12702 case LPFC_SLI_INTF_IF_TYPE_1:
12704 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12705 "2886 HBA Error Attention on unsupported "
12706 "if type %d.", if_type);
12714 * lpfc_sli_check_eratt - check error attention events
12715 * @phba: Pointer to HBA context.
12717 * This function is called from timer soft interrupt context to check HBA's
12718 * error attention register bit for error attention events.
12720 * This function returns 1 when there is Error Attention in the Host Attention
12721 * Register and returns 0 otherwise.
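*
* Usage sketch: a timer callback can use the return value to decide whether
* to wake the worker thread, e.g.
*
*	if (lpfc_sli_check_eratt(phba))
*		lpfc_worker_wake_up(phba);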
12724 lpfc_sli_check_eratt(struct lpfc_hba *phba)
12728 /* If somebody is waiting to handle an eratt, don't process it
12729 * here. The brdkill function will do this.
12731 if (phba->link_flag & LS_IGNORE_ERATT)
12734 /* Check if interrupt handler handles this ERATT */
12735 spin_lock_irq(&phba->hbalock);
12736 if (phba->hba_flag & HBA_ERATT_HANDLED) {
12737 /* Interrupt handler has handled ERATT */
12738 spin_unlock_irq(&phba->hbalock);
12743 * If there is deferred error attention, do not check for error
12746 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12747 spin_unlock_irq(&phba->hbalock);
12751 /* If PCI channel is offline, don't process it */
12752 if (unlikely(pci_channel_offline(phba->pcidev))) {
12753 spin_unlock_irq(&phba->hbalock);
12757 switch (phba->sli_rev) {
12758 case LPFC_SLI_REV2:
12759 case LPFC_SLI_REV3:
12760 /* Read chip Host Attention (HA) register */
12761 ha_copy = lpfc_sli_eratt_read(phba);
12763 case LPFC_SLI_REV4:
12764 /* Read device Unrecoverable Error (UERR) registers */
12765 ha_copy = lpfc_sli4_eratt_read(phba);
12768 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12769 "0299 Invalid SLI revision (%d)\n",
12774 spin_unlock_irq(&phba->hbalock);
12780 * lpfc_intr_state_check - Check device state for interrupt handling
12781 * @phba: Pointer to HBA context.
12783 * This inline routine checks whether a device or its PCI slot is in a state
12784 * in which the interrupt should be handled.
12786 * This function returns 0 if the device or the PCI slot is in a state in
12787 * which the interrupt should be handled, otherwise -EIO.
12790 lpfc_intr_state_check(struct lpfc_hba *phba)
12792 /* If the pci channel is offline, ignore all the interrupts */
12793 if (unlikely(pci_channel_offline(phba->pcidev)))
12796 /* Update device level interrupt statistics */
12797 phba->sli.slistat.sli_intr++;
12799 /* Ignore all interrupts during initialization. */
12800 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12807 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
12808 * @irq: Interrupt number.
12809 * @dev_id: The device context pointer.
12811 * This function is directly called from the PCI layer as an interrupt
12812 * service routine when device with SLI-3 interface spec is enabled with
12813 * MSI-X multi-message interrupt mode and there are slow-path events in
12814 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
12815 * interrupt mode, this function is called as part of the device-level
12816 * interrupt handler. When the PCI slot is in error recovery or the HBA
12817 * is undergoing initialization, the interrupt handler will not process
12818 * the interrupt. The link attention and ELS ring attention events are
12819 * handled by the worker thread. The interrupt handler signals the worker
12820 * thread and returns for these events. This function is called without
12821 * any lock held. It gets the hbalock to access and update SLI data
12822 * structures.
12824 * This function returns IRQ_HANDLED when the interrupt is handled, else it
12825 * returns IRQ_NONE.
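*
* Wiring sketch (assumes MSI-X with the slow path on vector 0; the name
* string is illustrative and the real setup lives in the driver's init
* path):
*
*	rc = request_irq(pci_irq_vector(phba->pcidev, 0),
*			 lpfc_sli_sp_intr_handler, 0, "lpfc:sp", phba);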
12828 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
12830 struct lpfc_hba *phba;
12831 uint32_t ha_copy, hc_copy;
12832 uint32_t work_ha_copy;
12833 unsigned long status;
12834 unsigned long iflag;
12837 MAILBOX_t *mbox, *pmbox;
12838 struct lpfc_vport *vport;
12839 struct lpfc_nodelist *ndlp;
12840 struct lpfc_dmabuf *mp;
12845 * Get the driver's phba structure from the dev_id and
12846 * assume the HBA is not interrupting.
12848 phba = (struct lpfc_hba *)dev_id;
12850 if (unlikely(!phba))
12854 * Stuff needs to be attended to when this function is invoked as an
12855 * individual interrupt handler in MSI-X multi-message interrupt mode
12857 if (phba->intr_type == MSIX) {
12858 /* Check device state for handling interrupt */
12859 if (lpfc_intr_state_check(phba))
12861 /* Need to read HA REG for slow-path events */
12862 spin_lock_irqsave(&phba->hbalock, iflag);
12863 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12865 /* If somebody is waiting to handle an eratt don't process it
12866 * here. The brdkill function will do this.
12868 if (phba->link_flag & LS_IGNORE_ERATT)
12869 ha_copy &= ~HA_ERATT;
12870 /* Check the need for handling ERATT in interrupt handler */
12871 if (ha_copy & HA_ERATT) {
12872 if (phba->hba_flag & HBA_ERATT_HANDLED)
12873 /* ERATT polling has handled ERATT */
12874 ha_copy &= ~HA_ERATT;
12876 /* Indicate interrupt handler handles ERATT */
12877 phba->hba_flag |= HBA_ERATT_HANDLED;
12881 * If there is deferred error attention, do not check for any
12884 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12885 spin_unlock_irqrestore(&phba->hbalock, iflag);
12889 /* Clear up only attention source related to slow-path */
12890 if (lpfc_readl(phba->HCregaddr, &hc_copy))
12893 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12894 HC_LAINT_ENA | HC_ERINT_ENA),
12896 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12898 writel(hc_copy, phba->HCregaddr);
12899 readl(phba->HAregaddr); /* flush */
12900 spin_unlock_irqrestore(&phba->hbalock, iflag);
12902 ha_copy = phba->ha_copy;
12904 work_ha_copy = ha_copy & phba->work_ha_mask;
12906 if (work_ha_copy) {
12907 if (work_ha_copy & HA_LATT) {
12908 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
12910 * Turn off Link Attention interrupts
12911 * until CLEAR_LA done
12913 spin_lock_irqsave(&phba->hbalock, iflag);
12914 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
12915 if (lpfc_readl(phba->HCregaddr, &control))
12917 control &= ~HC_LAINT_ENA;
12918 writel(control, phba->HCregaddr);
12919 readl(phba->HCregaddr); /* flush */
12920 spin_unlock_irqrestore(&phba->hbalock, iflag);
12923 work_ha_copy &= ~HA_LATT;
12926 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
12928 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
12929 * the only slow ring.
12931 status = (work_ha_copy &
12932 (HA_RXMASK << (4*LPFC_ELS_RING)));
12933 status >>= (4*LPFC_ELS_RING);
12934 if (status & HA_RXMASK) {
12935 spin_lock_irqsave(&phba->hbalock, iflag);
12936 if (lpfc_readl(phba->HCregaddr, &control))
12939 lpfc_debugfs_slow_ring_trc(phba,
12940 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
12942 (uint32_t)phba->sli.slistat.sli_intr);
12944 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
12945 lpfc_debugfs_slow_ring_trc(phba,
12946 "ISR Disable ring:"
12947 "pwork:x%x hawork:x%x wait:x%x",
12948 phba->work_ha, work_ha_copy,
12949 (uint32_t)((unsigned long)
12950 &phba->work_waitq));
12953 ~(HC_R0INT_ENA << LPFC_ELS_RING);
12954 writel(control, phba->HCregaddr);
12955 readl(phba->HCregaddr); /* flush */
12958 lpfc_debugfs_slow_ring_trc(phba,
12959 "ISR slow ring: pwork:"
12960 "x%x hawork:x%x wait:x%x",
12961 phba->work_ha, work_ha_copy,
12962 (uint32_t)((unsigned long)
12963 &phba->work_waitq));
12965 spin_unlock_irqrestore(&phba->hbalock, iflag);
12968 spin_lock_irqsave(&phba->hbalock, iflag);
12969 if (work_ha_copy & HA_ERATT) {
12970 if (lpfc_sli_read_hs(phba))
12973 * Check if there is a deferred error condition
12976 if ((HS_FFER1 & phba->work_hs) &&
12977 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12978 HS_FFER6 | HS_FFER7 | HS_FFER8) &
12980 phba->hba_flag |= DEFER_ERATT;
12981 /* Clear all interrupt enable conditions */
12982 writel(0, phba->HCregaddr);
12983 readl(phba->HCregaddr);
12987 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
12988 pmb = phba->sli.mbox_active;
12989 pmbox = &pmb->u.mb;
12991 vport = pmb->vport;
12993 /* First check out the status word */
12994 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12995 if (pmbox->mbxOwner != OWN_HOST) {
12996 spin_unlock_irqrestore(&phba->hbalock, iflag);
12998 * Stray Mailbox Interrupt, mbxCommand <cmd>
12999 * mbxStatus <status>
13001 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13002 "(%d):0304 Stray Mailbox "
13003 "Interrupt mbxCommand x%x "
13005 (vport ? vport->vpi : 0),
13008 /* clear mailbox attention bit */
13009 work_ha_copy &= ~HA_MBATT;
13011 phba->sli.mbox_active = NULL;
13012 spin_unlock_irqrestore(&phba->hbalock, iflag);
13013 phba->last_completion_time = jiffies;
13014 del_timer(&phba->sli.mbox_tmo);
13015 if (pmb->mbox_cmpl) {
13016 lpfc_sli_pcimem_bcopy(mbox, pmbox,
13018 if (pmb->out_ext_byte_len &&
13020 lpfc_sli_pcimem_bcopy(
13023 pmb->out_ext_byte_len);
13025 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13026 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13028 lpfc_debugfs_disc_trc(vport,
13029 LPFC_DISC_TRC_MBOX_VPORT,
13030 "MBOX dflt rpi: : "
13031 "status:x%x rpi:x%x",
13032 (uint32_t)pmbox->mbxStatus,
13033 pmbox->un.varWords[0], 0);
13035 if (!pmbox->mbxStatus) {
13036 mp = (struct lpfc_dmabuf *)
13038 ndlp = (struct lpfc_nodelist *)
13041 /* Reg_LOGIN of dflt RPI was
13042 * successful. Now let's get
13043 * rid of the RPI using the
13044 * same mbox buffer.
13046 lpfc_unreg_login(phba,
13048 pmbox->un.varWords[0],
13051 lpfc_mbx_cmpl_dflt_rpi;
13053 pmb->ctx_ndlp = ndlp;
13054 pmb->vport = vport;
13055 rc = lpfc_sli_issue_mbox(phba,
13058 if (rc != MBX_BUSY)
13059 lpfc_printf_log(phba,
13062 "0350 rc should have"
13063 "been MBX_BUSY\n");
13064 if (rc != MBX_NOT_FINISHED)
13065 goto send_current_mbox;
13069 &phba->pport->work_port_lock,
13071 phba->pport->work_port_events &=
13073 spin_unlock_irqrestore(
13074 &phba->pport->work_port_lock,
13077 /* Do NOT queue MBX_HEARTBEAT to the worker
13078 * thread for processing.
13080 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
13081 /* Process mbox now */
13082 phba->sli.mbox_active = NULL;
13083 phba->sli.sli_flag &=
13084 ~LPFC_SLI_MBOX_ACTIVE;
13085 if (pmb->mbox_cmpl)
13086 pmb->mbox_cmpl(phba, pmb);
13088 /* Queue to worker thread to process */
13089 lpfc_mbox_cmpl_put(phba, pmb);
13093 spin_unlock_irqrestore(&phba->hbalock, iflag);
13095 if ((work_ha_copy & HA_MBATT) &&
13096 (phba->sli.mbox_active == NULL)) {
13098 /* Process next mailbox command if there is one */
13100 rc = lpfc_sli_issue_mbox(phba, NULL,
13102 } while (rc == MBX_NOT_FINISHED);
13103 if (rc != MBX_SUCCESS)
13104 lpfc_printf_log(phba, KERN_ERR,
13106 "0349 rc should be "
13110 spin_lock_irqsave(&phba->hbalock, iflag);
13111 phba->work_ha |= work_ha_copy;
13112 spin_unlock_irqrestore(&phba->hbalock, iflag);
13113 lpfc_worker_wake_up(phba);
13115 return IRQ_HANDLED;
13117 spin_unlock_irqrestore(&phba->hbalock, iflag);
13118 return IRQ_HANDLED;
13120 } /* lpfc_sli_sp_intr_handler */
13123 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
13124 * @irq: Interrupt number.
13125 * @dev_id: The device context pointer.
13127 * This function is directly called from the PCI layer as an interrupt
13128 * service routine when device with SLI-3 interface spec is enabled with
13129 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
13130 * ring event in the HBA. However, when the device is enabled with either
13131 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13132 * device-level interrupt handler. When the PCI slot is in error recovery
13133 * or the HBA is undergoing initialization, the interrupt handler will not
13134 * process the interrupt. The SCSI FCP fast-path ring events are handled in
13135 * the interrupt context. This function is called without any lock held.
13136 * It gets the hbalock to access and update SLI data structures.
13138 * This function returns IRQ_HANDLED when the interrupt is handled, else it
13139 * returns IRQ_NONE.
13142 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
13144 struct lpfc_hba *phba;
13146 unsigned long status;
13147 unsigned long iflag;
13148 struct lpfc_sli_ring *pring;
13150 /* Get the driver's phba structure from the dev_id and
13151 * assume the HBA is not interrupting.
13153 phba = (struct lpfc_hba *) dev_id;
13155 if (unlikely(!phba))
13159 * Stuff needs to be attended to when this function is invoked as an
13160 * individual interrupt handler in MSI-X multi-message interrupt mode
13162 if (phba->intr_type == MSIX) {
13163 /* Check device state for handling interrupt */
13164 if (lpfc_intr_state_check(phba))
13166 /* Need to read HA REG for FCP ring and other ring events */
13167 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13168 return IRQ_HANDLED;
13169 /* Clear up only attention source related to fast-path */
13170 spin_lock_irqsave(&phba->hbalock, iflag);
13172 * If there is deferred error attention, do not check for
13175 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13176 spin_unlock_irqrestore(&phba->hbalock, iflag);
13179 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
13181 readl(phba->HAregaddr); /* flush */
13182 spin_unlock_irqrestore(&phba->hbalock, iflag);
13184 ha_copy = phba->ha_copy;
13187 * Process all events on FCP ring. Take the optimized path for FCP IO.
13189 ha_copy &= ~(phba->work_ha_mask);
13191 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13192 status >>= (4*LPFC_FCP_RING);
13193 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
13194 if (status & HA_RXMASK)
13195 lpfc_sli_handle_fast_ring_event(phba, pring, status);
13197 if (phba->cfg_multi_ring_support == 2) {
13199 * Process all events on extra ring. Take the optimized path
13200 * for extra ring IO.
13202 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13203 status >>= (4*LPFC_EXTRA_RING);
13204 if (status & HA_RXMASK) {
13205 lpfc_sli_handle_fast_ring_event(phba,
13206 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
13210 return IRQ_HANDLED;
13211 } /* lpfc_sli_fp_intr_handler */
13214 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
13215 * @irq: Interrupt number.
13216 * @dev_id: The device context pointer.
13218 * This function is the HBA device-level interrupt handler to device with
13219 * SLI-3 interface spec, called from the PCI layer when either MSI or
13220 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
13221 * requires driver attention. This function invokes the slow-path interrupt
13222 * attention handling function and fast-path interrupt attention handling
13223 * function in turn to process the relevant HBA attention events. This
13224 * function is called without any lock held. It gets the hbalock to access
13225 * and update SLI data structures.
13227 * This function returns IRQ_HANDLED when the interrupt is handled, else it
13228 * returns IRQ_NONE.
13231 lpfc_sli_intr_handler(int irq, void *dev_id)
13233 struct lpfc_hba *phba;
13234 irqreturn_t sp_irq_rc, fp_irq_rc;
13235 unsigned long status1, status2;
13239 * Get the driver's phba structure from the dev_id and
13240 * assume the HBA is not interrupting.
13242 phba = (struct lpfc_hba *) dev_id;
13244 if (unlikely(!phba))
13247 /* Check device state for handling interrupt */
13248 if (lpfc_intr_state_check(phba))
13251 spin_lock(&phba->hbalock);
13252 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
13253 spin_unlock(&phba->hbalock);
13254 return IRQ_HANDLED;
13257 if (unlikely(!phba->ha_copy)) {
13258 spin_unlock(&phba->hbalock);
13260 } else if (phba->ha_copy & HA_ERATT) {
13261 if (phba->hba_flag & HBA_ERATT_HANDLED)
13262 /* ERATT polling has handled ERATT */
13263 phba->ha_copy &= ~HA_ERATT;
13265 /* Indicate interrupt handler handles ERATT */
13266 phba->hba_flag |= HBA_ERATT_HANDLED;
13270 * If there is deferred error attention, do not check for any interrupt.
13272 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13273 spin_unlock(&phba->hbalock);
13277 /* Clear attention sources except link and error attentions */
13278 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
13279 spin_unlock(&phba->hbalock);
13280 return IRQ_HANDLED;
13282 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
13283 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
13285 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
13286 writel(hc_copy, phba->HCregaddr);
13287 readl(phba->HAregaddr); /* flush */
13288 spin_unlock(&phba->hbalock);
13291 * Invokes slow-path host attention interrupt handling as appropriate.
13294 /* status of events with mailbox and link attention */
13295 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
13297 /* status of events with ELS ring */
13298 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
13299 status2 >>= (4*LPFC_ELS_RING);
13301 if (status1 || (status2 & HA_RXMASK))
13302 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
13304 sp_irq_rc = IRQ_NONE;
13307 * Invoke fast-path host attention interrupt handling as appropriate.
13310 /* status of events with FCP ring */
13311 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13312 status1 >>= (4*LPFC_FCP_RING);
13314 /* status of events with extra ring */
13315 if (phba->cfg_multi_ring_support == 2) {
13316 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13317 status2 >>= (4*LPFC_EXTRA_RING);
13321 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
13322 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
13324 fp_irq_rc = IRQ_NONE;
13326 /* Return device-level interrupt handling status */
13327 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
13328 } /* lpfc_sli_intr_handler */
13331 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
13332 * @phba: pointer to lpfc hba data structure.
13334 * This routine is invoked by the worker thread to process all the pending
13335 * SLI4 els abort xri events.
13337 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
13339 struct lpfc_cq_event *cq_event;
13340 unsigned long iflags;
13342 /* First, declare the els xri abort event has been handled */
13343 spin_lock_irqsave(&phba->hbalock, iflags);
13344 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
13345 spin_unlock_irqrestore(&phba->hbalock, iflags);
13347 /* Now, handle all the els xri abort events */
13348 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
13349 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
13350 /* Get the first event from the head of the event queue */
13351 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
13352 cq_event, struct lpfc_cq_event, list);
13353 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
13355 /* Notify aborted XRI for ELS work queue */
13356 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
13358 /* Free the event processed back to the free pool */
13359 lpfc_sli4_cq_event_release(phba, cq_event);
13360 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
13363 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
13367 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
13368 * @phba: pointer to lpfc hba data structure
13369 * @pIocbIn: pointer to the rspiocbq
13370 * @pIocbOut: pointer to the cmdiocbq
13371 * @wcqe: pointer to the complete wcqe
13373 * This routine transfers the fields of a command iocbq to a response iocbq
13374 * by copying all the IOCB fields from command iocbq and transferring the
13375 * completion status information from the complete wcqe.
13378 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
13379 struct lpfc_iocbq *pIocbIn,
13380 struct lpfc_iocbq *pIocbOut,
13381 struct lpfc_wcqe_complete *wcqe)
13384 unsigned long iflags;
13385 uint32_t status, max_response;
13386 struct lpfc_dmabuf *dmabuf;
13387 struct ulp_bde64 *bpl, bde;
13388 size_t offset = offsetof(struct lpfc_iocbq, iocb);
13390 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
13391 sizeof(struct lpfc_iocbq) - offset);
13392 /* Map WCQE parameters into irspiocb parameters */
13393 status = bf_get(lpfc_wcqe_c_status, wcqe);
13394 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
13395 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
13396 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
13397 pIocbIn->iocb.un.fcpi.fcpi_parm =
13398 pIocbOut->iocb.un.fcpi.fcpi_parm -
13399 wcqe->total_data_placed;
13401 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
13403 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
13404 switch (pIocbOut->iocb.ulpCommand) {
13405 case CMD_ELS_REQUEST64_CR:
13406 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13407 bpl = (struct ulp_bde64 *)dmabuf->virt;
13408 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
13409 max_response = bde.tus.f.bdeSize;
13411 case CMD_GEN_REQUEST64_CR:
13413 if (!pIocbOut->context3)
13415 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
13416 sizeof(struct ulp_bde64);
13417 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13418 bpl = (struct ulp_bde64 *)dmabuf->virt;
13419 for (i = 0; i < numBdes; i++) {
13420 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
13421 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
13422 max_response += bde.tus.f.bdeSize;
13426 max_response = wcqe->total_data_placed;
13429 if (max_response < wcqe->total_data_placed)
13430 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
13432 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
13433 wcqe->total_data_placed;
13436 /* Convert BG errors for completion status */
13437 if (status == CQE_STATUS_DI_ERROR) {
13438 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
13440 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
13441 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
13443 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
13445 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
13446 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
13447 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13448 BGS_GUARD_ERR_MASK;
13449 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
13450 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13451 BGS_APPTAG_ERR_MASK;
13452 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
13453 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13454 BGS_REFTAG_ERR_MASK;
13456 /* Check to see if there was any good data before the error */
13457 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
13458 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13459 BGS_HI_WATER_MARK_PRESENT_MASK;
13460 pIocbIn->iocb.unsli3.sli3_bg.bghm =
13461 wcqe->total_data_placed;
13465 * Set ALL the error bits to indicate we don't know what
13466 * type of error it is.
13468 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
13469 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13470 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
13471 BGS_GUARD_ERR_MASK);
13474 /* Pick up HBA exchange busy condition */
13475 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
13476 spin_lock_irqsave(&phba->hbalock, iflags);
13477 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
13478 spin_unlock_irqrestore(&phba->hbalock, iflags);
13483 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
13484 * @phba: Pointer to HBA context object.
13485 * @irspiocbq: Pointer to work-queue completion queue entry.
13487 * This routine handles an ELS work-queue completion event and constructs
13488 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
13489 * discovery engine to handle.
13491 * Return: Pointer to the receive IOCBQ, NULL otherwise.
13493 static struct lpfc_iocbq *
13494 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
13495 struct lpfc_iocbq *irspiocbq)
13497 struct lpfc_sli_ring *pring;
13498 struct lpfc_iocbq *cmdiocbq;
13499 struct lpfc_wcqe_complete *wcqe;
13500 unsigned long iflags;
13502 pring = lpfc_phba_elsring(phba);
13503 if (unlikely(!pring))
13506 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
13507 pring->stats.iocb_event++;
13508 /* Look up the ELS command IOCB and create pseudo response IOCB */
13509 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13510 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13511 if (unlikely(!cmdiocbq)) {
13512 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13513 "0386 ELS complete with no corresponding "
13514 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13515 wcqe->word0, wcqe->total_data_placed,
13516 wcqe->parameter, wcqe->word3);
13517 lpfc_sli_release_iocbq(phba, irspiocbq);
13521 spin_lock_irqsave(&pring->ring_lock, iflags);
13522 /* Put the iocb back on the txcmplq */
13523 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13524 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13526 /* Fake the irspiocbq and copy necessary response information */
13527 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
13532 inline struct lpfc_cq_event *
13533 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13535 struct lpfc_cq_event *cq_event;
13537 /* Allocate a new internal CQ_EVENT entry */
13538 cq_event = lpfc_sli4_cq_event_alloc(phba);
13540 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13541 "0602 Failed to alloc CQ_EVENT entry\n");
13545 /* Move the CQE into the event */
13546 memcpy(&cq_event->cqe, entry, size);
13551 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
13552 * @phba: Pointer to HBA context object.
13553 * @mcqe: Pointer to mailbox completion queue entry.
13555 * This routine processes a mailbox completion queue entry with an
13556 * asynchronous event.
13558 * Return: true if work posted to worker thread, otherwise false.
13561 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13563 struct lpfc_cq_event *cq_event;
13564 unsigned long iflags;
13566 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13567 "0392 Async Event: word0:x%x, word1:x%x, "
13568 "word2:x%x, word3:x%x\n", mcqe->word0,
13569 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13571 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13575 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
13576 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13577 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
13579 /* Set the async event flag */
13580 spin_lock_irqsave(&phba->hbalock, iflags);
13581 phba->hba_flag |= ASYNC_EVENT;
13582 spin_unlock_irqrestore(&phba->hbalock, iflags);
13588 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
13589 * @phba: Pointer to HBA context object.
13590 * @mcqe: Pointer to mailbox completion queue entry.
13592 * This routine processes a mailbox completion queue entry with a mailbox
13593 * completion event.
13595 * Return: true if work posted to worker thread, otherwise false.
13598 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13600 uint32_t mcqe_status;
13601 MAILBOX_t *mbox, *pmbox;
13602 struct lpfc_mqe *mqe;
13603 struct lpfc_vport *vport;
13604 struct lpfc_nodelist *ndlp;
13605 struct lpfc_dmabuf *mp;
13606 unsigned long iflags;
13608 bool workposted = false;
13611 /* If not a mailbox-complete MCQE, bail out; just handle the mailbox consume */
13612 if (!bf_get(lpfc_trailer_completed, mcqe))
13613 goto out_no_mqe_complete;
13615 /* Get the reference to the active mbox command */
13616 spin_lock_irqsave(&phba->hbalock, iflags);
13617 pmb = phba->sli.mbox_active;
13618 if (unlikely(!pmb)) {
13619 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13620 "1832 No pending MBOX command to handle\n");
13621 spin_unlock_irqrestore(&phba->hbalock, iflags);
13622 goto out_no_mqe_complete;
13624 spin_unlock_irqrestore(&phba->hbalock, iflags);
13626 pmbox = (MAILBOX_t *)&pmb->u.mqe;
13628 vport = pmb->vport;
13630 /* Reset heartbeat timer */
13631 phba->last_completion_time = jiffies;
13632 del_timer(&phba->sli.mbox_tmo);
13634 /* Move mbox data to caller's mailbox region, do endian swapping */
13635 if (pmb->mbox_cmpl && mbox)
13636 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
13639 * For mcqe errors, conditionally move a modified error code to
13640 * the mbox so that the error will not be missed.
13642 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13643 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13644 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13645 bf_set(lpfc_mqe_status, mqe,
13646 (LPFC_MBX_ERROR_RANGE | mcqe_status));
13648 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13649 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13650 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13651 "MBOX dflt rpi: status:x%x rpi:x%x",
13653 pmbox->un.varWords[0], 0);
13654 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
13655 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13656 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
13658 /* Reg_LOGIN of dflt RPI was successful. Mark the
13659 * node as having an UNREG_LOGIN in progress to stop
13660 * an unsolicited PLOGI from the same NPortId from
13661 * starting another mailbox transaction.
13663 spin_lock_irqsave(&ndlp->lock, iflags);
13664 ndlp->nlp_flag |= NLP_UNREG_INP;
13665 spin_unlock_irqrestore(&ndlp->lock, iflags);
13666 lpfc_unreg_login(phba, vport->vpi,
13667 pmbox->un.varWords[0], pmb);
13668 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
13671 /* No reference taken here. This is a default
13672 * RPI reg/immediate unreg cycle. The reference was
13673 * taken in the reg rpi path and is released when
13674 * this mailbox completes.
13676 pmb->ctx_ndlp = ndlp;
13677 pmb->vport = vport;
13678 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
13679 if (rc != MBX_BUSY)
13680 lpfc_printf_log(phba, KERN_ERR,
13683 "have been MBX_BUSY\n");
13684 if (rc != MBX_NOT_FINISHED)
13685 goto send_current_mbox;
13688 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
13689 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
13690 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
13692 /* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. */
13693 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
13694 spin_lock_irqsave(&phba->hbalock, iflags);
13695 /* Release the mailbox command posting token */
13696 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13697 phba->sli.mbox_active = NULL;
13698 if (bf_get(lpfc_trailer_consumed, mcqe))
13699 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13700 spin_unlock_irqrestore(&phba->hbalock, iflags);
13702 /* Post the next mbox command, if there is one */
13703 lpfc_sli4_post_async_mbox(phba);
13705 /* Process cmpl now */
13706 if (pmb->mbox_cmpl)
13707 pmb->mbox_cmpl(phba, pmb);
13711 /* There is mailbox completion work to queue to the worker thread */
13712 spin_lock_irqsave(&phba->hbalock, iflags);
13713 __lpfc_mbox_cmpl_put(phba, pmb);
13714 phba->work_ha |= HA_MBATT;
13715 spin_unlock_irqrestore(&phba->hbalock, iflags);
13719 spin_lock_irqsave(&phba->hbalock, iflags);
13720 /* Release the mailbox command posting token */
13721 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13722 /* Setting the active mailbox pointer must stay in sync with the flag clear */
13723 phba->sli.mbox_active = NULL;
13724 if (bf_get(lpfc_trailer_consumed, mcqe))
13725 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13726 spin_unlock_irqrestore(&phba->hbalock, iflags);
13727 /* Wake up worker thread to post the next pending mailbox command */
13728 lpfc_worker_wake_up(phba);
13731 out_no_mqe_complete:
13732 spin_lock_irqsave(&phba->hbalock, iflags);
13733 if (bf_get(lpfc_trailer_consumed, mcqe))
13734 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13735 spin_unlock_irqrestore(&phba->hbalock, iflags);
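/*
 * Illustrative sketch (not driver code): when the MCQE reports an error but
 * the MQE still says success, the handler above ORs a range marker into the
 * status so the failure is not lost. A minimal analogue; the mask value
 * below is a hypothetical stand-in, not the real LPFC_MBX_ERROR_RANGE.
 */
#if 0
#include <stdint.h>

#define STATUS_SUCCESS		0x0
#define ERROR_RANGE_MARK	0x4000	/* hypothetical "CQE error" range bit */

static uint16_t merge_status(uint16_t mqe_status, uint16_t mcqe_status)
{
	/* Only overwrite when the MQE would otherwise hide the failure */
	if (mcqe_status != STATUS_SUCCESS && mqe_status == STATUS_SUCCESS)
		return ERROR_RANGE_MARK | mcqe_status;
	return mqe_status;
}
#endif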
13740 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
13741 * @phba: Pointer to HBA context object.
13742 * @cq: Pointer to associated CQ
13743 * @cqe: Pointer to mailbox completion queue entry.
13745 * This routine processes a mailbox completion queue entry and invokes the
13746 * proper mailbox completion handling or asynchronous event handling routine
13747 * according to the MCQE's async bit.
13749 * Return: true if work posted to worker thread, otherwise false.
13752 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13753 struct lpfc_cqe *cqe)
13755 struct lpfc_mcqe mcqe;
13760 /* Copy the mailbox MCQE and convert endian order as needed */
13761 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
13763 /* Invoke the proper event handling routine */
13764 if (!bf_get(lpfc_trailer_async, &mcqe))
13765 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
13767 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
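/*
 * Illustrative sketch (not driver code): like lpfc_sli4_pcimem_bcopy() used
 * above, hardware queue entries are snapshotted word by word with endian
 * conversion before any bit fields are tested. A portable analogue with a
 * hand-rolled le32_to_host(); sizes are assumed to be multiples of 4 bytes.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

static uint32_t le32_to_host(uint32_t v)
{
	const uint8_t *b = (const uint8_t *)&v;

	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

static void cqe_copy(void *dst, const volatile void *src, size_t bytes)
{
	const volatile uint32_t *s = src;
	uint32_t *d = dst;
	size_t i;

	for (i = 0; i < bytes / 4; i++)
		d[i] = le32_to_host(s[i]);	/* snapshot + byte swap */
}
#endif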
13772 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
13773 * @phba: Pointer to HBA context object.
13774 * @cq: Pointer to associated CQ
13775 * @wcqe: Pointer to work-queue completion queue entry.
13777 * This routine handles an ELS work-queue completion event.
13779 * Return: true if work posted to worker thread, otherwise false.
13782 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13783 struct lpfc_wcqe_complete *wcqe)
13785 struct lpfc_iocbq *irspiocbq;
13786 unsigned long iflags;
13787 struct lpfc_sli_ring *pring = cq->pring;
13789 int txcmplq_cnt = 0;
13791 /* Check for response status */
13792 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13793 /* Log the error status */
13794 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13795 "0357 ELS CQE error: status=x%x: "
13796 "CQE: %08x %08x %08x %08x\n",
13797 bf_get(lpfc_wcqe_c_status, wcqe),
13798 wcqe->word0, wcqe->total_data_placed,
13799 wcqe->parameter, wcqe->word3);
13802 /* Get an irspiocbq for later ELS response processing use */
13803 irspiocbq = lpfc_sli_get_iocbq(phba);
13805 if (!list_empty(&pring->txq))
13807 if (!list_empty(&pring->txcmplq))
13809 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13810 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
13811 "els_txcmplq_cnt=%d\n",
13812 txq_cnt, phba->iocb_cnt,
13817 /* Save off the slow-path queue event for work thread to process */
13818 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
13819 spin_lock_irqsave(&phba->hbalock, iflags);
13820 list_add_tail(&irspiocbq->cq_event.list,
13821 &phba->sli4_hba.sp_queue_event);
13822 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13823 spin_unlock_irqrestore(&phba->hbalock, iflags);
13829 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
13830 * @phba: Pointer to HBA context object.
13831 * @wcqe: Pointer to work-queue completion queue entry.
13833 * This routine handles a slow-path WQ entry consumed event by invoking the
13834 * proper WQ release routine to the slow-path WQ.
13837 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13838 struct lpfc_wcqe_release *wcqe)
13840 /* sanity check on queue memory */
13841 if (unlikely(!phba->sli4_hba.els_wq))
13843 /* Check for the slow-path ELS work queue */
13844 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13845 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13846 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13848 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13849 "2579 Slow-path wqe consume event carries "
13850 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13851 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
13852 phba->sli4_hba.els_wq->queue_id);
13856 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an XRI abort event
13857 * @phba: Pointer to HBA context object.
13858 * @cq: Pointer to a WQ completion queue.
13859 * @wcqe: Pointer to work-queue completion queue entry.
13861 * This routine handles an XRI abort event.
13863 * Return: true if work posted to worker thread, otherwise false.
13866 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13867 struct lpfc_queue *cq,
13868 struct sli4_wcqe_xri_aborted *wcqe)
13870 bool workposted = false;
13871 struct lpfc_cq_event *cq_event;
13872 unsigned long iflags;
13874 switch (cq->subtype) {
13876 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
13877 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13878 /* Notify aborted XRI for NVME work queue */
13879 if (phba->nvmet_support)
13880 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13882 workposted = false;
13884 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
13886 cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
13888 workposted = false;
13891 cq_event->hdwq = cq->hdwq;
13892 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
13894 list_add_tail(&cq_event->list,
13895 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13896 /* Set the els xri abort event flag */
13897 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13898 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
13903 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13904 "0603 Invalid CQ subtype %d: "
13905 "%08x %08x %08x %08x\n",
13906 cq->subtype, wcqe->word0, wcqe->parameter,
13907 wcqe->word2, wcqe->word3);
13908 workposted = false;
13914 #define FC_RCTL_MDS_DIAGS 0xF4
13917 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
13918 * @phba: Pointer to HBA context object.
13919 * @rcqe: Pointer to receive-queue completion queue entry.
13921 * This routine processes a receive-queue completion queue entry.
13923 * Return: true if work posted to worker thread, otherwise false.
13926 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13928 bool workposted = false;
13929 struct fc_frame_header *fc_hdr;
13930 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13931 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
13932 struct lpfc_nvmet_tgtport *tgtp;
13933 struct hbq_dmabuf *dma_buf;
13934 uint32_t status, rq_id;
13935 unsigned long iflags;
13937 /* sanity check on queue memory */
13938 if (unlikely(!hrq) || unlikely(!drq))
13941 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13942 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13944 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13945 if (rq_id != hrq->queue_id)
13948 status = bf_get(lpfc_rcqe_status, rcqe);
13950 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13951 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13952 "2537 Receive Frame Truncated!!\n");
13954 case FC_STATUS_RQ_SUCCESS:
13955 spin_lock_irqsave(&phba->hbalock, iflags);
13956 lpfc_sli4_rq_release(hrq, drq);
13957 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13959 hrq->RQ_no_buf_found++;
13960 spin_unlock_irqrestore(&phba->hbalock, iflags);
13964 hrq->RQ_buf_posted--;
13965 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
13967 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13969 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
13970 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
13971 spin_unlock_irqrestore(&phba->hbalock, iflags);
13972 /* Handle MDS Loopback frames */
13973 if (!(phba->pport->load_flag & FC_UNLOADING))
13974 lpfc_sli4_handle_mds_loopback(phba->pport,
13977 lpfc_in_buf_free(phba, &dma_buf->dbuf);
13981 /* save off the frame for the work thread to process */
13982 list_add_tail(&dma_buf->cq_event.list,
13983 &phba->sli4_hba.sp_queue_event);
13984 /* Frame received */
13985 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13986 spin_unlock_irqrestore(&phba->hbalock, iflags);
13989 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13990 if (phba->nvmet_support) {
13991 tgtp = phba->targetport->private;
13992 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13993 "6402 RQE Error x%x, posted %d err_cnt "
13995 status, hrq->RQ_buf_posted,
13996 hrq->RQ_no_posted_buf,
13997 atomic_read(&tgtp->rcv_fcp_cmd_in),
13998 atomic_read(&tgtp->rcv_fcp_cmd_out),
13999 atomic_read(&tgtp->xmt_fcp_release));
14003 case FC_STATUS_INSUFF_BUF_NEED_BUF:
14004 hrq->RQ_no_posted_buf++;
14005 /* Post more buffers if possible */
14006 spin_lock_irqsave(&phba->hbalock, iflags);
14007 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
14008 spin_unlock_irqrestore(&phba->hbalock, iflags);
14017 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
14018 * @phba: Pointer to HBA context object.
14019 * @cq: Pointer to the completion queue.
14020 * @cqe: Pointer to a completion queue entry.
14022 * This routine processes a slow-path work-queue or receive-queue completion
14023 * queue entry.
14025 * Return: true if work posted to worker thread, otherwise false.
14028 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14029 struct lpfc_cqe *cqe)
14031 struct lpfc_cqe cqevt;
14032 bool workposted = false;
14034 /* Copy the work queue CQE and convert endian order if needed */
14035 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
14037 /* Check the WCQE type and dispatch to the proper handler */
14038 switch (bf_get(lpfc_cqe_code, &cqevt)) {
14039 case CQE_CODE_COMPL_WQE:
14040 /* Process the WQ/RQ complete event */
14041 phba->last_completion_time = jiffies;
14042 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
14043 (struct lpfc_wcqe_complete *)&cqevt);
14045 case CQE_CODE_RELEASE_WQE:
14046 /* Process the WQ release event */
14047 lpfc_sli4_sp_handle_rel_wcqe(phba,
14048 (struct lpfc_wcqe_release *)&cqevt);
14050 case CQE_CODE_XRI_ABORTED:
14051 /* Process the WQ XRI abort event */
14052 phba->last_completion_time = jiffies;
14053 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14054 (struct sli4_wcqe_xri_aborted *)&cqevt);
14056 case CQE_CODE_RECEIVE:
14057 case CQE_CODE_RECEIVE_V1:
14058 /* Process the RQ event */
14059 phba->last_completion_time = jiffies;
14060 workposted = lpfc_sli4_sp_handle_rcqe(phba,
14061 (struct lpfc_rcqe *)&cqevt);
14064 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14065 "0388 Not a valid WCQE code: x%x\n",
14066 bf_get(lpfc_cqe_code, &cqevt));
14073 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
14074 * @phba: Pointer to HBA context object.
14075 * @eqe: Pointer to fast-path event queue entry.
14076 * @speq: Pointer to slow-path event queue.
14078 * This routine processes an event queue entry from the slow-path event queue.
14079 * It checks the MajorCode and MinorCode to determine whether this is a
14080 * completion event on a completion queue; if not, an error is logged and the
14081 * routine returns. Otherwise, it finds the corresponding completion
14082 * queue, processes all the entries on that completion queue, rearms the
14083 * completion queue, and returns.
14087 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
14088 struct lpfc_queue *speq)
14090 struct lpfc_queue *cq = NULL, *childq;
14094 /* Get the reference to the corresponding CQ */
14095 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14097 list_for_each_entry(childq, &speq->child_list, list) {
14098 if (childq->queue_id == cqid) {
14103 if (unlikely(!cq)) {
14104 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
14105 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14106 "0365 Slow-path CQ identifier "
14107 "(%d) does not exist\n", cqid);
14111 /* Save EQ associated with this CQ */
14112 cq->assoc_qp = speq;
14114 if (is_kdump_kernel())
14115 ret = queue_work(phba->wq, &cq->spwork);
14117 ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
14120 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14121 "0390 Cannot schedule queue work "
14122 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14123 cqid, cq->queue_id, raw_smp_processor_id());
14127 * __lpfc_sli4_process_cq - Process elements of a CQ
14128 * @phba: Pointer to HBA context object.
14129 * @cq: Pointer to CQ to be processed
14130 * @handler: Routine to process each cqe
14131 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
14132 * @poll_mode: Polling mode we were called from
14134 * This routine processes completion queue entries in a CQ. While a valid
14135 * queue element is found, the handler is called. During processing checks
14136 * are made for periodic doorbell writes to let the hardware know of
14137 * element consumption.
14139 * If the max limit on cqes to process is hit, or there are no more valid
14140 * entries, the loop stops. If we processed a sufficient number of elements,
14141 * meaning there is sufficient load, rather than rearming and generating
14142 * another interrupt, a cq rescheduling delay will be set. A delay of 0
14143 * indicates no rescheduling.
14145 * Returns true if work was scheduled, false otherwise.
14148 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
14149 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
14150 struct lpfc_cqe *), unsigned long *delay,
14151 enum lpfc_poll_mode poll_mode)
14153 struct lpfc_cqe *cqe;
14154 bool workposted = false;
14155 int count = 0, consumed = 0;
14158 /* default - no reschedule */
14161 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
14162 goto rearm_and_exit;
14164 /* Process all the entries to the CQ */
14166 cqe = lpfc_sli4_cq_get(cq);
14168 workposted |= handler(phba, cq, cqe);
14169 __lpfc_sli4_consume_cqe(phba, cq, cqe);
14172 if (!(++count % cq->max_proc_limit))
14175 if (!(count % cq->notify_interval)) {
14176 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14179 cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
14182 if (count == LPFC_NVMET_CQ_NOTIFY)
14183 cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
14185 cqe = lpfc_sli4_cq_get(cq);
14187 if (count >= phba->cfg_cq_poll_threshold) {
14192 /* Note: complete the irq_poll softirq before rearming CQ */
14193 if (poll_mode == LPFC_IRQ_POLL)
14194 irq_poll_complete(&cq->iop);
14196 /* Track the max number of CQEs processed in 1 EQ */
14197 if (count > cq->CQ_max_cqe)
14198 cq->CQ_max_cqe = count;
14200 cq->assoc_qp->EQ_cqe_cnt += count;
14202 /* Catch the no cq entry condition */
14203 if (unlikely(count == 0))
14204 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14205 "0369 No entry from completion queue "
14206 "qid=%d\n", cq->queue_id);
14208 xchg(&cq->queue_claimed, 0);
14211 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14212 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
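/*
 * Illustrative sketch (not driver code): the cmpxchg() on cq->queue_claimed
 * above is a lock-free "single consumer at a time" claim. A standalone
 * analogue with C11 atomics; process_entries() is a hypothetical callback.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int queue_claimed;	/* 0 = free, 1 = claimed */

static bool process_queue(void (*process_entries)(void))
{
	int expected = 0;

	/* Only one context may process the queue at a time */
	if (!atomic_compare_exchange_strong(&queue_claimed, &expected, 1))
		return false;		/* someone else already owns it */

	process_entries();

	/* Release the claim; the next caller may take over */
	atomic_store(&queue_claimed, 0);
	return true;
}
#endif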
14218 * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
14219 * @cq: pointer to CQ to process
14221 * This routine calls the cq processing routine with a handler specific
14222 * to the type of queue bound to it.
14224 * The CQ routine returns two values: the first is the calling status,
14225 * which indicates whether work was queued to the background discovery
14226 * thread. If true, the routine should wake up the discovery thread;
14227 * the second is the delay parameter. If non-zero, rather than rearming the
14228 * CQ and generating yet another interrupt, the CQ handler should be queued so
14229 * that it is processed in a subsequent polling action. The value of
14230 * the delay indicates when to reschedule it.
14233 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
14235 struct lpfc_hba *phba = cq->phba;
14236 unsigned long delay;
14237 bool workposted = false;
14240 /* Process and rearm the CQ */
14241 switch (cq->type) {
14243 workposted |= __lpfc_sli4_process_cq(phba, cq,
14244 lpfc_sli4_sp_handle_mcqe,
14245 &delay, LPFC_QUEUE_WORK);
14248 if (cq->subtype == LPFC_IO)
14249 workposted |= __lpfc_sli4_process_cq(phba, cq,
14250 lpfc_sli4_fp_handle_cqe,
14251 &delay, LPFC_QUEUE_WORK);
14253 workposted |= __lpfc_sli4_process_cq(phba, cq,
14254 lpfc_sli4_sp_handle_cqe,
14255 &delay, LPFC_QUEUE_WORK);
14258 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14259 "0370 Invalid completion queue type (%d)\n",
14265 if (is_kdump_kernel())
14266 ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
14269 ret = queue_delayed_work_on(cq->chann, phba->wq,
14270 &cq->sched_spwork, delay);
14272 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14273 "0394 Cannot schedule queue work "
14274 "for cqid=%d on CPU %d\n",
14275 cq->queue_id, cq->chann);
14278 /* wake up worker thread if there is work to be done */
14280 lpfc_worker_wake_up(phba);
14284 * lpfc_sli4_sp_process_cq - slow-path work handler when started by the
14285 * workqueue
14286 * @work: pointer to work element
14288 * Translates from the work handler and calls the slow-path handler.
14291 lpfc_sli4_sp_process_cq(struct work_struct *work)
14293 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
14295 __lpfc_sli4_sp_process_cq(cq);
14299 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
14300 * @work: pointer to work element
14302 * Translates from the work handler and calls the slow-path handler.
14305 lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
14307 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14308 struct lpfc_queue, sched_spwork);
14310 __lpfc_sli4_sp_process_cq(cq);
14314 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
14315 * @phba: Pointer to HBA context object.
14316 * @cq: Pointer to associated CQ
14317 * @wcqe: Pointer to work-queue completion queue entry.
14319 * This routine processes a fast-path work queue completion entry from the
14320 * fast-path event queue for FCP command response completion.
14323 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14324 struct lpfc_wcqe_complete *wcqe)
14326 struct lpfc_sli_ring *pring = cq->pring;
14327 struct lpfc_iocbq *cmdiocbq;
14328 struct lpfc_iocbq irspiocbq;
14329 unsigned long iflags;
14331 /* Check for response status */
14332 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14333 /* If resource errors reported from HBA, reduce queue
14334 * depth of the SCSI device.
14336 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
14337 IOSTAT_LOCAL_REJECT)) &&
14338 ((wcqe->parameter & IOERR_PARAM_MASK) ==
14339 IOERR_NO_RESOURCES))
14340 phba->lpfc_rampdown_queue_depth(phba);
14342 /* Log the cmpl status */
14343 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14344 "0373 FCP CQE cmpl: status=x%x: "
14345 "CQE: %08x %08x %08x %08x\n",
14346 bf_get(lpfc_wcqe_c_status, wcqe),
14347 wcqe->word0, wcqe->total_data_placed,
14348 wcqe->parameter, wcqe->word3);
14351 /* Look up the FCP command IOCB and create pseudo response IOCB */
14352 spin_lock_irqsave(&pring->ring_lock, iflags);
14353 pring->stats.iocb_event++;
14354 spin_unlock_irqrestore(&pring->ring_lock, iflags);
14355 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
14356 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14357 if (unlikely(!cmdiocbq)) {
14358 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14359 "0374 FCP complete with no corresponding "
14360 "cmdiocb: iotag (%d)\n",
14361 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14364 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
14365 cmdiocbq->isr_timestamp = cq->isr_timestamp;
14367 if (cmdiocbq->iocb_cmpl == NULL) {
14368 if (cmdiocbq->wqe_cmpl) {
14369 /* For FCP the flag is cleared in wqe_cmpl */
14370 if (!(cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
14371 cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
14372 spin_lock_irqsave(&phba->hbalock, iflags);
14373 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
14374 spin_unlock_irqrestore(&phba->hbalock, iflags);
14377 /* Pass the cmd_iocb and the wcqe to the upper layer */
14378 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
14381 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14382 "0375 FCP cmdiocb not callback function "
14384 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14388 /* Only SLI4 non-IO commands still use IOCB */
14389 /* Fake the irspiocb and copy necessary response information */
14390 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
14392 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
14393 spin_lock_irqsave(&phba->hbalock, iflags);
14394 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
14395 spin_unlock_irqrestore(&phba->hbalock, iflags);
14398 /* Pass the cmd_iocb and the rsp state to the upper layer */
14399 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
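/*
 * Illustrative sketch (not driver code): the completion path above dispatches
 * through one of two function pointers (wqe_cmpl for the WQE-native path,
 * iocb_cmpl for the legacy IOCB path). A minimal analogue; all names below
 * are hypothetical.
 */
#if 0
#include <stdio.h>

struct request {
	void (*wqe_done)(struct request *);	/* preferred, WQE-native */
	void (*iocb_done)(struct request *);	/* legacy fallback */
};

static void complete_request(struct request *rq)
{
	if (rq->wqe_done)
		rq->wqe_done(rq);		/* fast path */
	else if (rq->iocb_done)
		rq->iocb_done(rq);		/* legacy path */
	else
		fprintf(stderr, "request completed with no callback\n");
}
#endif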
14403 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
14404 * @phba: Pointer to HBA context object.
14405 * @cq: Pointer to completion queue.
14406 * @wcqe: Pointer to work-queue completion queue entry.
14408 * This routine handles a fast-path WQ entry consumed event by invoking the
14409 * proper WQ release routine on the fast-path WQ.
14412 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14413 struct lpfc_wcqe_release *wcqe)
14415 struct lpfc_queue *childwq;
14416 bool wqid_matched = false;
14419 /* Check for fast-path FCP work queue release */
14420 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
14421 list_for_each_entry(childwq, &cq->child_list, list) {
14422 if (childwq->queue_id == hba_wqid) {
14423 lpfc_sli4_wq_release(childwq,
14424 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14425 if (childwq->q_flag & HBA_NVMET_WQFULL)
14426 lpfc_nvmet_wqfull_process(phba, childwq);
14427 wqid_matched = true;
14431 /* Report warning log message if no match found */
14432 if (wqid_matched != true)
14433 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14434 "2580 Fast-path wqe consume event carries "
14435 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
14439 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
14440 * @phba: Pointer to HBA context object.
14441 * @cq: Pointer to completion queue.
14442 * @rcqe: Pointer to receive-queue completion queue entry.
14444 * This routine processes a receive-queue completion queue entry.
14446 * Return: true if work posted to worker thread, otherwise false.
14449 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14450 struct lpfc_rcqe *rcqe)
14452 bool workposted = false;
14453 struct lpfc_queue *hrq;
14454 struct lpfc_queue *drq;
14455 struct rqb_dmabuf *dma_buf;
14456 struct fc_frame_header *fc_hdr;
14457 struct lpfc_nvmet_tgtport *tgtp;
14458 uint32_t status, rq_id;
14459 unsigned long iflags;
14460 uint32_t fctl, idx;
14462 if ((phba->nvmet_support == 0) ||
14463 (phba->sli4_hba.nvmet_cqset == NULL))
14466 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
14467 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
14468 drq = phba->sli4_hba.nvmet_mrq_data[idx];
14470 /* sanity check on queue memory */
14471 if (unlikely(!hrq) || unlikely(!drq))
14474 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14475 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14477 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14479 if ((phba->nvmet_support == 0) ||
14480 (rq_id != hrq->queue_id))
14483 status = bf_get(lpfc_rcqe_status, rcqe);
14485 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14486 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14487 "6126 Receive Frame Truncated!!\n");
14489 case FC_STATUS_RQ_SUCCESS:
14490 spin_lock_irqsave(&phba->hbalock, iflags);
14491 lpfc_sli4_rq_release(hrq, drq);
14492 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
14494 hrq->RQ_no_buf_found++;
14495 spin_unlock_irqrestore(&phba->hbalock, iflags);
14498 spin_unlock_irqrestore(&phba->hbalock, iflags);
14500 hrq->RQ_buf_posted--;
14501 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14503 /* Just some basic sanity checks on FCP Command frame */
14504 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
14505 fc_hdr->fh_f_ctl[1] << 8 |
14506 fc_hdr->fh_f_ctl[2]);
14508 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
14509 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
14510 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
14513 if (fc_hdr->fh_type == FC_TYPE_FCP) {
14514 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
14515 lpfc_nvmet_unsol_fcp_event(
14516 phba, idx, dma_buf, cq->isr_timestamp,
14517 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
14521 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
14523 case FC_STATUS_INSUFF_BUF_FRM_DISC:
14524 if (phba->nvmet_support) {
14525 tgtp = phba->targetport->private;
14526 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14527 "6401 RQE Error x%x, posted %d err_cnt "
14529 status, hrq->RQ_buf_posted,
14530 hrq->RQ_no_posted_buf,
14531 atomic_read(&tgtp->rcv_fcp_cmd_in),
14532 atomic_read(&tgtp->rcv_fcp_cmd_out),
14533 atomic_read(&tgtp->xmt_fcp_release));
14537 case FC_STATUS_INSUFF_BUF_NEED_BUF:
14538 hrq->RQ_no_posted_buf++;
14539 /* Post more buffers if possible */
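/*
 * Illustrative sketch (not driver code): the FCP sanity check above assembles
 * the 24-bit F_CTL field from three header bytes and verifies the
 * FIRST_SEQ/END_SEQ/SEQ_INIT bits. A standalone analogue; the bit values
 * below follow FC-FS and mirror the FC_FC_* constants used above.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

#define F_CTL_FIRST_SEQ	0x200000u	/* first sequence of this exchange */
#define F_CTL_END_SEQ	0x080000u	/* last frame of sequence */
#define F_CTL_SEQ_INIT	0x010000u	/* transfer of sequence initiative */

static bool fcp_cmd_fctl_ok(const uint8_t f_ctl[3])
{
	uint32_t fctl = ((uint32_t)f_ctl[0] << 16) |
			((uint32_t)f_ctl[1] << 8) |
			 (uint32_t)f_ctl[2];
	uint32_t want = F_CTL_FIRST_SEQ | F_CTL_END_SEQ | F_CTL_SEQ_INIT;

	/* A single-frame FCP command must carry all three bits */
	return (fctl & want) == want;
}
#endif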
14547 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
14548 * @phba: adapter with cq
14549 * @cq: Pointer to the completion queue.
14550 * @cqe: Pointer to fast-path completion queue entry.
14552 * This routine processes a fast-path work queue completion entry from the
14553 * fast-path event queue for FCP command response completion.
14555 * Return: true if work posted to worker thread, otherwise false.
14558 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14559 struct lpfc_cqe *cqe)
14561 struct lpfc_wcqe_release wcqe;
14562 bool workposted = false;
14564 /* Copy the work queue CQE and convert endian order if needed */
14565 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
14567 /* Check the WCQE type and dispatch to the proper handler */
14568 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
14569 case CQE_CODE_COMPL_WQE:
14570 case CQE_CODE_NVME_ERSP:
14572 /* Process the WQ complete event */
14573 phba->last_completion_time = jiffies;
14574 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
14575 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14576 (struct lpfc_wcqe_complete *)&wcqe);
14578 case CQE_CODE_RELEASE_WQE:
14579 cq->CQ_release_wqe++;
14580 /* Process the WQ release event */
14581 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
14582 (struct lpfc_wcqe_release *)&wcqe);
14584 case CQE_CODE_XRI_ABORTED:
14585 cq->CQ_xri_aborted++;
14586 /* Process the WQ XRI abort event */
14587 phba->last_completion_time = jiffies;
14588 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14589 (struct sli4_wcqe_xri_aborted *)&wcqe);
14591 case CQE_CODE_RECEIVE_V1:
14592 case CQE_CODE_RECEIVE:
14593 phba->last_completion_time = jiffies;
14594 if (cq->subtype == LPFC_NVMET) {
14595 workposted = lpfc_sli4_nvmet_handle_rcqe(
14596 phba, cq, (struct lpfc_rcqe *)&wcqe);
14600 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14601 "0144 Not a valid CQE code: x%x\n",
14602 bf_get(lpfc_wcqe_c_code, &wcqe));
14609 * lpfc_sli4_sched_cq_work - Schedules cq work
14610 * @phba: Pointer to HBA context object.
14611 * @cq: Pointer to CQ
14614 * This routine checks the poll mode of the CQ corresponding to
14615 * cq->chann, then either schedules a softirq or queue_work to complete
14616 * the cq work.
14618 * The queue_work path is taken if in NVMET mode, or if poll_mode is
14619 * LPFC_QUEUE_WORK mode. Otherwise, the softirq path is taken.
14622 static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
14623 struct lpfc_queue *cq, uint16_t cqid)
14627 switch (cq->poll_mode) {
14628 case LPFC_IRQ_POLL:
14629 irq_poll_sched(&cq->iop);
14631 case LPFC_QUEUE_WORK:
14633 if (is_kdump_kernel())
14634 ret = queue_work(phba->wq, &cq->irqwork);
14636 ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
14638 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14639 "0383 Cannot schedule queue work "
14640 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14641 cqid, cq->queue_id,
14642 raw_smp_processor_id());
14647 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
14648 * @phba: Pointer to HBA context object.
14649 * @eq: Pointer to the queue structure.
14650 * @eqe: Pointer to fast-path event queue entry.
14652 * This routine processes an event queue entry from the fast-path event queue.
14653 * It checks the MajorCode and MinorCode to determine whether this is a
14654 * completion event on a completion queue; if not, an error is logged and the
14655 * routine returns. Otherwise, it finds the corresponding completion
14656 * queue, processes all the entries on the completion queue, rearms the
14657 * completion queue, and returns.
14660 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
14661 struct lpfc_eqe *eqe)
14663 struct lpfc_queue *cq = NULL;
14664 uint32_t qidx = eq->hdwq;
14667 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
14668 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14669 "0366 Not a valid completion "
14670 "event: majorcode=x%x, minorcode=x%x\n",
14671 bf_get_le32(lpfc_eqe_major_code, eqe),
14672 bf_get_le32(lpfc_eqe_minor_code, eqe));
14676 /* Get the reference to the corresponding CQ */
14677 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14679 /* Use the fast lookup method first */
14680 if (cqid <= phba->sli4_hba.cq_max) {
14681 cq = phba->sli4_hba.cq_lookup[cqid];
14686 /* Next check for NVMET completion */
14687 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
14688 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
14689 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
14690 /* Process NVMET unsol rcv */
14691 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
14696 if (phba->sli4_hba.nvmels_cq &&
14697 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
14698 /* Process NVME unsol rcv */
14699 cq = phba->sli4_hba.nvmels_cq;
14702 /* Otherwise this is a Slow path event */
14704 lpfc_sli4_sp_handle_eqe(phba, eqe,
14705 phba->sli4_hba.hdwq[qidx].hba_eq);
14710 if (unlikely(cqid != cq->queue_id)) {
14711 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14712 "0368 Miss-matched fast-path completion "
14713 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
14714 cqid, cq->queue_id);
14719 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
14720 if (phba->ktime_on)
14721 cq->isr_timestamp = ktime_get_ns();
14723 cq->isr_timestamp = 0;
14725 lpfc_sli4_sched_cq_work(phba, cq, cqid);
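/*
 * Illustrative sketch (not driver code): the fast lookup above trades a list
 * walk for a direct cq_lookup[cqid] index. A minimal analogue showing the
 * same O(1) id-to-object table; names below are hypothetical.
 */
#if 0
#include <stddef.h>

struct queue {
	unsigned int id;
};

#define MAX_QID 1024

static struct queue *qid_table[MAX_QID + 1];	/* indexed by hardware id */

static void register_queue(struct queue *q)
{
	if (q->id <= MAX_QID)
		qid_table[q->id] = q;
}

static struct queue *find_queue(unsigned int qid)
{
	return (qid <= MAX_QID) ? qid_table[qid] : NULL;	/* O(1) */
}
#endif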
14729 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
14730 * @cq: Pointer to CQ to be processed
14731 * @poll_mode: Enum lpfc_poll_state to determine poll mode
14733 * This routine calls the cq processing routine with the handler for
14736 * The CQ routine returns two values: the first is the calling status,
14737 * which indicates whether work was queued to the background discovery
14738 * thread. If true, the routine should wake up the discovery thread;
14739 * the second is the delay parameter. If non-zero, rather than rearming the
14740 * CQ and generating yet another interrupt, the CQ handler should be queued so
14741 * that it is processed in a subsequent polling action. The value of
14742 * the delay indicates when to reschedule it.
14745 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
14746 enum lpfc_poll_mode poll_mode)
14748 struct lpfc_hba *phba = cq->phba;
14749 unsigned long delay;
14750 bool workposted = false;
14753 /* process and rearm the CQ */
14754 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
14755 &delay, poll_mode);
14758 if (is_kdump_kernel())
14759 ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
14762 ret = queue_delayed_work_on(cq->chann, phba->wq,
14763 &cq->sched_irqwork, delay);
14765 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14766 "0367 Cannot schedule queue work "
14767 "for cqid=%d on CPU %d\n",
14768 cq->queue_id, cq->chann);
14771 /* wake up worker thread if there is work to be done */
14773 lpfc_worker_wake_up(phba);
14777 * lpfc_sli4_hba_process_cq - fast-path work handler when started by the
14778 * workqueue
14779 * @work: pointer to work element
14781 * Translates from the work handler and calls the fast-path handler.
14784 lpfc_sli4_hba_process_cq(struct work_struct *work)
14786 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
14788 __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
14792 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
14793 * @work: pointer to work element
14795 * Translates from the work handler and calls the fast-path handler.
14798 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
14800 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14801 struct lpfc_queue, sched_irqwork);
14803 __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
14807 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
14808 * @irq: Interrupt number.
14809 * @dev_id: The device context pointer.
14811 * This function is directly called from the PCI layer as an interrupt
14812 * service routine when device with SLI-4 interface spec is enabled with
14813 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
14814 * ring event in the HBA. However, when the device is enabled with either
14815 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
14816 * device-level interrupt handler. When the PCI slot is in error recovery
14817 * or the HBA is undergoing initialization, the interrupt handler will not
14818 * process the interrupt. The SCSI FCP fast-path ring events are handled in
14819 * the interrupt context. This function is called without any lock held.
14820 * It gets the hbalock to access and update SLI data structures. Note that,
14821 * the FCP EQs map one-to-one to the FCP CQs, so the FCP EQ index is
14822 * equal to the FCP CQ index.
14824 * The link attention and ELS ring attention events are handled
14825 * by the worker thread. The interrupt handler signals the worker thread
14826 * and returns for these events. This function is called without any lock
14827 * held. It gets the hbalock to access and update SLI data structures.
14829 * This function returns IRQ_HANDLED when the interrupt is handled; otherwise
14830 * it returns IRQ_NONE.
14833 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
14835 struct lpfc_hba *phba;
14836 struct lpfc_hba_eq_hdl *hba_eq_hdl;
14837 struct lpfc_queue *fpeq;
14838 unsigned long iflag;
14841 struct lpfc_eq_intr_info *eqi;
14843 /* Get the driver's phba structure from the dev_id */
14844 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
14845 phba = hba_eq_hdl->phba;
14846 hba_eqidx = hba_eq_hdl->idx;
14848 if (unlikely(!phba))
14850 if (unlikely(!phba->sli4_hba.hdwq))
14853 /* Get to the EQ struct associated with this vector */
14854 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
14855 if (unlikely(!fpeq))
14858 /* Check device state for handling interrupt */
14859 if (unlikely(lpfc_intr_state_check(phba))) {
14860 /* Check again for link_state with lock held */
14861 spin_lock_irqsave(&phba->hbalock, iflag);
14862 if (phba->link_state < LPFC_LINK_DOWN)
14863 /* Flush, clear interrupt, and rearm the EQ */
14864 lpfc_sli4_eqcq_flush(phba, fpeq);
14865 spin_unlock_irqrestore(&phba->hbalock, iflag);
14869 eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
14872 fpeq->last_cpu = raw_smp_processor_id();
14874 if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
14875 fpeq->q_flag & HBA_EQ_DELAY_CHK &&
14876 phba->cfg_auto_imax &&
14877 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
14878 phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
14879 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
14881 /* process and rearm the EQ */
14882 ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
14884 if (unlikely(ecount == 0)) {
14885 fpeq->EQ_no_entry++;
14886 if (phba->intr_type == MSIX)
14887 /* For MSI-X, the EQ is not shared, so an empty EQ is unexpected */
14888 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14889 "0358 MSI-X interrupt with no EQE\n");
14891 /* Non MSI-X: the interrupt may be shared, so an empty EQ is possible */
14895 return IRQ_HANDLED;
14896 } /* lpfc_sli4_hba_intr_handler */
14899 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
14900 * @irq: Interrupt number.
14901 * @dev_id: The device context pointer.
14903 * This function is the device-level interrupt handler to device with SLI-4
14904 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
14905 * interrupt mode is enabled and there is an event in the HBA which requires
14906 * driver attention. This function invokes the slow-path interrupt attention
14907 * handling function and fast-path interrupt attention handling function in
14908 * turn to process the relevant HBA attention events. This function is called
14909 * without any lock held. It gets the hbalock to access and update SLI data
14912 * This function returns IRQ_HANDLED when the interrupt is handled; otherwise
14913 * it returns IRQ_NONE.
14916 lpfc_sli4_intr_handler(int irq, void *dev_id)
14918 struct lpfc_hba *phba;
14919 irqreturn_t hba_irq_rc;
14920 bool hba_handled = false;
14923 /* Get the driver's phba structure from the dev_id */
14924 phba = (struct lpfc_hba *)dev_id;
14926 if (unlikely(!phba))
14930 * Invoke fast-path host attention interrupt handling as appropriate.
14932 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
14933 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
14934 &phba->sli4_hba.hba_eq_hdl[qidx]);
14935 if (hba_irq_rc == IRQ_HANDLED)
14936 hba_handled |= true;
14939 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
14940 } /* lpfc_sli4_intr_handler */
14942 void lpfc_sli4_poll_hbtimer(struct timer_list *t)
14944 struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
14945 struct lpfc_queue *eq;
14950 list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
14951 i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
14952 if (!list_empty(&phba->poll_list))
14953 mod_timer(&phba->cpuhp_poll_timer,
14954 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
14959 inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
14961 struct lpfc_hba *phba = eq->phba;
14965 * Unlocking an irq is one of the entry points to check
14966 * for re-schedule, but we are fine on the io submission
14967 * path as the midlayer does a get_cpu to glue us in. Flush
14968 * out the invalidate queue so we can see the updated
14969 * value.
14973 if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
14974 /* We will likely not get the completion for the caller
14975 * during this iteration, but that's fine: future io's
14976 * coming on this eq should be able to pick it up. As
14977 * for single io's, they will be handled through a sched
14978 * from the polling timer function, which is currently
14979 * triggered every 1 msec.
14981 i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
14986 static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
14988 struct lpfc_hba *phba = eq->phba;
14990 /* kickstart slowpath processing if needed */
14991 if (list_empty(&phba->poll_list))
14992 mod_timer(&phba->cpuhp_poll_timer,
14993 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
14995 list_add_rcu(&eq->_poll_list, &phba->poll_list);
14999 static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
15001 struct lpfc_hba *phba = eq->phba;
15003 /* Disable slowpath processing for this eq. Kick start the eq
15004 * by re-arming it ASAP
15006 list_del_rcu(&eq->_poll_list);
15009 if (list_empty(&phba->poll_list))
15010 del_timer_sync(&phba->cpuhp_poll_timer);
15013 void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
15015 struct lpfc_queue *eq, *next;
15017 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
15018 list_del(&eq->_poll_list);
15020 INIT_LIST_HEAD(&phba->poll_list);
15025 __lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
15027 if (mode == eq->mode)
15030 * Currently this function is only called during a hotplug
15031 * event and the cpu on which this function is executing
15032 * is going offline. By now the hotplug has instructed
15033 * the scheduler to remove this cpu from the cpu active mask.
15034 * So we don't need to worry about being put aside by the
15035 * scheduler for a high priority process. Yes, interrupts
15036 * could still come in, but they are known to retire ASAP.
15039 /* Disable polling in the fastpath */
15040 WRITE_ONCE(eq->mode, mode);
15041 /* flush out the store buffer */
15045 * Add this eq to the polling list and start polling. For
15046 * a grace period both the interrupt handler and poller will
15047 * try to process the eq _but_ that's fine. We have a
15048 * synchronization mechanism in place (queue_claimed) to
15049 * deal with it. This is just a draining phase for the
15050 * interrupt handler (not eq's) as we have guaranteed through
15051 * the barrier that all the CPUs have seen the new CQ_POLLED
15052 * state, which effectively disables the re-arming of
15053 * the EQ. The whole idea is that eq's die off eventually as
15054 * we are not re-arming EQ's anymore.
15056 mode ? lpfc_sli4_add_to_poll_list(eq) :
15057 lpfc_sli4_remove_from_poll_list(eq);
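/*
 * Illustrative sketch (not driver code): the mode switch above is a "publish
 * then drain" pattern: WRITE_ONCE the new mode, issue a full barrier, and let
 * in-flight observers finish. A C11-atomics analogue; eq_mode and the mode
 * values are hypothetical stand-ins.
 */
#if 0
#include <stdatomic.h>

enum { EQ_INTERRUPT = 0, EQ_POLL = 1 };

static atomic_int eq_mode = EQ_INTERRUPT;

static void switch_eq_mode(int new_mode)
{
	if (atomic_load_explicit(&eq_mode, memory_order_relaxed) == new_mode)
		return;

	/* Publish the new mode to all CPUs before acting on it */
	atomic_store_explicit(&eq_mode, new_mode, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* analogue of smp_mb() */

	/* From here on, readers are guaranteed to see new_mode */
}
#endif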
15060 void lpfc_sli4_start_polling(struct lpfc_queue *eq)
15062 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
15065 void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
15067 struct lpfc_hba *phba = eq->phba;
15069 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
15071 /* Kick start the pending io's in h/w.
15072 * Once we switch back to interrupt processing on an eq,
15073 * the io path completion will only arm eq's when it
15074 * receives a completion. But since the eq's are in a
15075 * disarmed state, no completion is received. This
15076 * creates a deadlock scenario.
15078 phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
15082 * lpfc_sli4_queue_free - free a queue structure and associated memory
15083 * @queue: The queue structure to free.
15085 * This function frees a queue structure and the DMAable memory used for
15086 * the host resident queue. This function must be called after destroying the
15087 * queue on the HBA.
15090 lpfc_sli4_queue_free(struct lpfc_queue *queue)
15092 struct lpfc_dmabuf *dmabuf;
15097 if (!list_empty(&queue->wq_list))
15098 list_del(&queue->wq_list);
15100 while (!list_empty(&queue->page_list)) {
15101 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
15103 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
15104 dmabuf->virt, dmabuf->phys);
15108 lpfc_free_rq_buffer(queue->phba, queue);
15109 kfree(queue->rqbp);
15112 if (!list_empty(&queue->cpu_list))
15113 list_del(&queue->cpu_list);
15120 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
15121 * @phba: The HBA that this queue is being created on.
15122 * @page_size: The size of a queue page
15123 * @entry_size: The size of each queue entry for this queue.
15124 * @entry_count: The number of entries that this queue will handle.
15125 * @cpu: The cpu that will primarily utilize this queue.
15127 * This function allocates a queue structure and the DMAable memory used for
15128 * the host resident queue. This function must be called before creating the
15129 * queue on the HBA.
15131 struct lpfc_queue *
15132 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
15133 uint32_t entry_size, uint32_t entry_count, int cpu)
15135 struct lpfc_queue *queue;
15136 struct lpfc_dmabuf *dmabuf;
15137 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15140 if (!phba->sli4_hba.pc_sli4_params.supported)
15141 hw_page_size = page_size;
15143 pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
15145 /* If needed, adjust page count to match the max the adapter supports */
15146 if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
15147 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
15149 queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
15150 GFP_KERNEL, cpu_to_node(cpu));
15154 INIT_LIST_HEAD(&queue->list);
15155 INIT_LIST_HEAD(&queue->_poll_list);
15156 INIT_LIST_HEAD(&queue->wq_list);
15157 INIT_LIST_HEAD(&queue->wqfull_list);
15158 INIT_LIST_HEAD(&queue->page_list);
15159 INIT_LIST_HEAD(&queue->child_list);
15160 INIT_LIST_HEAD(&queue->cpu_list);
15162 /* Set queue parameters now. If the system cannot provide memory
15163 * resources, the free routine needs to know what was allocated.
15165 queue->page_count = pgcnt;
15166 queue->q_pgs = (void **)&queue[1];
15167 queue->entry_cnt_per_pg = hw_page_size / entry_size;
15168 queue->entry_size = entry_size;
15169 queue->entry_count = entry_count;
15170 queue->page_size = hw_page_size;
15171 queue->phba = phba;
15173 for (x = 0; x < queue->page_count; x++) {
15174 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
15175 dev_to_node(&phba->pcidev->dev));
15178 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
15179 hw_page_size, &dmabuf->phys,
15181 if (!dmabuf->virt) {
15185 dmabuf->buffer_tag = x;
15186 list_add_tail(&dmabuf->list, &queue->page_list);
15187 /* use lpfc_sli4_qe to index a particular entry in this page */
15188 queue->q_pgs[x] = dmabuf->virt;
15190 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
15191 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
15192 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
15193 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
15195 /* notify_interval will be set during q creation */
15199 lpfc_sli4_queue_free(queue);
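/*
 * Illustrative sketch (not driver code): the page count above is
 * ALIGN(entry_size * entry_count, page_size) / page_size, i.e. a round-up
 * division. A standalone analogue with the usual power-of-two ALIGN macro.
 */
#if 0
#include <stdint.h>

/* Round x up to the next multiple of a (a must be a power of two) */
#define ALIGN_POW2(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

static uint32_t pages_needed(uint32_t entry_size, uint32_t entry_count,
			     uint32_t page_size)
{
	return ALIGN_POW2((uintptr_t)entry_size * entry_count, page_size) /
	       page_size;
}

/* e.g. 256 entries of 64 bytes on 4 KiB pages -> 4 pages */
#endif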
15204 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
15205 * @phba: HBA structure that indicates port to create a queue on.
15206 * @pci_barset: PCI BAR set flag.
15208 * This function performs an iomap of the specified PCI BAR address to a host
15209 * memory address, if not already done, and returns it. The returned host
15210 * memory address can be NULL.
15212 static void __iomem *
15213 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
15218 switch (pci_barset) {
15219 case WQ_PCI_BAR_0_AND_1:
15220 return phba->pci_bar0_memmap_p;
15221 case WQ_PCI_BAR_2_AND_3:
15222 return phba->pci_bar2_memmap_p;
15223 case WQ_PCI_BAR_4_AND_5:
15224 return phba->pci_bar4_memmap_p;
15232 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
15233 * @phba: HBA structure that EQs are on.
15234 * @startq: The starting EQ index to modify
15235 * @numq: The number of EQs (consecutive indexes) to modify
15236 * @usdelay: amount of delay
15238 * This function revises the EQ delay on 1 or more EQs. The EQ delay
15239 * is set either by writing to a register (if supported by the SLI Port)
15240 * or by mailbox command. The mailbox command allows several EQs to be
15243 * The @phba struct is used to send a mailbox command to HBA. The @startq
15244 * is used to get the starting EQ index to change. The @numq value is
15245 * used to specify how many consecutive EQ indexes, starting at EQ index,
15246 * are to be changed. This function is synchronous and waits for any
15247 * mailbox commands to finish before returning.
15249 * On success this function will return a zero. If unable to allocate
15250 * enough memory this function will return -ENOMEM. If a mailbox command
15251 * fails this function will return -ENXIO. Note: on ENXIO, some EQs may
15252 * have had their delay multiplier changed.
15255 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
15256 uint32_t numq, uint32_t usdelay)
15258 struct lpfc_mbx_modify_eq_delay *eq_delay;
15259 LPFC_MBOXQ_t *mbox;
15260 struct lpfc_queue *eq;
15261 int cnt = 0, rc, length;
15262 uint32_t shdr_status, shdr_add_status;
15265 union lpfc_sli4_cfg_shdr *shdr;
15267 if (startq >= phba->cfg_irq_chann)
15270 if (usdelay > 0xFFFF) {
15271 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
15272 "6429 usdelay %d too large. Scaled down to "
15273 "0xFFFF.\n", usdelay);
15277 /* set values by EQ_DELAY register if supported */
15278 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
15279 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15280 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15284 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
15292 /* Otherwise, set values by mailbox cmd */
15294 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15296 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15297 "6428 Failed allocating mailbox cmd buffer."
15298 " EQ delay was not set.\n");
15301 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
15302 sizeof(struct lpfc_sli4_cfg_mhdr));
15303 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15304 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
15305 length, LPFC_SLI4_MBX_EMBED);
15306 eq_delay = &mbox->u.mqe.un.eq_delay;
15308 /* Calculate the delay multiplier from the maximum interrupts per second */
15309 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
15312 if (dmult > LPFC_DMULT_MAX)
15313 dmult = LPFC_DMULT_MAX;
15315 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15316 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15319 eq->q_mode = usdelay;
15320 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
15321 eq_delay->u.request.eq[cnt].phase = 0;
15322 eq_delay->u.request.eq[cnt].delay_multi = dmult;
15327 eq_delay->u.request.num_eq = cnt;
15329 mbox->vport = phba->pport;
15330 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15331 mbox->ctx_buf = NULL;
15332 mbox->ctx_ndlp = NULL;
15333 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15334 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
15335 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15336 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15337 if (shdr_status || shdr_add_status || rc) {
15338 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15339 "2512 MODIFY_EQ_DELAY mailbox failed with "
15340 "status x%x add_status x%x, mbx status x%x\n",
15341 shdr_status, shdr_add_status, rc);
15343 mempool_free(mbox, phba->mbox_mem_pool);
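/*
 * Illustrative sketch (not driver code): the delay multiplier above is
 * derived from the requested delay in microseconds and clamped to the
 * hardware maximum. A standalone analogue; the constants mirror the roles of
 * LPFC_DMULT_CONST/LPFC_DMULT_MAX, but their values here are only assumptions
 * for the example.
 */
#if 0
#include <stdint.h>

#define SEC_TO_USEC	1000000u
#define DMULT_CONST	0xFFFFu		/* hypothetical scaling constant */
#define DMULT_MAX	0xFFu		/* hypothetical hardware clamp */

static uint32_t eq_delay_to_dmult(uint32_t usdelay)
{
	uint32_t dmult = (usdelay * DMULT_CONST) / SEC_TO_USEC;

	if (dmult > DMULT_MAX)
		dmult = DMULT_MAX;	/* hardware field is narrow */
	return dmult;
}
#endif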
15348 * lpfc_eq_create - Create an Event Queue on the HBA
15349 * @phba: HBA structure that indicates port to create a queue on.
15350 * @eq: The queue structure to use to create the event queue.
15351 * @imax: The maximum interrupt per second limit.
15353 * This function creates an event queue, as detailed in @eq, on a port,
15354 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
15356 * The @phba struct is used to send mailbox command to HBA. The @eq struct
15357 * is used to get the entry count and entry size that are necessary to
15358 * determine the number of pages to allocate and use for this queue. This
15359 * function will send the EQ_CREATE mailbox command to the HBA to setup the
15360 * event queue. This function is synchronous and waits for the mailbox
15361 * command to finish before continuing.
15363 * On success this function will return a zero. If unable to allocate enough
15364 * memory this function will return -ENOMEM. If the queue create mailbox command
15365 * fails this function will return -ENXIO.
15368 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
15370 struct lpfc_mbx_eq_create *eq_create;
15371 LPFC_MBOXQ_t *mbox;
15372 int rc, length, status = 0;
15373 struct lpfc_dmabuf *dmabuf;
15374 uint32_t shdr_status, shdr_add_status;
15375 union lpfc_sli4_cfg_shdr *shdr;
15377 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15379 /* sanity check on queue memory */
15382 if (!phba->sli4_hba.pc_sli4_params.supported)
15383 hw_page_size = SLI4_PAGE_SIZE;
15385 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15388 length = (sizeof(struct lpfc_mbx_eq_create) -
15389 sizeof(struct lpfc_sli4_cfg_mhdr));
15390 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15391 LPFC_MBOX_OPCODE_EQ_CREATE,
15392 length, LPFC_SLI4_MBX_EMBED);
15393 eq_create = &mbox->u.mqe.un.eq_create;
15394 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
15395 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
15397 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
15399 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
15401 /* Use version 2 of CREATE_EQ if eqav is set */
15402 if (phba->sli4_hba.pc_sli4_params.eqav) {
15403 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15404 LPFC_Q_CREATE_VERSION_2);
15405 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
15406 phba->sli4_hba.pc_sli4_params.eqav);
15409 /* don't setup delay multiplier using EQ_CREATE */
15411 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
15413 switch (eq->entry_count) {
15415 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15416 "0360 Unsupported EQ count. (%d)\n",
15418 if (eq->entry_count < 256) {
15422 fallthrough; /* otherwise default to smallest count */
15424 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15428 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15432 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15436 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15440 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15444 list_for_each_entry(dmabuf, &eq->page_list, list) {
15445 memset(dmabuf->virt, 0, hw_page_size);
15446 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15447 putPaddrLow(dmabuf->phys);
15448 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15449 putPaddrHigh(dmabuf->phys);
15451 mbox->vport = phba->pport;
15452 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15453 mbox->ctx_buf = NULL;
15454 mbox->ctx_ndlp = NULL;
15455 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15456 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15457 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15458 if (shdr_status || shdr_add_status || rc) {
15459 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15460 "2500 EQ_CREATE mailbox failed with "
15461 "status x%x add_status x%x, mbx status x%x\n",
15462 shdr_status, shdr_add_status, rc);
15465 eq->type = LPFC_EQ;
15466 eq->subtype = LPFC_NONE;
15467 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
15468 if (eq->queue_id == 0xFFFF)
15470 eq->host_index = 0;
15471 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
15472 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
15474 mempool_free(mbox, phba->mbox_mem_pool);
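/*
 * Illustrative sketch, not part of the driver: minimal error handling a
 * caller might wrap around lpfc_eq_create().  The helper name is
 * hypothetical, @eq is assumed to have been allocated with its page_list
 * already populated, and LPFC_DEF_IMAX is assumed to be a suitable
 * default interrupts-per-second cap.
 */
static int lpfc_example_bring_up_eq(struct lpfc_hba *phba,
				    struct lpfc_queue *eq)
{
	int rc;

	/* EQ_CREATE is issued as a polled mailbox command internally. */
	rc = lpfc_eq_create(phba, eq, LPFC_DEF_IMAX);
	if (rc)
		/* -ENOMEM: no mailbox memory; -ENXIO: command failed */
		return rc;

	/* On success eq->queue_id holds the port-assigned queue id. */
	return 0;
}

/* irq_poll callback: process up to @budget CQEs in softirq context. */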
15478 static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget)
15480 struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
15482 __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
15488 * lpfc_cq_create - Create a Completion Queue on the HBA
15489 * @phba: HBA structure that indicates port to create a queue on.
15490 * @cq: The queue structure to use to create the completion queue.
15491 * @eq: The event queue to bind this completion queue to.
15492 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
15493 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
15495 * This function creates a completion queue, as detailed in @cq, on a port,
15496 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
15498 * The @phba struct is used to send mailbox command to HBA. The @cq struct
15499 * is used to get the entry count and entry size that are necessary to
15500 * determine the number of pages to allocate and use for this queue. The @eq
15501 * is used to indicate which event queue to bind this completion queue to. This
15502 * function will send the CQ_CREATE mailbox command to the HBA to setup the
15503 * completion queue. This function is synchronous and will wait for the mailbox
15504 * command to finish before continuing.
15506 * On success this function will return a zero. If unable to allocate enough
15507 * memory this function will return -ENOMEM. If the queue create mailbox command
15508 * fails this function will return -ENXIO.
15511 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
15512 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
15514 struct lpfc_mbx_cq_create *cq_create;
15515 struct lpfc_dmabuf *dmabuf;
15516 LPFC_MBOXQ_t *mbox;
15517 int rc, length, status = 0;
15518 uint32_t shdr_status, shdr_add_status;
15519 union lpfc_sli4_cfg_shdr *shdr;
15521 /* sanity check on queue memory */
15525 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15528 length = (sizeof(struct lpfc_mbx_cq_create) -
15529 sizeof(struct lpfc_sli4_cfg_mhdr));
15530 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15531 LPFC_MBOX_OPCODE_CQ_CREATE,
15532 length, LPFC_SLI4_MBX_EMBED);
15533 cq_create = &mbox->u.mqe.un.cq_create;
15534 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
15535 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
15537 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
15538 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
15539 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15540 phba->sli4_hba.pc_sli4_params.cqv);
15541 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
15542 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
15543 (cq->page_size / SLI4_PAGE_SIZE));
15544 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
15546 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
15547 phba->sli4_hba.pc_sli4_params.cqav);
15549 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
15552 switch (cq->entry_count) {
15555 if (phba->sli4_hba.pc_sli4_params.cqv ==
15556 LPFC_Q_CREATE_VERSION_2) {
15557 cq_create->u.request.context.lpfc_cq_context_count =
15559 bf_set(lpfc_cq_context_count,
15560 &cq_create->u.request.context,
15561 LPFC_CQ_CNT_WORD7);
15566 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15567 "0361 Unsupported CQ count: "
15568 "entry cnt %d sz %d pg cnt %d\n",
15569 cq->entry_count, cq->entry_size,
15571 if (cq->entry_count < 256) {
15575 fallthrough; /* otherwise default to smallest count */
15577 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15581 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15585 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15589 list_for_each_entry(dmabuf, &cq->page_list, list) {
15590 memset(dmabuf->virt, 0, cq->page_size);
15591 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15592 putPaddrLow(dmabuf->phys);
15593 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15594 putPaddrHigh(dmabuf->phys);
15596 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15598 /* The IOCTL status is embedded in the mailbox subheader. */
15599 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15600 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15601 if (shdr_status || shdr_add_status || rc) {
15602 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15603 "2501 CQ_CREATE mailbox failed with "
15604 "status x%x add_status x%x, mbx status x%x\n",
15605 shdr_status, shdr_add_status, rc);
15609 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
15610 if (cq->queue_id == 0xFFFF) {
15614 /* link the cq onto the parent eq child list */
15615 list_add_tail(&cq->list, &eq->child_list);
15616 /* Set up completion queue's type and subtype */
15618 cq->subtype = subtype;
15619 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
15620 cq->assoc_qid = eq->queue_id;
15622 cq->host_index = 0;
15623 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15624 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
15626 if (cq->queue_id > phba->sli4_hba.cq_max)
15627 phba->sli4_hba.cq_max = cq->queue_id;
15629 irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler);
15631 mempool_free(mbox, phba->mbox_mem_pool);
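/*
 * Illustrative sketch, not part of the driver: binding a new completion
 * queue to an already-created event queue.  The helper name is
 * hypothetical; LPFC_WCQ/LPFC_ELS are assumed to be the usual work
 * completion queue type/subtype values used elsewhere in the driver.
 */
static int lpfc_example_bring_up_cq(struct lpfc_hba *phba,
				    struct lpfc_queue *cq,
				    struct lpfc_queue *eq)
{
	/* On success the CQ is linked onto eq->child_list. */
	return lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_ELS);
}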
15636 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
15637 * @phba: HBA structure that indicates port to create a queue on.
15638 * @cqp: The queue structure array to use to create the completion queues.
15639 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
15640 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
15641 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
15643 * This function creates a set of completion queues to support MRQ,
15644 * as detailed in @cqp, on a port,
15645 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
15647 * The @phba struct is used to send mailbox command to HBA. The @cqp array
15648 * is used to get the entry count and entry size that are necessary to
15649 * determine the number of pages to allocate and use for each queue. The EQs
15650 * in @hdwq indicate which event queue to bind each completion queue to. This
15651 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the
15652 * completion queues. This function is synchronous and will wait for the mailbox
15653 * command to finish before continuing.
15655 * On success this function will return a zero. If unable to allocate enough
15656 * memory this function will return -ENOMEM. If the queue create mailbox command
15657 * fails this function will return -ENXIO.
15660 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
15661 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
15664 struct lpfc_queue *cq;
15665 struct lpfc_queue *eq;
15666 struct lpfc_mbx_cq_create_set *cq_set;
15667 struct lpfc_dmabuf *dmabuf;
15668 LPFC_MBOXQ_t *mbox;
15669 int rc, length, alloclen, status = 0;
15670 int cnt, idx, numcq, page_idx = 0;
15671 uint32_t shdr_status, shdr_add_status;
15672 union lpfc_sli4_cfg_shdr *shdr;
15673 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15675 /* sanity check on queue memory */
15676 numcq = phba->cfg_nvmet_mrq;
15677 if (!cqp || !hdwq || !numcq)
15680 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15684 length = sizeof(struct lpfc_mbx_cq_create_set);
15685 length += ((numcq * cqp[0]->page_count) *
15686 sizeof(struct dma_address));
15687 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15688 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
15689 LPFC_SLI4_MBX_NEMBED);
15690 if (alloclen < length) {
15691 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15692 "3098 Allocated DMA memory size (%d) is "
15693 "less than the requested DMA memory size "
15694 "(%d)\n", alloclen, length);
15698 cq_set = mbox->sge_array->addr[0];
15699 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
15700 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
15702 for (idx = 0; idx < numcq; idx++) {
15704 eq = hdwq[idx].hba_eq;
15709 if (!phba->sli4_hba.pc_sli4_params.supported)
15710 hw_page_size = cq->page_size;
15714 bf_set(lpfc_mbx_cq_create_set_page_size,
15715 &cq_set->u.request,
15716 (hw_page_size / SLI4_PAGE_SIZE));
15717 bf_set(lpfc_mbx_cq_create_set_num_pages,
15718 &cq_set->u.request, cq->page_count);
15719 bf_set(lpfc_mbx_cq_create_set_evt,
15720 &cq_set->u.request, 1);
15721 bf_set(lpfc_mbx_cq_create_set_valid,
15722 &cq_set->u.request, 1);
15723 bf_set(lpfc_mbx_cq_create_set_cqe_size,
15724 &cq_set->u.request, 0);
15725 bf_set(lpfc_mbx_cq_create_set_num_cq,
15726 &cq_set->u.request, numcq);
15727 bf_set(lpfc_mbx_cq_create_set_autovalid,
15728 &cq_set->u.request,
15729 phba->sli4_hba.pc_sli4_params.cqav);
15730 switch (cq->entry_count) {
15733 if (phba->sli4_hba.pc_sli4_params.cqv ==
15734 LPFC_Q_CREATE_VERSION_2) {
15735 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15736 &cq_set->u.request,
15738 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15739 &cq_set->u.request,
15740 LPFC_CQ_CNT_WORD7);
15745 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15746 "3118 Bad CQ count. (%d)\n",
15748 if (cq->entry_count < 256) {
15752 fallthrough; /* otherwise default to smallest */
15754 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15755 &cq_set->u.request, LPFC_CQ_CNT_256);
15758 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15759 &cq_set->u.request, LPFC_CQ_CNT_512);
15762 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15763 &cq_set->u.request, LPFC_CQ_CNT_1024);
15766 bf_set(lpfc_mbx_cq_create_set_eq_id0,
15767 &cq_set->u.request, eq->queue_id);
15770 bf_set(lpfc_mbx_cq_create_set_eq_id1,
15771 &cq_set->u.request, eq->queue_id);
15774 bf_set(lpfc_mbx_cq_create_set_eq_id2,
15775 &cq_set->u.request, eq->queue_id);
15778 bf_set(lpfc_mbx_cq_create_set_eq_id3,
15779 &cq_set->u.request, eq->queue_id);
15782 bf_set(lpfc_mbx_cq_create_set_eq_id4,
15783 &cq_set->u.request, eq->queue_id);
15786 bf_set(lpfc_mbx_cq_create_set_eq_id5,
15787 &cq_set->u.request, eq->queue_id);
15790 bf_set(lpfc_mbx_cq_create_set_eq_id6,
15791 &cq_set->u.request, eq->queue_id);
15794 bf_set(lpfc_mbx_cq_create_set_eq_id7,
15795 &cq_set->u.request, eq->queue_id);
15798 bf_set(lpfc_mbx_cq_create_set_eq_id8,
15799 &cq_set->u.request, eq->queue_id);
15802 bf_set(lpfc_mbx_cq_create_set_eq_id9,
15803 &cq_set->u.request, eq->queue_id);
15806 bf_set(lpfc_mbx_cq_create_set_eq_id10,
15807 &cq_set->u.request, eq->queue_id);
15810 bf_set(lpfc_mbx_cq_create_set_eq_id11,
15811 &cq_set->u.request, eq->queue_id);
15814 bf_set(lpfc_mbx_cq_create_set_eq_id12,
15815 &cq_set->u.request, eq->queue_id);
15818 bf_set(lpfc_mbx_cq_create_set_eq_id13,
15819 &cq_set->u.request, eq->queue_id);
15822 bf_set(lpfc_mbx_cq_create_set_eq_id14,
15823 &cq_set->u.request, eq->queue_id);
15826 bf_set(lpfc_mbx_cq_create_set_eq_id15,
15827 &cq_set->u.request, eq->queue_id);
15831 /* link the cq onto the parent eq child list */
15832 list_add_tail(&cq->list, &eq->child_list);
15833 /* Set up completion queue's type and subtype */
15835 cq->subtype = subtype;
15836 cq->assoc_qid = eq->queue_id;
15838 cq->host_index = 0;
15839 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15840 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
15845 list_for_each_entry(dmabuf, &cq->page_list, list) {
15846 memset(dmabuf->virt, 0, hw_page_size);
15847 cnt = page_idx + dmabuf->buffer_tag;
15848 cq_set->u.request.page[cnt].addr_lo =
15849 putPaddrLow(dmabuf->phys);
15850 cq_set->u.request.page[cnt].addr_hi =
15851 putPaddrHigh(dmabuf->phys);
15857 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15859 /* The IOCTL status is embedded in the mailbox subheader. */
15860 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15861 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15862 if (shdr_status || shdr_add_status || rc) {
15863 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15864 "3119 CQ_CREATE_SET mailbox failed with "
15865 "status x%x add_status x%x, mbx status x%x\n",
15866 shdr_status, shdr_add_status, rc);
15870 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
15871 if (rc == 0xFFFF) {
15876 for (idx = 0; idx < numcq; idx++) {
15878 cq->queue_id = rc + idx;
15879 if (cq->queue_id > phba->sli4_hba.cq_max)
15880 phba->sli4_hba.cq_max = cq->queue_id;
15884 lpfc_sli4_mbox_cmd_free(phba, mbox);
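/*
 * Illustrative sketch, not part of the driver: creating the NVMET MRQ
 * completion queue set in one CREATE_CQ_SET command.  The helper name is
 * hypothetical, and the nvmet_cqset/hdwq members are assumed to be fully
 * allocated by the driver's queue setup path before this is called.
 */
static int lpfc_example_bring_up_cq_set(struct lpfc_hba *phba)
{
	/* One CQ per NVMET MRQ, each bound to its hardware queue's EQ. */
	return lpfc_cq_create_set(phba, phba->sli4_hba.nvmet_cqset,
				  phba->sli4_hba.hdwq, LPFC_WCQ, LPFC_NVMET);
}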
15889 * lpfc_mq_create_fb_init - Send MQ_CREATE without async events registration
15890 * @phba: HBA structure that indicates port to create a queue on.
15891 * @mq: The queue structure to use to create the mailbox queue.
15892 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
15893 * @cq: The completion queue to associate with this mailbox queue.
15895 * This function provides failback (fb) functionality when the
15896 * mq_create_ext fails on older FW generations. Its purpose is identical
15897 * to mq_create_ext otherwise.
15899 * This routine cannot fail as all attributes were previously accessed and
15900 * initialized in mq_create_ext.
15903 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15904 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
15906 struct lpfc_mbx_mq_create *mq_create;
15907 struct lpfc_dmabuf *dmabuf;
15910 length = (sizeof(struct lpfc_mbx_mq_create) -
15911 sizeof(struct lpfc_sli4_cfg_mhdr));
15912 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15913 LPFC_MBOX_OPCODE_MQ_CREATE,
15914 length, LPFC_SLI4_MBX_EMBED);
15915 mq_create = &mbox->u.mqe.un.mq_create;
15916 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
15918 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
15920 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15921 switch (mq->entry_count) {
15923 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15924 LPFC_MQ_RING_SIZE_16);
15927 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15928 LPFC_MQ_RING_SIZE_32);
15931 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15932 LPFC_MQ_RING_SIZE_64);
15935 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15936 LPFC_MQ_RING_SIZE_128);
15939 list_for_each_entry(dmabuf, &mq->page_list, list) {
15940 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15941 putPaddrLow(dmabuf->phys);
15942 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15943 putPaddrHigh(dmabuf->phys);
15948 * lpfc_mq_create - Create a mailbox Queue on the HBA
15949 * @phba: HBA structure that indicates port to create a queue on.
15950 * @mq: The queue structure to use to create the mailbox queue.
15951 * @cq: The completion queue to associate with this mailbox queue.
15952 * @subtype: The queue's subtype.
15954 * This function creates a mailbox queue, as detailed in @mq, on a port,
15955 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
15957 * The @phba struct is used to send mailbox command to HBA. The @mq struct
15958 * is used to get the entry count and entry size that are necessary to
15959 * determine the number of pages to allocate and use for this queue. This
15960 * function will send the MQ_CREATE mailbox command to the HBA to setup the
15961 * mailbox queue. This function is synchronous and will wait for the mailbox
15962 * command to finish before continuing.
15964 * On success this function will return a zero. If unable to allocate enough
15965 * memory this function will return -ENOMEM. If the queue create mailbox command
15966 * fails this function will return -ENXIO.
15969 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
15970 struct lpfc_queue *cq, uint32_t subtype)
15972 struct lpfc_mbx_mq_create *mq_create;
15973 struct lpfc_mbx_mq_create_ext *mq_create_ext;
15974 struct lpfc_dmabuf *dmabuf;
15975 LPFC_MBOXQ_t *mbox;
15976 int rc, length, status = 0;
15977 uint32_t shdr_status, shdr_add_status;
15978 union lpfc_sli4_cfg_shdr *shdr;
15979 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15981 /* sanity check on queue memory */
15984 if (!phba->sli4_hba.pc_sli4_params.supported)
15985 hw_page_size = SLI4_PAGE_SIZE;
15987 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15990 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
15991 sizeof(struct lpfc_sli4_cfg_mhdr));
15992 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15993 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
15994 length, LPFC_SLI4_MBX_EMBED);
15996 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
15997 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
15998 bf_set(lpfc_mbx_mq_create_ext_num_pages,
15999 &mq_create_ext->u.request, mq->page_count);
16000 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
16001 &mq_create_ext->u.request, 1);
16002 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
16003 &mq_create_ext->u.request, 1);
16004 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
16005 &mq_create_ext->u.request, 1);
16006 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
16007 &mq_create_ext->u.request, 1);
16008 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
16009 &mq_create_ext->u.request, 1);
16010 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
16011 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16012 phba->sli4_hba.pc_sli4_params.mqv);
16013 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
16014 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
16017 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
16019 switch (mq->entry_count) {
16021 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16022 "0362 Unsupported MQ count. (%d)\n",
16024 if (mq->entry_count < 16) {
16028 fallthrough; /* otherwise default to smallest count */
16030 bf_set(lpfc_mq_context_ring_size,
16031 &mq_create_ext->u.request.context,
16032 LPFC_MQ_RING_SIZE_16);
16035 bf_set(lpfc_mq_context_ring_size,
16036 &mq_create_ext->u.request.context,
16037 LPFC_MQ_RING_SIZE_32);
16040 bf_set(lpfc_mq_context_ring_size,
16041 &mq_create_ext->u.request.context,
16042 LPFC_MQ_RING_SIZE_64);
16045 bf_set(lpfc_mq_context_ring_size,
16046 &mq_create_ext->u.request.context,
16047 LPFC_MQ_RING_SIZE_128);
16050 list_for_each_entry(dmabuf, &mq->page_list, list) {
16051 memset(dmabuf->virt, 0, hw_page_size);
16052 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
16053 putPaddrLow(dmabuf->phys);
16054 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
16055 putPaddrHigh(dmabuf->phys);
16057 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16058 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16059 &mq_create_ext->u.response);
16060 if (rc != MBX_SUCCESS) {
16061 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16062 "2795 MQ_CREATE_EXT failed with "
16063 "status x%x. Failback to MQ_CREATE.\n",
16065 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
16066 mq_create = &mbox->u.mqe.un.mq_create;
16067 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16068 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
16069 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16070 &mq_create->u.response);
16073 /* The IOCTL status is embedded in the mailbox subheader. */
16074 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16075 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16076 if (shdr_status || shdr_add_status || rc) {
16077 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16078 "2502 MQ_CREATE mailbox failed with "
16079 "status x%x add_status x%x, mbx status x%x\n",
16080 shdr_status, shdr_add_status, rc);
16084 if (mq->queue_id == 0xFFFF) {
16088 mq->type = LPFC_MQ;
16089 mq->assoc_qid = cq->queue_id;
16090 mq->subtype = subtype;
16091 mq->host_index = 0;
16094 /* link the mq onto the parent cq child list */
16095 list_add_tail(&mq->list, &cq->child_list);
16097 mempool_free(mbox, phba->mbox_mem_pool);
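/*
 * Illustrative sketch, not part of the driver: creating the mailbox
 * queue against its completion queue.  The helper name is hypothetical;
 * LPFC_MBOX is assumed to be the mailbox subtype used by the driver.
 */
static int lpfc_example_bring_up_mq(struct lpfc_hba *phba,
				    struct lpfc_queue *mq,
				    struct lpfc_queue *cq)
{
	/* MQ_CREATE_EXT is tried first; older FW falls back to MQ_CREATE. */
	return lpfc_mq_create(phba, mq, cq, LPFC_MBOX);
}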
16102 * lpfc_wq_create - Create a Work Queue on the HBA
16103 * @phba: HBA structure that indicates port to create a queue on.
16104 * @wq: The queue structure to use to create the work queue.
16105 * @cq: The completion queue to bind this work queue to.
16106 * @subtype: The subtype of the work queue indicating its functionality.
16108 * This function creates a work queue, as detailed in @wq, on a port, described
16109 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
16111 * The @phba struct is used to send mailbox command to HBA. The @wq struct
16112 * is used to get the entry count and entry size that are necessary to
16113 * determine the number of pages to allocate and use for this queue. The @cq
16114 * is used to indicate which completion queue to bind this work queue to. This
16115 * function will send the WQ_CREATE mailbox command to the HBA to setup the
16116 * work queue. This function is synchronous and will wait for the mailbox
16117 * command to finish before continuing.
16119 * On success this function will return a zero. If unable to allocate enough
16120 * memory this function will return -ENOMEM. If the queue create mailbox command
16121 * fails this function will return -ENXIO.
16124 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
16125 struct lpfc_queue *cq, uint32_t subtype)
16127 struct lpfc_mbx_wq_create *wq_create;
16128 struct lpfc_dmabuf *dmabuf;
16129 LPFC_MBOXQ_t *mbox;
16130 int rc, length, status = 0;
16131 uint32_t shdr_status, shdr_add_status;
16132 union lpfc_sli4_cfg_shdr *shdr;
16133 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16134 struct dma_address *page;
16135 void __iomem *bar_memmap_p;
16136 uint32_t db_offset;
16137 uint16_t pci_barset;
16138 uint8_t dpp_barset;
16139 uint32_t dpp_offset;
16140 uint8_t wq_create_version;
16142 unsigned long pg_addr;
16145 /* sanity check on queue memory */
16148 if (!phba->sli4_hba.pc_sli4_params.supported)
16149 hw_page_size = wq->page_size;
16151 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16154 length = (sizeof(struct lpfc_mbx_wq_create) -
16155 sizeof(struct lpfc_sli4_cfg_mhdr));
16156 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16157 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
16158 length, LPFC_SLI4_MBX_EMBED);
16159 wq_create = &mbox->u.mqe.un.wq_create;
16160 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
16161 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
16163 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
16166 /* wqv is the earliest version supported, NOT the latest */
16167 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16168 phba->sli4_hba.pc_sli4_params.wqv);
16170 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
16171 (wq->page_size > SLI4_PAGE_SIZE))
16172 wq_create_version = LPFC_Q_CREATE_VERSION_1;
16174 wq_create_version = LPFC_Q_CREATE_VERSION_0;
16176 switch (wq_create_version) {
16177 case LPFC_Q_CREATE_VERSION_1:
16178 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
16180 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16181 LPFC_Q_CREATE_VERSION_1);
16183 switch (wq->entry_size) {
16186 bf_set(lpfc_mbx_wq_create_wqe_size,
16187 &wq_create->u.request_1,
16188 LPFC_WQ_WQE_SIZE_64);
16191 bf_set(lpfc_mbx_wq_create_wqe_size,
16192 &wq_create->u.request_1,
16193 LPFC_WQ_WQE_SIZE_128);
16196 /* Request DPP by default */
16197 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
16198 bf_set(lpfc_mbx_wq_create_page_size,
16199 &wq_create->u.request_1,
16200 (wq->page_size / SLI4_PAGE_SIZE));
16201 page = wq_create->u.request_1.page;
16204 page = wq_create->u.request.page;
16208 list_for_each_entry(dmabuf, &wq->page_list, list) {
16209 memset(dmabuf->virt, 0, hw_page_size);
16210 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
16211 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
16214 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16215 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
16217 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16218 /* The IOCTL status is embedded in the mailbox subheader. */
16219 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16220 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16221 if (shdr_status || shdr_add_status || rc) {
16222 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16223 "2503 WQ_CREATE mailbox failed with "
16224 "status x%x add_status x%x, mbx status x%x\n",
16225 shdr_status, shdr_add_status, rc);
16230 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
16231 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
16232 &wq_create->u.response);
16234 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
16235 &wq_create->u.response_1);
16237 if (wq->queue_id == 0xFFFF) {
16242 wq->db_format = LPFC_DB_LIST_FORMAT;
16243 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
16244 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16245 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
16246 &wq_create->u.response);
16247 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
16248 (wq->db_format != LPFC_DB_RING_FORMAT)) {
16249 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16250 "3265 WQ[%d] doorbell format "
16251 "not supported: x%x\n",
16252 wq->queue_id, wq->db_format);
16256 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
16257 &wq_create->u.response);
16258 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16260 if (!bar_memmap_p) {
16261 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16262 "3263 WQ[%d] failed to memmap "
16263 "pci barset:x%x\n",
16264 wq->queue_id, pci_barset);
16268 db_offset = wq_create->u.response.doorbell_offset;
16269 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
16270 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
16271 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16272 "3252 WQ[%d] doorbell offset "
16273 "not supported: x%x\n",
16274 wq->queue_id, db_offset);
16278 wq->db_regaddr = bar_memmap_p + db_offset;
16279 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16280 "3264 WQ[%d]: barset:x%x, offset:x%x, "
16281 "format:x%x\n", wq->queue_id,
16282 pci_barset, db_offset, wq->db_format);
16284 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16286 /* Check if DPP was honored by the firmware */
16287 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
16288 &wq_create->u.response_1);
16289 if (wq->dpp_enable) {
16290 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
16291 &wq_create->u.response_1);
16292 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16294 if (!bar_memmap_p) {
16295 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16296 "3267 WQ[%d] failed to memmap "
16297 "pci barset:x%x\n",
16298 wq->queue_id, pci_barset);
16302 db_offset = wq_create->u.response_1.doorbell_offset;
16303 wq->db_regaddr = bar_memmap_p + db_offset;
16304 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
16305 &wq_create->u.response_1);
16306 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
16307 &wq_create->u.response_1);
16308 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16310 if (!bar_memmap_p) {
16311 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16312 "3268 WQ[%d] failed to memmap "
16313 "pci barset:x%x\n",
16314 wq->queue_id, dpp_barset);
16318 dpp_offset = wq_create->u.response_1.dpp_offset;
16319 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
16320 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16321 "3271 WQ[%d]: barset:x%x, offset:x%x, "
16322 "dpp_id:x%x dpp_barset:x%x "
16323 "dpp_offset:x%x\n",
16324 wq->queue_id, pci_barset, db_offset,
16325 wq->dpp_id, dpp_barset, dpp_offset);
16328 /* Enable combined writes for DPP aperture */
16329 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
16330 rc = set_memory_wc(pg_addr, 1);
16332 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16333 "3272 Cannot setup Combined "
16334 "Write on WQ[%d] - disable DPP\n",
16336 phba->cfg_enable_dpp = 0;
16339 phba->cfg_enable_dpp = 0;
16342 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16344 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
16345 if (wq->pring == NULL) {
16349 wq->type = LPFC_WQ;
16350 wq->assoc_qid = cq->queue_id;
16351 wq->subtype = subtype;
16352 wq->host_index = 0;
16354 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
16356 /* link the wq onto the parent cq child list */
16357 list_add_tail(&wq->list, &cq->child_list);
16359 mempool_free(mbox, phba->mbox_mem_pool);
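/*
 * Illustrative sketch, not part of the driver: creating a work queue on
 * an existing completion queue.  The helper name is hypothetical;
 * LPFC_ELS is assumed as a representative subtype.
 */
static int lpfc_example_bring_up_wq(struct lpfc_hba *phba,
				    struct lpfc_queue *wq,
				    struct lpfc_queue *cq)
{
	/* On success wq->db_regaddr points at the doorbell to ring. */
	return lpfc_wq_create(phba, wq, cq, LPFC_ELS);
}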
16364 * lpfc_rq_create - Create a Receive Queue on the HBA
16365 * @phba: HBA structure that indicates port to create a queue on.
16366 * @hrq: The queue structure to use to create the header receive queue.
16367 * @drq: The queue structure to use to create the data receive queue.
16368 * @cq: The completion queue to bind this receive queue pair to.
16369 * @subtype: The subtype of the receive queues indicating their functionality.
16371 * This function creates a receive buffer queue pair, as detailed in @hrq and
16372 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
16375 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
16376 * structs are used to get the entry count that is necessary to determine the
16377 * number of pages to use for this queue. The @cq is used to indicate which
16378 * completion queue to bind received buffers that are posted to these queues to.
16379 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
16380 * receive queue pair. This function is synchronous and will wait for the
16381 * mailbox command to finish before continuing.
16383 * On success this function will return a zero. If unable to allocate enough
16384 * memory this function will return -ENOMEM. If the queue create mailbox command
16385 * fails this function will return -ENXIO.
16388 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16389 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
16391 struct lpfc_mbx_rq_create *rq_create;
16392 struct lpfc_dmabuf *dmabuf;
16393 LPFC_MBOXQ_t *mbox;
16394 int rc, length, status = 0;
16395 uint32_t shdr_status, shdr_add_status;
16396 union lpfc_sli4_cfg_shdr *shdr;
16397 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16398 void __iomem *bar_memmap_p;
16399 uint32_t db_offset;
16400 uint16_t pci_barset;
16402 /* sanity check on queue memory */
16403 if (!hrq || !drq || !cq)
16405 if (!phba->sli4_hba.pc_sli4_params.supported)
16406 hw_page_size = SLI4_PAGE_SIZE;
16408 if (hrq->entry_count != drq->entry_count)
16410 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16413 length = (sizeof(struct lpfc_mbx_rq_create) -
16414 sizeof(struct lpfc_sli4_cfg_mhdr));
16415 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16416 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16417 length, LPFC_SLI4_MBX_EMBED);
16418 rq_create = &mbox->u.mqe.un.rq_create;
16419 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16420 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16421 phba->sli4_hba.pc_sli4_params.rqv);
16422 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16423 bf_set(lpfc_rq_context_rqe_count_1,
16424 &rq_create->u.request.context,
16426 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
16427 bf_set(lpfc_rq_context_rqe_size,
16428 &rq_create->u.request.context,
16430 bf_set(lpfc_rq_context_page_size,
16431 &rq_create->u.request.context,
16432 LPFC_RQ_PAGE_SIZE_4096);
16434 switch (hrq->entry_count) {
16436 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16437 "2535 Unsupported RQ count. (%d)\n",
16439 if (hrq->entry_count < 512) {
16443 fallthrough; /* otherwise default to smallest count */
16445 bf_set(lpfc_rq_context_rqe_count,
16446 &rq_create->u.request.context,
16447 LPFC_RQ_RING_SIZE_512);
16450 bf_set(lpfc_rq_context_rqe_count,
16451 &rq_create->u.request.context,
16452 LPFC_RQ_RING_SIZE_1024);
16455 bf_set(lpfc_rq_context_rqe_count,
16456 &rq_create->u.request.context,
16457 LPFC_RQ_RING_SIZE_2048);
16460 bf_set(lpfc_rq_context_rqe_count,
16461 &rq_create->u.request.context,
16462 LPFC_RQ_RING_SIZE_4096);
16465 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
16466 LPFC_HDR_BUF_SIZE);
16468 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16470 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16472 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16473 memset(dmabuf->virt, 0, hw_page_size);
16474 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16475 putPaddrLow(dmabuf->phys);
16476 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16477 putPaddrHigh(dmabuf->phys);
16479 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16480 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16482 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16483 /* The IOCTL status is embedded in the mailbox subheader. */
16484 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16485 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16486 if (shdr_status || shdr_add_status || rc) {
16487 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16488 "2504 RQ_CREATE mailbox failed with "
16489 "status x%x add_status x%x, mbx status x%x\n",
16490 shdr_status, shdr_add_status, rc);
16494 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16495 if (hrq->queue_id == 0xFFFF) {
16500 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16501 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
16502 &rq_create->u.response);
16503 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
16504 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
16505 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16506 "3262 RQ [%d] doorbell format not "
16507 "supported: x%x\n", hrq->queue_id,
16513 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
16514 &rq_create->u.response);
16515 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
16516 if (!bar_memmap_p) {
16517 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16518 "3269 RQ[%d] failed to memmap pci "
16519 "barset:x%x\n", hrq->queue_id,
16525 db_offset = rq_create->u.response.doorbell_offset;
16526 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
16527 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
16528 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16529 "3270 RQ[%d] doorbell offset not "
16530 "supported: x%x\n", hrq->queue_id,
16535 hrq->db_regaddr = bar_memmap_p + db_offset;
16536 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16537 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
16538 "format:x%x\n", hrq->queue_id, pci_barset,
16539 db_offset, hrq->db_format);
16541 hrq->db_format = LPFC_DB_RING_FORMAT;
16542 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16544 hrq->type = LPFC_HRQ;
16545 hrq->assoc_qid = cq->queue_id;
16546 hrq->subtype = subtype;
16547 hrq->host_index = 0;
16548 hrq->hba_index = 0;
16549 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16551 /* now create the data queue */
16552 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16553 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16554 length, LPFC_SLI4_MBX_EMBED);
16555 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16556 phba->sli4_hba.pc_sli4_params.rqv);
16557 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16558 bf_set(lpfc_rq_context_rqe_count_1,
16559 &rq_create->u.request.context, hrq->entry_count);
16560 if (subtype == LPFC_NVMET)
16561 rq_create->u.request.context.buffer_size =
16562 LPFC_NVMET_DATA_BUF_SIZE;
16564 rq_create->u.request.context.buffer_size =
16565 LPFC_DATA_BUF_SIZE;
16566 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
16568 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
16569 (PAGE_SIZE/SLI4_PAGE_SIZE));
16571 switch (drq->entry_count) {
16573 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16574 "2536 Unsupported RQ count. (%d)\n",
16576 if (drq->entry_count < 512) {
16580 fallthrough; /* otherwise default to smallest count */
16582 bf_set(lpfc_rq_context_rqe_count,
16583 &rq_create->u.request.context,
16584 LPFC_RQ_RING_SIZE_512);
16587 bf_set(lpfc_rq_context_rqe_count,
16588 &rq_create->u.request.context,
16589 LPFC_RQ_RING_SIZE_1024);
16592 bf_set(lpfc_rq_context_rqe_count,
16593 &rq_create->u.request.context,
16594 LPFC_RQ_RING_SIZE_2048);
16597 bf_set(lpfc_rq_context_rqe_count,
16598 &rq_create->u.request.context,
16599 LPFC_RQ_RING_SIZE_4096);
16602 if (subtype == LPFC_NVMET)
16603 bf_set(lpfc_rq_context_buf_size,
16604 &rq_create->u.request.context,
16605 LPFC_NVMET_DATA_BUF_SIZE);
16607 bf_set(lpfc_rq_context_buf_size,
16608 &rq_create->u.request.context,
16609 LPFC_DATA_BUF_SIZE);
16611 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16613 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16615 list_for_each_entry(dmabuf, &drq->page_list, list) {
16616 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16617 putPaddrLow(dmabuf->phys);
16618 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16619 putPaddrHigh(dmabuf->phys);
16621 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16622 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16623 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16624 /* The IOCTL status is embedded in the mailbox subheader. */
16625 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16626 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16627 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16628 if (shdr_status || shdr_add_status || rc) {
16632 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16633 if (drq->queue_id == 0xFFFF) {
16637 drq->type = LPFC_DRQ;
16638 drq->assoc_qid = cq->queue_id;
16639 drq->subtype = subtype;
16640 drq->host_index = 0;
16641 drq->hba_index = 0;
16642 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16644 /* link the header and data RQs onto the parent cq child list */
16645 list_add_tail(&hrq->list, &cq->child_list);
16646 list_add_tail(&drq->list, &cq->child_list);
16649 mempool_free(mbox, phba->mbox_mem_pool);
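/*
 * Illustrative sketch, not part of the driver: creating a header/data
 * receive queue pair.  The helper name is hypothetical; LPFC_USOL is
 * assumed to be the unsolicited-frame subtype used by the driver.
 */
static int lpfc_example_bring_up_rq_pair(struct lpfc_hba *phba,
					 struct lpfc_queue *hrq,
					 struct lpfc_queue *drq,
					 struct lpfc_queue *cq)
{
	/* hrq and drq must have matching entry counts (checked inside). */
	return lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
}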
16654 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
16655 * @phba: HBA structure that indicates port to create a queue on.
16656 * @hrqp: The queue structure array to use to create the header receive queues.
16657 * @drqp: The queue structure array to use to create the data receive queues.
16658 * @cqp: The completion queue array to bind these receive queues to.
16659 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16661 * This function creates receive buffer queue pairs, as detailed in @hrqp and
16662 * @drqp, on a port, described by @phba by sending a RQ_CREATE mailbox command
16665 * The @phba struct is used to send mailbox command to HBA. The @drqp and
16666 * @hrqp arrays are used to get the entry counts that are necessary to determine
16667 * the number of pages to use for each queue. The @cqp array indicates which
16668 * completion queues to bind received buffers that are posted to these queues to.
16669 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
16670 * receive queue pairs. This function is synchronous and will wait for the
16671 * mailbox command to finish before continuing.
16673 * On success this function will return a zero. If unable to allocate enough
16674 * memory this function will return -ENOMEM. If the queue create mailbox command
16675 * fails this function will return -ENXIO.
16678 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
16679 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
16682 struct lpfc_queue *hrq, *drq, *cq;
16683 struct lpfc_mbx_rq_create_v2 *rq_create;
16684 struct lpfc_dmabuf *dmabuf;
16685 LPFC_MBOXQ_t *mbox;
16686 int rc, length, alloclen, status = 0;
16687 int cnt, idx, numrq, page_idx = 0;
16688 uint32_t shdr_status, shdr_add_status;
16689 union lpfc_sli4_cfg_shdr *shdr;
16690 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16692 numrq = phba->cfg_nvmet_mrq;
16693 /* sanity check on array memory */
16694 if (!hrqp || !drqp || !cqp || !numrq)
16696 if (!phba->sli4_hba.pc_sli4_params.supported)
16697 hw_page_size = SLI4_PAGE_SIZE;
16699 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16703 length = sizeof(struct lpfc_mbx_rq_create_v2);
16704 length += ((2 * numrq * hrqp[0]->page_count) *
16705 sizeof(struct dma_address));
16707 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16708 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
16709 LPFC_SLI4_MBX_NEMBED);
16710 if (alloclen < length) {
16711 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16712 "3099 Allocated DMA memory size (%d) is "
16713 "less than the requested DMA memory size "
16714 "(%d)\n", alloclen, length);
16721 rq_create = mbox->sge_array->addr[0];
16722 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
16724 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
16727 for (idx = 0; idx < numrq; idx++) {
16732 /* sanity check on queue memory */
16733 if (!hrq || !drq || !cq) {
16738 if (hrq->entry_count != drq->entry_count) {
16744 bf_set(lpfc_mbx_rq_create_num_pages,
16745 &rq_create->u.request,
16747 bf_set(lpfc_mbx_rq_create_rq_cnt,
16748 &rq_create->u.request, (numrq * 2));
16749 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
16751 bf_set(lpfc_rq_context_base_cq,
16752 &rq_create->u.request.context,
16754 bf_set(lpfc_rq_context_data_size,
16755 &rq_create->u.request.context,
16756 LPFC_NVMET_DATA_BUF_SIZE);
16757 bf_set(lpfc_rq_context_hdr_size,
16758 &rq_create->u.request.context,
16759 LPFC_HDR_BUF_SIZE);
16760 bf_set(lpfc_rq_context_rqe_count_1,
16761 &rq_create->u.request.context,
16763 bf_set(lpfc_rq_context_rqe_size,
16764 &rq_create->u.request.context,
16766 bf_set(lpfc_rq_context_page_size,
16767 &rq_create->u.request.context,
16768 (PAGE_SIZE/SLI4_PAGE_SIZE));
16771 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16772 memset(dmabuf->virt, 0, hw_page_size);
16773 cnt = page_idx + dmabuf->buffer_tag;
16774 rq_create->u.request.page[cnt].addr_lo =
16775 putPaddrLow(dmabuf->phys);
16776 rq_create->u.request.page[cnt].addr_hi =
16777 putPaddrHigh(dmabuf->phys);
16783 list_for_each_entry(dmabuf, &drq->page_list, list) {
16784 memset(dmabuf->virt, 0, hw_page_size);
16785 cnt = page_idx + dmabuf->buffer_tag;
16786 rq_create->u.request.page[cnt].addr_lo =
16787 putPaddrLow(dmabuf->phys);
16788 rq_create->u.request.page[cnt].addr_hi =
16789 putPaddrHigh(dmabuf->phys);
16794 hrq->db_format = LPFC_DB_RING_FORMAT;
16795 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16796 hrq->type = LPFC_HRQ;
16797 hrq->assoc_qid = cq->queue_id;
16798 hrq->subtype = subtype;
16799 hrq->host_index = 0;
16800 hrq->hba_index = 0;
16801 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16803 drq->db_format = LPFC_DB_RING_FORMAT;
16804 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16805 drq->type = LPFC_DRQ;
16806 drq->assoc_qid = cq->queue_id;
16807 drq->subtype = subtype;
16808 drq->host_index = 0;
16809 drq->hba_index = 0;
16810 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16812 list_add_tail(&hrq->list, &cq->child_list);
16813 list_add_tail(&drq->list, &cq->child_list);
16816 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16817 /* The IOCTL status is embedded in the mailbox subheader. */
16818 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16819 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16820 if (shdr_status || shdr_add_status || rc) {
16821 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16822 "3120 RQ_CREATE mailbox failed with "
16823 "status x%x add_status x%x, mbx status x%x\n",
16824 shdr_status, shdr_add_status, rc);
16828 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16829 if (rc == 0xFFFF) {
16834 /* Initialize all RQs with associated queue id */
16835 for (idx = 0; idx < numrq; idx++) {
16837 hrq->queue_id = rc + (2 * idx);
16839 drq->queue_id = rc + (2 * idx) + 1;
16843 lpfc_sli4_mbox_cmd_free(phba, mbox);
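/*
 * Illustrative sketch, not part of the driver: creating all NVMET MRQ
 * header/data pairs with a single mailbox command.  The helper name is
 * hypothetical; nvmet_mrq_hdr/nvmet_mrq_data/nvmet_cqset are assumed to
 * be the driver's pre-allocated NVMET queue arrays.
 */
static int lpfc_example_bring_up_mrqs(struct lpfc_hba *phba)
{
	/* Ids interleave from the returned base: hdr = base + 2*idx,
	 * data = base + 2*idx + 1, per the loop above.
	 */
	return lpfc_mrq_create(phba, phba->sli4_hba.nvmet_mrq_hdr,
			       phba->sli4_hba.nvmet_mrq_data,
			       phba->sli4_hba.nvmet_cqset, LPFC_NVMET);
}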
16848 * lpfc_eq_destroy - Destroy an event Queue on the HBA
16849 * @phba: HBA structure that indicates port to destroy a queue on.
16850 * @eq: The queue structure associated with the queue to destroy.
16852 * This function destroys a queue, as detailed in @eq, by sending a mailbox
16853 * command, specific to the type of queue, to the HBA.
16855 * The @eq struct is used to get the queue ID of the queue to destroy.
16857 * On success this function will return a zero. If the queue destroy mailbox
16858 * command fails this function will return -ENXIO.
16861 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16863 LPFC_MBOXQ_t *mbox;
16864 int rc, length, status = 0;
16865 uint32_t shdr_status, shdr_add_status;
16866 union lpfc_sli4_cfg_shdr *shdr;
16868 /* sanity check on queue memory */
16872 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16875 length = (sizeof(struct lpfc_mbx_eq_destroy) -
16876 sizeof(struct lpfc_sli4_cfg_mhdr));
16877 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16878 LPFC_MBOX_OPCODE_EQ_DESTROY,
16879 length, LPFC_SLI4_MBX_EMBED);
16880 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16882 mbox->vport = eq->phba->pport;
16883 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16885 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16886 /* The IOCTL status is embedded in the mailbox subheader. */
16887 shdr = (union lpfc_sli4_cfg_shdr *)
16888 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16889 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16890 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16891 if (shdr_status || shdr_add_status || rc) {
16892 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16893 "2505 EQ_DESTROY mailbox failed with "
16894 "status x%x add_status x%x, mbx status x%x\n",
16895 shdr_status, shdr_add_status, rc);
16899 /* Remove eq from any list */
16900 list_del_init(&eq->list);
16901 mempool_free(mbox, eq->phba->mbox_mem_pool);
16906 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
16907 * @phba: HBA structure that indicates port to destroy a queue on.
16908 * @cq: The queue structure associated with the queue to destroy.
16910 * This function destroys a queue, as detailed in @cq, by sending a mailbox
16911 * command, specific to the type of queue, to the HBA.
16913 * The @cq struct is used to get the queue ID of the queue to destroy.
16915 * On success this function will return a zero. If the queue destroy mailbox
16916 * command fails this function will return -ENXIO.
16919 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16921 LPFC_MBOXQ_t *mbox;
16922 int rc, length, status = 0;
16923 uint32_t shdr_status, shdr_add_status;
16924 union lpfc_sli4_cfg_shdr *shdr;
16926 /* sanity check on queue memory */
16929 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16932 length = (sizeof(struct lpfc_mbx_cq_destroy) -
16933 sizeof(struct lpfc_sli4_cfg_mhdr));
16934 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16935 LPFC_MBOX_OPCODE_CQ_DESTROY,
16936 length, LPFC_SLI4_MBX_EMBED);
16937 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16939 mbox->vport = cq->phba->pport;
16940 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16941 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16942 /* The IOCTL status is embedded in the mailbox subheader. */
16943 shdr = (union lpfc_sli4_cfg_shdr *)
16944 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
16945 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16946 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16947 if (shdr_status || shdr_add_status || rc) {
16948 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16949 "2506 CQ_DESTROY mailbox failed with "
16950 "status x%x add_status x%x, mbx status x%x\n",
16951 shdr_status, shdr_add_status, rc);
16954 /* Remove cq from any list */
16955 list_del_init(&cq->list);
16956 mempool_free(mbox, cq->phba->mbox_mem_pool);
16961 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
16962 * @phba: HBA structure that indicates port to destroy a queue on.
16963 * @mq: The queue structure associated with the queue to destroy.
16965 * This function destroys a queue, as detailed in @mq, by sending a mailbox
16966 * command, specific to the type of queue, to the HBA.
16968 * The @mq struct is used to get the queue ID of the queue to destroy.
16970 * On success this function will return a zero. If the queue destroy mailbox
16971 * command fails this function will return -ENXIO.
16974 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16976 LPFC_MBOXQ_t *mbox;
16977 int rc, length, status = 0;
16978 uint32_t shdr_status, shdr_add_status;
16979 union lpfc_sli4_cfg_shdr *shdr;
16981 /* sanity check on queue memory */
16984 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16987 length = (sizeof(struct lpfc_mbx_mq_destroy) -
16988 sizeof(struct lpfc_sli4_cfg_mhdr));
16989 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16990 LPFC_MBOX_OPCODE_MQ_DESTROY,
16991 length, LPFC_SLI4_MBX_EMBED);
16992 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16994 mbox->vport = mq->phba->pport;
16995 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16996 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16997 /* The IOCTL status is embedded in the mailbox subheader. */
16998 shdr = (union lpfc_sli4_cfg_shdr *)
16999 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
17000 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17001 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17002 if (shdr_status || shdr_add_status || rc) {
17003 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17004 "2507 MQ_DESTROY mailbox failed with "
17005 "status x%x add_status x%x, mbx status x%x\n",
17006 shdr_status, shdr_add_status, rc);
17009 /* Remove mq from any list */
17010 list_del_init(&mq->list);
17011 mempool_free(mbox, mq->phba->mbox_mem_pool);
17016 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
17017 * @phba: HBA structure that indicates port to destroy a queue on.
17018 * @wq: The queue structure associated with the queue to destroy.
17020 * This function destroys a queue, as detailed in @wq, by sending a mailbox
17021 * command, specific to the type of queue, to the HBA.
17023 * The @wq struct is used to get the queue ID of the queue to destroy.
17025 * On success this function will return a zero. If the queue destroy mailbox
17026 * command fails this function will return -ENXIO.
17029 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
17031 LPFC_MBOXQ_t *mbox;
17032 int rc, length, status = 0;
17033 uint32_t shdr_status, shdr_add_status;
17034 union lpfc_sli4_cfg_shdr *shdr;
17036 /* sanity check on queue memory */
17039 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
17042 length = (sizeof(struct lpfc_mbx_wq_destroy) -
17043 sizeof(struct lpfc_sli4_cfg_mhdr));
17044 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17045 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
17046 length, LPFC_SLI4_MBX_EMBED);
17047 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
17049 mbox->vport = wq->phba->pport;
17050 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17051 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
17052 shdr = (union lpfc_sli4_cfg_shdr *)
17053 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
17054 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17055 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17056 if (shdr_status || shdr_add_status || rc) {
17057 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17058 "2508 WQ_DESTROY mailbox failed with "
17059 "status x%x add_status x%x, mbx status x%x\n",
17060 shdr_status, shdr_add_status, rc);
17063 /* Remove wq from any list */
17064 list_del_init(&wq->list);
17067 mempool_free(mbox, wq->phba->mbox_mem_pool);
17072 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
17073 * @phba: HBA structure that indicates port to destroy a queue on.
17074 * @hrq: The queue structure associated with the queue to destroy.
17075 * @drq: The queue structure associated with the queue to destroy.
17077 * This function destroys the receive queue pair, as detailed in @hrq and
17078 * @drq, by sending a mailbox command, specific to the type of queue, to the HBA.
17080 * The @hrq and @drq structs are used to get the queue IDs of the queues to destroy.
17082 * On success this function will return a zero. If the queue destroy mailbox
17083 * command fails this function will return -ENXIO.
17086 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17087 struct lpfc_queue *drq)
17089 LPFC_MBOXQ_t *mbox;
17090 int rc, length, status = 0;
17091 uint32_t shdr_status, shdr_add_status;
17092 union lpfc_sli4_cfg_shdr *shdr;
17094 /* sanity check on queue memory */
17097 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
17100 length = (sizeof(struct lpfc_mbx_rq_destroy) -
17101 sizeof(struct lpfc_sli4_cfg_mhdr));
17102 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17103 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
17104 length, LPFC_SLI4_MBX_EMBED);
17105 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17107 mbox->vport = hrq->phba->pport;
17108 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17109 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
17110 /* The IOCTL status is embedded in the mailbox subheader. */
17111 shdr = (union lpfc_sli4_cfg_shdr *)
17112 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17113 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17114 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17115 if (shdr_status || shdr_add_status || rc) {
17116 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17117 "2509 RQ_DESTROY mailbox failed with "
17118 "status x%x add_status x%x, mbx status x%x\n",
17119 shdr_status, shdr_add_status, rc);
17120 mempool_free(mbox, hrq->phba->mbox_mem_pool);
17123 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17125 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
17126 shdr = (union lpfc_sli4_cfg_shdr *)
17127 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17128 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17129 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17130 if (shdr_status || shdr_add_status || rc) {
17131 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17132 "2510 RQ_DESTROY mailbox failed with "
17133 "status x%x add_status x%x, mbx status x%x\n",
17134 shdr_status, shdr_add_status, rc);
17137 list_del_init(&hrq->list);
17138 list_del_init(&drq->list);
17139 mempool_free(mbox, hrq->phba->mbox_mem_pool);
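/*
 * Illustrative sketch, not part of the driver: queues must be destroyed
 * children-first, because each create linked the queue onto its parent's
 * child_list.  The helper name and the particular queue pointers are
 * hypothetical.
 */
static void lpfc_example_tear_down(struct lpfc_hba *phba,
				   struct lpfc_queue *wq,
				   struct lpfc_queue *hrq,
				   struct lpfc_queue *drq,
				   struct lpfc_queue *cq,
				   struct lpfc_queue *eq)
{
	/* WQs and RQs hang off the CQ; the CQ hangs off the EQ. */
	lpfc_wq_destroy(phba, wq);
	lpfc_rq_destroy(phba, hrq, drq);
	lpfc_cq_destroy(phba, cq);
	lpfc_eq_destroy(phba, eq);
}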
17144 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
17145 * @phba: pointer to lpfc hba data structure.
17146 * @pdma_phys_addr0: Physical address of the 1st SGL page.
17147 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
17148 * @xritag: the xritag that ties this io to the SGL pages.
17150 * This routine will post the sgl pages for the IO that has the xritag
17151 * that is in the iocbq structure. The xritag is assigned during iocbq
17152 * creation and persists for as long as the driver is loaded.
17153 * If the caller has fewer than 256 scatter gather segments to map, then
17154 * pdma_phys_addr1 should be 0.
17155 * If the caller needs to map more than 256 scatter gather segments, then
17156 * pdma_phys_addr1 should be a valid physical address.
17157 * Physical addresses for SGLs must be 64 byte aligned.
17158 * If two SGLs are mapped, the first one must have 256 entries and the
17159 * second one can have between 1 and 256 entries.
17163 * -ENXIO, -ENOMEM - Failure
17166 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
17167 dma_addr_t pdma_phys_addr0,
17168 dma_addr_t pdma_phys_addr1,
17171 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
17172 LPFC_MBOXQ_t *mbox;
17174 uint32_t shdr_status, shdr_add_status;
17176 union lpfc_sli4_cfg_shdr *shdr;
17178 if (xritag == NO_XRI) {
17179 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17180 "0364 Invalid param:\n");
17184 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17188 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17189 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17190 sizeof(struct lpfc_mbx_post_sgl_pages) -
17191 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
17193 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
17194 &mbox->u.mqe.un.post_sgl_pages;
17195 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
17196 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
17198 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
17199 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
17200 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
17201 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
17203 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
17204 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
17205 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
17206 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
17207 if (!phba->sli4_hba.intr_enable)
17208 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17210 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17211 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17213 /* The IOCTL status is embedded in the mailbox subheader. */
17214 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
17215 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17216 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17217 if (!phba->sli4_hba.intr_enable)
17218 mempool_free(mbox, phba->mbox_mem_pool);
17219 else if (rc != MBX_TIMEOUT)
17220 mempool_free(mbox, phba->mbox_mem_pool);
17221 if (shdr_status || shdr_add_status || rc) {
17222 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17223 "2511 POST_SGL mailbox failed with "
17224 "status x%x add_status x%x, mbx status x%x\n",
17225 shdr_status, shdr_add_status, rc);
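/*
 * Illustrative sketch of the address formatting above: each SGL page is
 * handed to the port as two little-endian 32-bit halves of its 64-bit
 * physical address, which is what the putPaddrLow()/putPaddrHigh() plus
 * cpu_to_le32() combination produces. The names below are hypothetical.
 */
#include <stdint.h>

struct sgl_pg_half {
	uint32_t addr_lo;	/* low 32 bits, little-endian on the wire */
	uint32_t addr_hi;	/* high 32 bits, little-endian on the wire */
};

static void set_sgl_page_addr(struct sgl_pg_half *pg, uint64_t phys)
{
	/* cpu_to_le32() is a no-op on little-endian hosts; shown plainly */
	pg->addr_lo = (uint32_t)(phys & 0xffffffffu);
	pg->addr_hi = (uint32_t)(phys >> 32);
}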
17231 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
17232 * @phba: pointer to lpfc hba data structure.
17234 * This routine is invoked to allocate the next available xri from the
17235 * driver's xri bitmask, consistent with the SLI-4 interface spec.
17240 * An available xri in the range 0 <= xri < max_xri if successful;
17241 * NO_XRI if no xris are available.
17244 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
17249 * Fetch the next logical xri. Because this index is logical,
17250 * the driver starts at 0 each time.
17252 spin_lock_irq(&phba->hbalock);
17253 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
17254 phba->sli4_hba.max_cfg_param.max_xri, 0);
17255 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
17256 spin_unlock_irq(&phba->hbalock);
17259 set_bit(xri, phba->sli4_hba.xri_bmask);
17260 phba->sli4_hba.max_cfg_param.xri_used++;
17262 spin_unlock_irq(&phba->hbalock);
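/*
 * Illustrative user-space analogue of the bitmask allocator above: scan
 * for the first clear bit starting at index 0 (the index is purely
 * logical), claim it, and report exhaustion when the scan reaches the
 * limit. The driver does this with find_next_zero_bit()/set_bit() under
 * hbalock; everything below is a hypothetical stand-in.
 */
#include <stdint.h>

#define EX_MAX_XRI 256
static uint8_t ex_xri_bmask[EX_MAX_XRI / 8];

static int ex_alloc_xri(void)
{
	int xri;

	for (xri = 0; xri < EX_MAX_XRI; xri++) {
		if (!(ex_xri_bmask[xri / 8] & (1u << (xri % 8)))) {
			ex_xri_bmask[xri / 8] |= 1u << (xri % 8);
			return xri;
		}
	}
	return -1;	/* analogous to the driver returning NO_XRI */
}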
17267 * __lpfc_sli4_free_xri - Release an xri for reuse.
17268 * @phba: pointer to lpfc hba data structure.
17269 * @xri: xri to release.
17271 * This routine is invoked to release an xri to the pool of
17272 * available xris maintained by the driver.
17275 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17277 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
17278 phba->sli4_hba.max_cfg_param.xri_used--;
17283 * lpfc_sli4_free_xri - Release an xri for reuse.
17284 * @phba: pointer to lpfc hba data structure.
17285 * @xri: xri to release.
17287 * This routine is invoked to release an xri to the pool of
17288 * available xris maintained by the driver.
17291 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17293 spin_lock_irq(&phba->hbalock);
17294 __lpfc_sli4_free_xri(phba, xri);
17295 spin_unlock_irq(&phba->hbalock);
17299 * lpfc_sli4_next_xritag - Get an xritag for the io
17300 * @phba: Pointer to HBA context object.
17302 * This function gets an xritag for the iocb. If there is no unused xritag
17303 * it will return 0xffff.
17304 * The function returns the allocated xritag if successful, else returns
17305 * NO_XRI (0xffff), which is not a valid xritag.
17306 * The caller is not required to hold any lock.
17309 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
17311 uint16_t xri_index;
17313 xri_index = lpfc_sli4_alloc_xri(phba);
17314 if (xri_index == NO_XRI)
17315 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17316 "2004 Failed to allocate XRI.last XRITAG is %d"
17317 " Max XRI is %d, Used XRI is %d\n",
17319 phba->sli4_hba.max_cfg_param.max_xri,
17320 phba->sli4_hba.max_cfg_param.xri_used);
17325 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
17326 * @phba: pointer to lpfc hba data structure.
17327 * @post_sgl_list: pointer to els sgl entry list.
17328 * @post_cnt: number of els sgl entries on the list.
17330 * This routine is invoked to post a block of driver's sgl pages to the
17331 * HBA using non-embedded mailbox command. No Lock is held. This routine
17332 * is only called when the driver is loading and after all IO has been
17333 * stopped.
17336 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
17337 struct list_head *post_sgl_list,
17340 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
17341 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17342 struct sgl_page_pairs *sgl_pg_pairs;
17344 LPFC_MBOXQ_t *mbox;
17345 uint32_t reqlen, alloclen, pg_pairs;
17347 uint16_t xritag_start = 0;
17349 uint32_t shdr_status, shdr_add_status;
17350 union lpfc_sli4_cfg_shdr *shdr;
17352 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
17353 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17354 if (reqlen > SLI4_PAGE_SIZE) {
17355 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17356 "2559 Block sgl registration required DMA "
17357 "size (%d) great than a page\n", reqlen);
17361 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17365 /* Allocate DMA memory and set up the non-embedded mailbox command */
17366 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17367 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
17368 LPFC_SLI4_MBX_NEMBED);
17370 if (alloclen < reqlen) {
17371 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17372 "0285 Allocated DMA memory size (%d) is "
17373 "less than the requested DMA memory "
17374 "size (%d)\n", alloclen, reqlen);
17375 lpfc_sli4_mbox_cmd_free(phba, mbox);
17378 /* Set up the SGL pages in the non-embedded DMA pages */
17379 viraddr = mbox->sge_array->addr[0];
17380 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17381 sgl_pg_pairs = &sgl->sgl_pg_pairs;
17384 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
17385 /* Set up the sge entry */
17386 sgl_pg_pairs->sgl_pg0_addr_lo =
17387 cpu_to_le32(putPaddrLow(sglq_entry->phys));
17388 sgl_pg_pairs->sgl_pg0_addr_hi =
17389 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
17390 sgl_pg_pairs->sgl_pg1_addr_lo =
17391 cpu_to_le32(putPaddrLow(0));
17392 sgl_pg_pairs->sgl_pg1_addr_hi =
17393 cpu_to_le32(putPaddrHigh(0));
17395 /* Keep the first xritag on the list */
17397 xritag_start = sglq_entry->sli4_xritag;
17402 /* Complete initialization and perform endian conversion. */
17403 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17404 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
17405 sgl->word0 = cpu_to_le32(sgl->word0);
17407 if (!phba->sli4_hba.intr_enable)
17408 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17410 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17411 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17413 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
17414 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17415 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17416 if (!phba->sli4_hba.intr_enable)
17417 lpfc_sli4_mbox_cmd_free(phba, mbox);
17418 else if (rc != MBX_TIMEOUT)
17419 lpfc_sli4_mbox_cmd_free(phba, mbox);
17420 if (shdr_status || shdr_add_status || rc) {
17421 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17422 "2513 POST_SGL_BLOCK mailbox command failed "
17423 "status x%x add_status x%x mbx status x%x\n",
17424 shdr_status, shdr_add_status, rc);
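/*
 * Illustrative sketch of the length check above: the non-embedded request
 * must fit in one SLI4 page, so the required length is the page-pair array
 * plus the config header and one trailing word. The sizes below are
 * hypothetical stand-ins for the real structure sizes.
 */
#include <stddef.h>
#include <stdint.h>

#define EX_SLI4_PAGE_SIZE	4096
#define EX_SGL_PG_PAIR_SIZE	16	/* stand-in for sizeof(struct sgl_page_pairs) */
#define EX_CFG_SHDR_SIZE	8	/* stand-in for sizeof(union lpfc_sli4_cfg_shdr) */

static int ex_sgl_block_fits(unsigned int post_cnt)
{
	size_t reqlen = post_cnt * EX_SGL_PG_PAIR_SIZE +
			EX_CFG_SHDR_SIZE + sizeof(uint32_t);

	return reqlen <= EX_SLI4_PAGE_SIZE;	/* oversize requests are rejected */
}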
17431 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
17432 * @phba: pointer to lpfc hba data structure.
17433 * @nblist: pointer to nvme buffer list.
17434 * @count: number of nvme buffers on the list.
17436 * This routine is invoked to post a block of @count nvme sgl pages from an
17437 * nvme buffer list @nblist to the HBA using a non-embedded mailbox command.
17442 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
17445 struct lpfc_io_buf *lpfc_ncmd;
17446 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17447 struct sgl_page_pairs *sgl_pg_pairs;
17449 LPFC_MBOXQ_t *mbox;
17450 uint32_t reqlen, alloclen, pg_pairs;
17452 uint16_t xritag_start = 0;
17454 uint32_t shdr_status, shdr_add_status;
17455 dma_addr_t pdma_phys_bpl1;
17456 union lpfc_sli4_cfg_shdr *shdr;
17458 /* Calculate the requested length of the dma memory */
17459 reqlen = count * sizeof(struct sgl_page_pairs) +
17460 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17461 if (reqlen > SLI4_PAGE_SIZE) {
17462 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
17463 "6118 Block sgl registration required DMA "
17464 "size (%d) great than a page\n", reqlen);
17467 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17469 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17470 "6119 Failed to allocate mbox cmd memory\n");
17474 /* Allocate DMA memory and set up the non-embedded mailbox command */
17475 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17476 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17477 reqlen, LPFC_SLI4_MBX_NEMBED);
17479 if (alloclen < reqlen) {
17480 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17481 "6120 Allocated DMA memory size (%d) is "
17482 "less than the requested DMA memory "
17483 "size (%d)\n", alloclen, reqlen);
17484 lpfc_sli4_mbox_cmd_free(phba, mbox);
17488 /* Get the first SGE entry from the non-embedded DMA memory */
17489 viraddr = mbox->sge_array->addr[0];
17491 /* Set up the SGL pages in the non-embedded DMA pages */
17492 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17493 sgl_pg_pairs = &sgl->sgl_pg_pairs;
17496 list_for_each_entry(lpfc_ncmd, nblist, list) {
17497 /* Set up the sge entry */
17498 sgl_pg_pairs->sgl_pg0_addr_lo =
17499 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
17500 sgl_pg_pairs->sgl_pg0_addr_hi =
17501 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
17502 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
17503 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
17506 pdma_phys_bpl1 = 0;
17507 sgl_pg_pairs->sgl_pg1_addr_lo =
17508 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
17509 sgl_pg_pairs->sgl_pg1_addr_hi =
17510 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
17511 /* Keep the first xritag on the list */
17513 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
17517 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17518 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
17519 /* Perform endian conversion if necessary */
17520 sgl->word0 = cpu_to_le32(sgl->word0);
17522 if (!phba->sli4_hba.intr_enable) {
17523 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17525 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17526 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17528 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
17529 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17530 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17531 if (!phba->sli4_hba.intr_enable)
17532 lpfc_sli4_mbox_cmd_free(phba, mbox);
17533 else if (rc != MBX_TIMEOUT)
17534 lpfc_sli4_mbox_cmd_free(phba, mbox);
17535 if (shdr_status || shdr_add_status || rc) {
17536 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17537 "6125 POST_SGL_BLOCK mailbox command failed "
17538 "status x%x add_status x%x mbx status x%x\n",
17539 shdr_status, shdr_add_status, rc);
17546 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
17547 * @phba: pointer to lpfc hba data structure.
17548 * @post_nblist: pointer to the nvme buffer list.
17549 * @sb_count: number of nvme buffers.
17551 * This routine walks a list of nvme buffers that was passed in. It attempts
17552 * to construct blocks of nvme buffer sgls which contain contiguous xris and
17553 * uses the non-embedded SGL block post mailbox commands to post to the port.
17554 * For a single NVME buffer sgl with a non-contiguous xri, if any, it shall use
17555 * the embedded SGL post mailbox command for posting. The @post_nblist passed
17556 * in must be a local list, thus no lock is needed when manipulating the list.
17558 * Returns: 0 = failure, else the number of successfully posted buffers.
17561 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
17562 struct list_head *post_nblist, int sb_count)
17564 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
17565 int status, sgl_size;
17566 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
17567 dma_addr_t pdma_phys_sgl1;
17568 int last_xritag = NO_XRI;
17570 LIST_HEAD(prep_nblist);
17571 LIST_HEAD(blck_nblist);
17572 LIST_HEAD(nvme_nblist);
17578 sgl_size = phba->cfg_sg_dma_buf_size;
17579 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
17580 list_del_init(&lpfc_ncmd->list);
17582 if ((last_xritag != NO_XRI) &&
17583 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
17584 /* a hole in xri block, form a sgl posting block */
17585 list_splice_init(&prep_nblist, &blck_nblist);
17586 post_cnt = block_cnt - 1;
17587 /* prepare list for next posting block */
17588 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17591 /* prepare list for next posting block */
17592 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17593 /* enough sgls for non-embed sgl mbox command */
17594 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
17595 list_splice_init(&prep_nblist, &blck_nblist);
17596 post_cnt = block_cnt;
17601 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17603 /* end of repost sgl list condition for NVME buffers */
17604 if (num_posting == sb_count) {
17605 if (post_cnt == 0) {
17606 /* last sgl posting block */
17607 list_splice_init(&prep_nblist, &blck_nblist);
17608 post_cnt = block_cnt;
17609 } else if (block_cnt == 1) {
17610 /* last single sgl with non-contiguous xri */
17611 if (sgl_size > SGL_PAGE_SIZE)
17613 lpfc_ncmd->dma_phys_sgl +
17616 pdma_phys_sgl1 = 0;
17617 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17618 status = lpfc_sli4_post_sgl(
17619 phba, lpfc_ncmd->dma_phys_sgl,
17620 pdma_phys_sgl1, cur_xritag);
17622 /* Post error. Buffer unavailable. */
17623 lpfc_ncmd->flags |=
17624 LPFC_SBUF_NOT_POSTED;
17626 /* Post success. Buffer available. */
17627 lpfc_ncmd->flags &=
17628 ~LPFC_SBUF_NOT_POSTED;
17629 lpfc_ncmd->status = IOSTAT_SUCCESS;
17632 /* success, put on NVME buffer sgl list */
17633 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17637 /* continue until a nembed page worth of sgls */
17641 /* post block of NVME buffer list sgls */
17642 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
17645 /* don't reset xritag due to hole in xri block */
17646 if (block_cnt == 0)
17647 last_xritag = NO_XRI;
17649 /* reset NVME buffer post count for next round of posting */
17652 /* put posted NVME buffer-sgl on NVME buffer sgl list */
17653 while (!list_empty(&blck_nblist)) {
17654 list_remove_head(&blck_nblist, lpfc_ncmd,
17655 struct lpfc_io_buf, list);
17657 /* Post error. Mark buffer unavailable. */
17658 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
17660 /* Post success, Mark buffer available. */
17661 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
17662 lpfc_ncmd->status = IOSTAT_SUCCESS;
17665 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17668 /* Push NVME buffers with sgl posted to the available list */
17669 lpfc_io_buf_replenish(phba, &nvme_nblist);
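/*
 * Illustrative user-space analogue of the blocking logic above: walk the
 * xris in list order and close out a posting block whenever the next xri
 * is not previous + 1 (a hole) or the block reaches the non-embedded
 * limit. Array-based and hypothetical for brevity.
 */
#define EX_BLOCK_LIMIT 10	/* stand-in for LPFC_NEMBED_MBOX_SGL_CNT */

static void ex_form_blocks(const int *xris, int count,
			   void (*post_block)(const int *blk, int len))
{
	int start = 0, i;

	for (i = 1; i <= count; i++) {
		if (i == count ||			/* end of list */
		    xris[i] != xris[i - 1] + 1 ||	/* hole in xri run */
		    i - start == EX_BLOCK_LIMIT) {	/* block is full */
			post_block(&xris[start], i - start);
			start = i;
		}
	}
}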
17675 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
17676 * @phba: pointer to lpfc_hba struct that the frame was received on
17677 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17679 * This function checks the fields in the @fc_hdr to see if the FC frame is a
17680 * valid type of frame that the LPFC driver will handle. This function will
17681 * return a zero if the frame is a valid frame or a non-zero value when the
17682 * frame does not pass the check.
17685 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
17688 struct fc_vft_header *fc_vft_hdr;
17689 uint32_t *header = (uint32_t *) fc_hdr;
17691 #define FC_RCTL_MDS_DIAGS 0xF4
17693 switch (fc_hdr->fh_r_ctl) {
17694 case FC_RCTL_DD_UNCAT: /* uncategorized information */
17695 case FC_RCTL_DD_SOL_DATA: /* solicited data */
17696 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
17697 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
17698 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
17699 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
17700 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
17701 case FC_RCTL_DD_CMD_STATUS: /* command status */
17702 case FC_RCTL_ELS_REQ: /* extended link services request */
17703 case FC_RCTL_ELS_REP: /* extended link services reply */
17704 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
17705 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
17706 case FC_RCTL_BA_NOP: /* basic link service NOP */
17707 case FC_RCTL_BA_ABTS: /* basic link service abort */
17708 case FC_RCTL_BA_RMC: /* remove connection */
17709 case FC_RCTL_BA_ACC: /* basic accept */
17710 case FC_RCTL_BA_RJT: /* basic reject */
17711 case FC_RCTL_BA_PRMT:
17712 case FC_RCTL_ACK_1: /* acknowledge_1 */
17713 case FC_RCTL_ACK_0: /* acknowledge_0 */
17714 case FC_RCTL_P_RJT: /* port reject */
17715 case FC_RCTL_F_RJT: /* fabric reject */
17716 case FC_RCTL_P_BSY: /* port busy */
17717 case FC_RCTL_F_BSY: /* fabric busy to data frame */
17718 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
17719 case FC_RCTL_LCR: /* link credit reset */
17720 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
17721 case FC_RCTL_END: /* end */
17723 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
17724 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17725 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
17726 return lpfc_fc_frame_check(phba, fc_hdr);
17731 switch (fc_hdr->fh_type) {
17744 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
17745 "2538 Received frame rctl:x%x, type:x%x, "
17746 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
17747 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
17748 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
17749 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
17750 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
17751 be32_to_cpu(header[6]));
17754 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
17755 "2539 Dropped frame rctl:x%x type:x%x\n",
17756 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
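/*
 * Illustrative sketch of the dispatch above: a whitelist switch over the
 * frame's R_CTL byte, where a VFT-tagged frame (FC_RCTL_VFTH, 0x50 per
 * FC-FS) peels the tagging header and re-checks the inner frame. The
 * 8-byte VFT header length and the helper itself are assumptions.
 */
static int ex_frame_rctl_ok(const unsigned char *fc_hdr)
{
	switch (fc_hdr[0]) {		/* fh_r_ctl is byte 0 of the header */
	case 0x50:			/* FC_RCTL_VFTH */
		return ex_frame_rctl_ok(fc_hdr + 8);	/* assumed VFT length */
	case 0x22:			/* FC_RCTL_ELS_REQ */
	case 0x23:			/* FC_RCTL_ELS_REP */
	case 0x81:			/* FC_RCTL_BA_ABTS */
		return 1;		/* ...plus the other accepted values */
	default:
		return 0;
	}
}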
17761 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
17762 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17764 * This function processes the FC header to retrieve the VFI from the VF
17765 * header, if one exists. This function will return the VFI if one exists
17766 * or 0 if no VSAN Header exists.
17769 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
17771 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17773 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
17775 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
17779 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
17780 * @phba: Pointer to the HBA structure to search for the vport on
17781 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17782 * @fcfi: The FC Fabric ID that the frame came from
17783 * @did: Destination ID to match against
17785 * This function searches the @phba for a vport that matches the content of the
17786 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
17787 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
17788 * returns the matching vport pointer or NULL if unable to match the frame
17789 * to a vport.
17791 static struct lpfc_vport *
17792 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
17793 uint16_t fcfi, uint32_t did)
17795 struct lpfc_vport **vports;
17796 struct lpfc_vport *vport = NULL;
17799 if (did == Fabric_DID)
17800 return phba->pport;
17801 if ((phba->pport->fc_flag & FC_PT2PT) &&
17802 !(phba->link_state == LPFC_HBA_READY))
17803 return phba->pport;
17805 vports = lpfc_create_vport_work_array(phba);
17806 if (vports != NULL) {
17807 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
17808 if (phba->fcf.fcfi == fcfi &&
17809 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
17810 vports[i]->fc_myDID == did) {
17816 lpfc_destroy_vport_work_array(phba, vports);
17821 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
17822 * @vport: The vport to work on.
17824 * This function updates the receive sequence time stamp for this vport. The
17825 * receive sequence time stamp indicates the time that the last frame of the
17826 * sequence that has been idle for the longest amount of time was received.
17827 * The driver uses this time stamp to indicate if any received sequences have
17828 * timed out.
17831 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
17833 struct lpfc_dmabuf *h_buf;
17834 struct hbq_dmabuf *dmabuf = NULL;
17836 /* get the oldest sequence on the rcv list */
17837 h_buf = list_get_first(&vport->rcv_buffer_list,
17838 struct lpfc_dmabuf, list);
17841 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17842 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
17846 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
17847 * @vport: The vport that the received sequences were sent to.
17849 * This function cleans up all outstanding received sequences. This is called
17850 * by the driver when a link event or user action invalidates all the received
17851 * sequences.
17854 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
17856 struct lpfc_dmabuf *h_buf, *hnext;
17857 struct lpfc_dmabuf *d_buf, *dnext;
17858 struct hbq_dmabuf *dmabuf = NULL;
17860 /* start with the oldest sequence on the rcv list */
17861 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17862 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17863 list_del_init(&dmabuf->hbuf.list);
17864 list_for_each_entry_safe(d_buf, dnext,
17865 &dmabuf->dbuf.list, list) {
17866 list_del_init(&d_buf->list);
17867 lpfc_in_buf_free(vport->phba, d_buf);
17869 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17874 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
17875 * @vport: The vport that the received sequences were sent to.
17877 * This function determines whether any received sequences have timed out by
17878 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
17879 * indicates that there is at least one timed out sequence this routine will
17880 * go through the received sequences one at a time from most inactive to most
17881 * active to determine which ones need to be cleaned up. Once it has determined
17882 * that a sequence needs to be cleaned up it will simply free up the resources
17883 * without sending an abort.
17886 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
17888 struct lpfc_dmabuf *h_buf, *hnext;
17889 struct lpfc_dmabuf *d_buf, *dnext;
17890 struct hbq_dmabuf *dmabuf = NULL;
17891 unsigned long timeout;
17892 int abort_count = 0;
17894 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17895 vport->rcv_buffer_time_stamp);
17896 if (list_empty(&vport->rcv_buffer_list) ||
17897 time_before(jiffies, timeout))
17899 /* start with the oldest sequence on the rcv list */
17900 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17901 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17902 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17903 dmabuf->time_stamp);
17904 if (time_before(jiffies, timeout))
17907 list_del_init(&dmabuf->hbuf.list);
17908 list_for_each_entry_safe(d_buf, dnext,
17909 &dmabuf->dbuf.list, list) {
17910 list_del_init(&d_buf->list);
17911 lpfc_in_buf_free(vport->phba, d_buf);
17913 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17916 lpfc_update_rcv_time_stamp(vport);
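/*
 * Illustrative sketch of the timeout arithmetic above: a sequence is
 * stale once "now" is past its time stamp plus E_D_TOV. The kernel's
 * time_before() does wrap-safe signed subtraction; a minimal analogue:
 */
#define ex_time_before(a, b)	((long)((a) - (b)) < 0)

static int ex_seq_timed_out(unsigned long now, unsigned long stamp,
			    unsigned long edtov_ticks)
{
	return !ex_time_before(now, stamp + edtov_ticks);
}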
17920 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
17921 * @vport: pointer to a virtual port
17922 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
17924 * This function searches through the existing incomplete sequences that have
17925 * been sent to this @vport. If the frame matches one of the incomplete
17926 * sequences then the dbuf in the @dmabuf is added to the list of frames that
17927 * make up that sequence. If no sequence is found that matches this frame then
17928 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
17929 * This function returns a pointer to the first dmabuf in the sequence list that
17930 * the frame was linked to.
17932 static struct hbq_dmabuf *
17933 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17935 struct fc_frame_header *new_hdr;
17936 struct fc_frame_header *temp_hdr;
17937 struct lpfc_dmabuf *d_buf;
17938 struct lpfc_dmabuf *h_buf;
17939 struct hbq_dmabuf *seq_dmabuf = NULL;
17940 struct hbq_dmabuf *temp_dmabuf = NULL;
17943 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17944 dmabuf->time_stamp = jiffies;
17945 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17947 /* Use the hdr_buf to find the sequence that this frame belongs to */
17948 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17949 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17950 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17951 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17952 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17954 /* found a pending sequence that matches this frame */
17955 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17960 * This indicates first frame received for this sequence.
17961 * Queue the buffer on the vport's rcv_buffer_list.
17963 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17964 lpfc_update_rcv_time_stamp(vport);
17967 temp_hdr = seq_dmabuf->hbuf.virt;
17968 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
17969 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17970 list_del_init(&seq_dmabuf->hbuf.list);
17971 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17972 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17973 lpfc_update_rcv_time_stamp(vport);
17976 /* move this sequence to the tail to indicate a young sequence */
17977 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
17978 seq_dmabuf->time_stamp = jiffies;
17979 lpfc_update_rcv_time_stamp(vport);
17980 if (list_empty(&seq_dmabuf->dbuf.list)) {
17981 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17984 /* find the correct place in the sequence to insert this frame */
17985 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
17987 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17988 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
17990 * If the frame's sequence count is greater than the frame on
17991 * the list then insert the frame right after this frame
17993 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
17994 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17995 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
18000 if (&d_buf->list == &seq_dmabuf->dbuf.list)
18002 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
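/*
 * Illustrative array-based analogue of the insertion above: scan from the
 * tail (frames usually arrive nearly in order) and place the new frame
 * after the first entry whose SEQ_CNT is smaller, keeping the sequence
 * sorted. Hypothetical helper, not a driver API.
 */
static int ex_insert_pos(const unsigned short *seq_cnts, int n,
			 unsigned short new_cnt)
{
	int i;

	for (i = n - 1; i >= 0; i--)
		if (new_cnt > seq_cnts[i])
			return i + 1;	/* insert right after this frame */
	return 0;			/* smallest seen: goes to the head */
}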
18011 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
18012 * @vport: pointer to a virtual port
18013 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18015 * This function tries to abort the partially assembled sequence described
18016 * by the information from the basic abort @dmabuf. It checks whether such a
18017 * partially assembled sequence is held by the driver. If so, it shall free up all
18018 * the frames from the partially assembled sequence.
18021 * true -- if there is a matching partially assembled sequence present and
18022 * all the frames freed with the sequence;
18023 * false -- if there is no matching partially assembled sequence present, so
18024 * nothing got aborted in the lower layer driver
18027 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
18028 struct hbq_dmabuf *dmabuf)
18030 struct fc_frame_header *new_hdr;
18031 struct fc_frame_header *temp_hdr;
18032 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
18033 struct hbq_dmabuf *seq_dmabuf = NULL;
18035 /* Use the hdr_buf to find the sequence that matches this frame */
18036 INIT_LIST_HEAD(&dmabuf->dbuf.list);
18037 INIT_LIST_HEAD(&dmabuf->hbuf.list);
18038 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18039 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18040 temp_hdr = (struct fc_frame_header *)h_buf->virt;
18041 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18042 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18043 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18045 /* found a pending sequence that matches this frame */
18046 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18050 /* Free up all the frames from the partially assembled sequence */
18052 list_for_each_entry_safe(d_buf, n_buf,
18053 &seq_dmabuf->dbuf.list, list) {
18054 list_del_init(&d_buf->list);
18055 lpfc_in_buf_free(vport->phba, d_buf);
18063 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
18064 * @vport: pointer to a virtual port
18065 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18067 * This function tries to abort the assembled sequence at the upper level
18068 * protocol, described by the information from the basic abort @dmabuf. It
18069 * checks whether such a pending context exists at the upper level protocol.
18070 * If so, it shall clean up the pending context.
18073 * true -- if there is a matching pending context of the sequence cleaned
18075 * false -- if there is no matching pending context of the sequence present
18079 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18081 struct lpfc_hba *phba = vport->phba;
18084 /* Accepting abort at ulp with SLI4 only */
18085 if (phba->sli_rev < LPFC_SLI_REV4)
18088 /* Register all caring upper level protocols to attend abort */
18089 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
18097 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
18098 * @phba: Pointer to HBA context object.
18099 * @cmd_iocbq: pointer to the command iocbq structure.
18100 * @rsp_iocbq: pointer to the response iocbq structure.
18102 * This function handles the sequence abort response iocb command complete
18103 * event. It properly releases the memory allocated to the sequence abort
18107 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
18108 struct lpfc_iocbq *cmd_iocbq,
18109 struct lpfc_iocbq *rsp_iocbq)
18111 struct lpfc_nodelist *ndlp;
18114 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
18115 lpfc_nlp_put(ndlp);
18116 lpfc_sli_release_iocbq(phba, cmd_iocbq);
18119 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
18120 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
18121 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18122 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
18123 rsp_iocbq->iocb.ulpStatus,
18124 rsp_iocbq->iocb.un.ulpWord[4]);
18128 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
18129 * @phba: Pointer to HBA context object.
18130 * @xri: xri id in transaction.
18132 * This function validates that the xri maps to the known range of XRIs allocated and
18133 * used by the driver.
18136 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
18141 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
18142 if (xri == phba->sli4_hba.xri_ids[i])
18149 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
18150 * @vport: pointer to a virtual port.
18151 * @fc_hdr: pointer to a FC frame header.
18152 * @aborted: was the partially assembled receive sequence successfully aborted
18154 * This function sends a basic response to a previous unsol sequence abort
18155 * event after aborting the sequence handling.
18158 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
18159 struct fc_frame_header *fc_hdr, bool aborted)
18161 struct lpfc_hba *phba = vport->phba;
18162 struct lpfc_iocbq *ctiocb = NULL;
18163 struct lpfc_nodelist *ndlp;
18164 uint16_t oxid, rxid, xri, lxri;
18165 uint32_t sid, fctl;
18169 if (!lpfc_is_link_up(phba))
18172 sid = sli4_sid_from_fc_hdr(fc_hdr);
18173 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
18174 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
18176 ndlp = lpfc_findnode_did(vport, sid);
18178 ndlp = lpfc_nlp_init(vport, sid);
18180 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
18181 "1268 Failed to allocate ndlp for "
18182 "oxid:x%x SID:x%x\n", oxid, sid);
18185 /* Put ndlp onto pport node list */
18186 lpfc_enqueue_node(vport, ndlp);
18189 /* Allocate buffer for rsp iocb */
18190 ctiocb = lpfc_sli_get_iocbq(phba);
18194 /* Extract the F_CTL field from FC_HDR */
18195 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
18197 icmd = &ctiocb->iocb;
18198 icmd->un.xseq64.bdl.bdeSize = 0;
18199 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
18200 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
18201 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
18202 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
18204 /* Fill in the rest of iocb fields */
18205 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
18206 icmd->ulpBdeCount = 0;
18208 icmd->ulpClass = CLASS3;
18209 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
18210 ctiocb->context1 = lpfc_nlp_get(ndlp);
18211 if (!ctiocb->context1) {
18212 lpfc_sli_release_iocbq(phba, ctiocb);
18216 ctiocb->vport = phba->pport;
18217 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
18218 ctiocb->sli4_lxritag = NO_XRI;
18219 ctiocb->sli4_xritag = NO_XRI;
18221 if (fctl & FC_FC_EX_CTX)
18222 /* Exchange responder sent the abort so we
18223 * own the oxid.
18224 */
18225 xri = oxid;
18226 else
18227 xri = rxid;
18228 lxri = lpfc_sli4_xri_inrange(phba, xri);
18229 if (lxri != NO_XRI)
18230 lpfc_set_rrq_active(phba, ndlp, lxri,
18231 (xri == oxid) ? rxid : oxid, 0);
18232 /* For BA_ABTS from exchange responder, if the logical xri with
18233 * the oxid maps to the FCP XRI range, the port no longer has
18234 * that exchange context, send a BLS_RJT. Override the IOCB for
18237 if ((fctl & FC_FC_EX_CTX) &&
18238 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
18239 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
18240 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
18241 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
18242 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
18245 /* If BA_ABTS failed to abort a partially assembled receive sequence,
18246 * the driver no longer has that exchange, send a BLS_RJT. Override
18247 * the IOCB for a BA_RJT.
18249 if (aborted == false) {
18250 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
18251 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
18252 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
18253 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
18256 if (fctl & FC_FC_EX_CTX) {
18257 /* ABTS sent by responder to CT exchange, construction
18258 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
18259 * field and RX_ID from ABTS for RX_ID field.
18261 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
18263 /* ABTS sent by initiator to CT exchange, construction
18264 * of BA_ACC will need to allocate a new XRI as for the
18267 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
18269 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
18270 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
18272 /* Xmit CT abts response on exchange <xid> */
18273 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
18274 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
18275 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
18277 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
18278 if (rc == IOCB_ERROR) {
18279 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
18280 "2925 Failed to issue CT ABTS RSP x%x on "
18281 "xri x%x, Data x%x\n",
18282 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
18284 lpfc_nlp_put(ndlp);
18285 ctiocb->context1 = NULL;
18286 lpfc_sli_release_iocbq(phba, ctiocb);
18291 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
18292 * @vport: Pointer to the vport on which this sequence was received
18293 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18295 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
18296 * receive sequence is only partially assembled by the driver, it shall abort
18297 * the partially assembled frames for the sequence. Otherwise, if the
18298 * unsolicited receive sequence has been completely assembled and passed to
18299 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to show
18300 * that the unsolicited sequence has been aborted. After that, it will issue a basic
18301 * accept to accept the abort.
18304 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
18305 struct hbq_dmabuf *dmabuf)
18307 struct lpfc_hba *phba = vport->phba;
18308 struct fc_frame_header fc_hdr;
18312 /* Make a copy of fc_hdr before the dmabuf being released */
18313 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
18314 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
18316 if (fctl & FC_FC_EX_CTX) {
18317 /* ABTS by responder to exchange, no cleanup needed */
18320 /* ABTS by initiator to exchange, need to do cleanup */
18321 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
18322 if (aborted == false)
18323 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
18325 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18327 if (phba->nvmet_support) {
18328 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
18332 /* Respond with BA_ACC or BA_RJT accordingly */
18333 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
18337 * lpfc_seq_complete - Indicates if a sequence is complete
18338 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18340 * This function checks the sequence, starting with the frame described by
18341 * @dmabuf, to see if all the frames associated with this sequence are present.
18342 * The frames associated with this sequence are linked to the @dmabuf using the
18343 * dbuf list. This function looks for three major things: 1) That the first frame
18344 * has a sequence count of zero. 2) That there is a frame with the last-frame-of-
18345 * sequence bit set. 3) That there are no holes in the sequence count. The function will
18346 * return 1 when the sequence is complete, otherwise it will return 0.
18349 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
18351 struct fc_frame_header *hdr;
18352 struct lpfc_dmabuf *d_buf;
18353 struct hbq_dmabuf *seq_dmabuf;
18357 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18358 /* make sure first frame of sequence has a sequence count of zero */
18359 if (hdr->fh_seq_cnt != seq_count)
18361 fctl = (hdr->fh_f_ctl[0] << 16 |
18362 hdr->fh_f_ctl[1] << 8 |
18364 /* If last frame of sequence we can return success. */
18365 if (fctl & FC_FC_END_SEQ)
18367 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
18368 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18369 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18370 /* If there is a hole in the sequence count then fail. */
18371 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
18373 fctl = (hdr->fh_f_ctl[0] << 16 |
18374 hdr->fh_f_ctl[1] << 8 |
18376 /* If last frame of sequence we can return success. */
18377 if (fctl & FC_FC_END_SEQ)
18384 * lpfc_prep_seq - Prep sequence for ULP processing
18385 * @vport: Pointer to the vport on which this sequence was received
18386 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
18388 * This function takes a sequence, described by a list of frames, and creates
18389 * a list of iocbq structures to describe the sequence. This iocbq list will be
18390 * used to issue to the generic unsolicited sequence handler. This routine
18391 * returns a pointer to the first iocbq in the list. If the function is unable
18392 * to allocate an iocbq then it throws out the received frames that were not
18393 * able to be described and returns a pointer to the first iocbq. If unable to
18394 * allocate any iocbqs (including the first) this function will return NULL.
18396 static struct lpfc_iocbq *
18397 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
18399 struct hbq_dmabuf *hbq_buf;
18400 struct lpfc_dmabuf *d_buf, *n_buf;
18401 struct lpfc_iocbq *first_iocbq, *iocbq;
18402 struct fc_frame_header *fc_hdr;
18404 uint32_t len, tot_len;
18405 struct ulp_bde64 *pbde;
18407 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18408 /* remove from receive buffer list */
18409 list_del_init(&seq_dmabuf->hbuf.list);
18410 lpfc_update_rcv_time_stamp(vport);
18411 /* get the Remote Port's SID */
18412 sid = sli4_sid_from_fc_hdr(fc_hdr);
18414 /* Get an iocbq struct to fill in. */
18415 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
18417 /* Initialize the first IOCB. */
18418 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
18419 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
18420 first_iocbq->vport = vport;
18422 /* Check FC Header to see what TYPE of frame we are rcv'ing */
18423 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
18424 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
18425 first_iocbq->iocb.un.rcvels.parmRo =
18426 sli4_did_from_fc_hdr(fc_hdr);
18427 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
18429 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
18430 first_iocbq->iocb.ulpContext = NO_XRI;
18431 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
18432 be16_to_cpu(fc_hdr->fh_ox_id);
18433 /* iocbq is prepped for internal consumption. Physical vpi. */
18434 first_iocbq->iocb.unsli3.rcvsli3.vpi =
18435 vport->phba->vpi_ids[vport->vpi];
18436 /* put the first buffer into the first IOCBq */
18437 tot_len = bf_get(lpfc_rcqe_length,
18438 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
18440 first_iocbq->context2 = &seq_dmabuf->dbuf;
18441 first_iocbq->context3 = NULL;
18442 first_iocbq->iocb.ulpBdeCount = 1;
18443 if (tot_len > LPFC_DATA_BUF_SIZE)
18444 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
18445 LPFC_DATA_BUF_SIZE;
18447 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
18449 first_iocbq->iocb.un.rcvels.remoteID = sid;
18451 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
18453 iocbq = first_iocbq;
18455 * Each IOCBq can have two Buffers assigned, so go through the list
18456 * of buffers for this sequence and save two buffers in each IOCBq
18458 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
18460 lpfc_in_buf_free(vport->phba, d_buf);
18463 if (!iocbq->context3) {
18464 iocbq->context3 = d_buf;
18465 iocbq->iocb.ulpBdeCount++;
18466 /* We need to get the size out of the right CQE */
18467 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18468 len = bf_get(lpfc_rcqe_length,
18469 &hbq_buf->cq_event.cqe.rcqe_cmpl);
18470 pbde = (struct ulp_bde64 *)
18471 &iocbq->iocb.unsli3.sli3Words[4];
18472 if (len > LPFC_DATA_BUF_SIZE)
18473 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
18475 pbde->tus.f.bdeSize = len;
18477 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
18480 iocbq = lpfc_sli_get_iocbq(vport->phba);
18483 first_iocbq->iocb.ulpStatus =
18484 IOSTAT_FCP_RSP_ERROR;
18485 first_iocbq->iocb.un.ulpWord[4] =
18486 IOERR_NO_RESOURCES;
18488 lpfc_in_buf_free(vport->phba, d_buf);
18491 /* We need to get the size out of the right CQE */
18492 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18493 len = bf_get(lpfc_rcqe_length,
18494 &hbq_buf->cq_event.cqe.rcqe_cmpl);
18495 iocbq->context2 = d_buf;
18496 iocbq->context3 = NULL;
18497 iocbq->iocb.ulpBdeCount = 1;
18498 if (len > LPFC_DATA_BUF_SIZE)
18499 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
18500 LPFC_DATA_BUF_SIZE;
18502 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
18505 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
18507 iocbq->iocb.un.rcvels.remoteID = sid;
18508 list_add_tail(&iocbq->list, &first_iocbq->list);
18511 /* Free the sequence's header buffer */
18513 lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
18515 return first_iocbq;
18519 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
18520 struct hbq_dmabuf *seq_dmabuf)
18522 struct fc_frame_header *fc_hdr;
18523 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
18524 struct lpfc_hba *phba = vport->phba;
18526 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18527 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
18529 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18530 "2707 Ring %d handler: Failed to allocate "
18531 "iocb Rctl x%x Type x%x received\n",
18533 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18536 if (!lpfc_complete_unsol_iocb(phba,
18537 phba->sli4_hba.els_wq->pring,
18538 iocbq, fc_hdr->fh_r_ctl,
18540 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18541 "2540 Ring %d handler: unexpected Rctl "
18542 "x%x Type x%x received\n",
18544 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18546 /* Free iocb created in lpfc_prep_seq */
18547 list_for_each_entry_safe(curr_iocb, next_iocb,
18548 &iocbq->list, list) {
18549 list_del_init(&curr_iocb->list);
18550 lpfc_sli_release_iocbq(phba, curr_iocb);
18552 lpfc_sli_release_iocbq(phba, iocbq);
18556 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
18557 struct lpfc_iocbq *rspiocb)
18559 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
18561 if (pcmd && pcmd->virt)
18562 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18564 lpfc_sli_release_iocbq(phba, cmdiocb);
18565 lpfc_drain_txq(phba);
18569 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
18570 struct hbq_dmabuf *dmabuf)
18572 struct fc_frame_header *fc_hdr;
18573 struct lpfc_hba *phba = vport->phba;
18574 struct lpfc_iocbq *iocbq = NULL;
18575 union lpfc_wqe *wqe;
18576 struct lpfc_dmabuf *pcmd = NULL;
18577 uint32_t frame_len;
18579 unsigned long iflags;
18581 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18582 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
18584 /* Send the received frame back */
18585 iocbq = lpfc_sli_get_iocbq(phba);
18587 /* Queue cq event and wakeup worker thread to process it */
18588 spin_lock_irqsave(&phba->hbalock, iflags);
18589 list_add_tail(&dmabuf->cq_event.list,
18590 &phba->sli4_hba.sp_queue_event);
18591 phba->hba_flag |= HBA_SP_QUEUE_EVT;
18592 spin_unlock_irqrestore(&phba->hbalock, iflags);
18593 lpfc_worker_wake_up(phba);
18597 /* Allocate buffer for command payload */
18598 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
18600 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
18602 if (!pcmd || !pcmd->virt)
18605 INIT_LIST_HEAD(&pcmd->list);
18607 /* copyin the payload */
18608 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
18610 /* fill in BDE's for command */
18611 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
18612 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
18613 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
18614 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
18616 iocbq->context2 = pcmd;
18617 iocbq->vport = vport;
18618 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
18619 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
18622 * Setup rest of the iocb as though it were a WQE
18623 * Build the SEND_FRAME WQE
18625 wqe = (union lpfc_wqe *)&iocbq->iocb;
18627 wqe->send_frame.frame_len = frame_len;
18628 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
18629 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
18630 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
18631 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
18632 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
18633 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
18635 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
18636 iocbq->iocb.ulpLe = 1;
18637 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
18638 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
18639 if (rc == IOCB_ERROR)
18642 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18646 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
18647 "2023 Unable to process MDS loopback frame\n");
18648 if (pcmd && pcmd->virt)
18649 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18652 lpfc_sli_release_iocbq(phba, iocbq);
18653 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18657 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
18658 * @phba: Pointer to HBA context object.
18659 * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
18661 * This function is called with no lock held. This function processes all
18662 * the received buffers and gives it to upper layers when a received buffer
18663 * indicates that it is the final frame in the sequence. The interrupt
18664 * service routine processes received buffers in interrupt context.
18665 * The worker thread calls lpfc_sli4_handle_received_buffer, which will call the
18666 * appropriate receive function when the final frame in a sequence is received.
18669 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
18670 struct hbq_dmabuf *dmabuf)
18672 struct hbq_dmabuf *seq_dmabuf;
18673 struct fc_frame_header *fc_hdr;
18674 struct lpfc_vport *vport;
18678 /* Process each received buffer */
18679 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18681 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
18682 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
18683 vport = phba->pport;
18684 /* Handle MDS Loopback frames */
18685 if (!(phba->pport->load_flag & FC_UNLOADING))
18686 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18688 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18692 /* check to see if this a valid type of frame */
18693 if (lpfc_fc_frame_check(phba, fc_hdr)) {
18694 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18698 if ((bf_get(lpfc_cqe_code,
18699 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
18700 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
18701 &dmabuf->cq_event.cqe.rcqe_cmpl);
18703 fcfi = bf_get(lpfc_rcqe_fcf_id,
18704 &dmabuf->cq_event.cqe.rcqe_cmpl);
18706 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
18707 vport = phba->pport;
18708 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18709 "2023 MDS Loopback %d bytes\n",
18710 bf_get(lpfc_rcqe_length,
18711 &dmabuf->cq_event.cqe.rcqe_cmpl));
18712 /* Handle MDS Loopback frames */
18713 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18717 /* d_id this frame is directed to */
18718 did = sli4_did_from_fc_hdr(fc_hdr);
18720 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
18722 /* throw out the frame */
18723 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18727 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
18728 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
18729 (did != Fabric_DID)) {
18731 * Throw out the frame if we are not pt2pt.
18732 * The pt2pt protocol allows for discovery frames
18733 * to be received without a registered VPI.
18735 if (!(vport->fc_flag & FC_PT2PT) ||
18736 (phba->link_state == LPFC_HBA_READY)) {
18737 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18742 /* Handle the basic abort sequence (BA_ABTS) event */
18743 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
18744 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
18748 /* Link this frame */
18749 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
18751 /* unable to add frame to vport - throw it out */
18752 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18755 /* If not last frame in sequence continue processing frames. */
18756 if (!lpfc_seq_complete(seq_dmabuf))
18759 /* Send the complete sequence to the upper layer protocol */
18760 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
18764 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
18765 * @phba: pointer to lpfc hba data structure.
18767 * This routine is invoked to post rpi header templates to the
18768 * HBA consistent with the SLI-4 interface spec. This routine
18769 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
18770 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
18772 * This routine does not require any locks. Its usage is expected
18773 * to be driver load or reset recovery when the driver is
18774 * active.
18778 * -EIO - The mailbox failed to complete successfully.
18779 * When this error occurs, the driver is not guaranteed
18780 * to have any rpi regions posted to the device and
18781 * must either attempt to repost the regions or take a
18785 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
18787 struct lpfc_rpi_hdr *rpi_page;
18791 /* SLI4 ports that support extents do not require RPI headers. */
18792 if (!phba->sli4_hba.rpi_hdrs_in_use)
18794 if (phba->sli4_hba.extents_in_use)
18797 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
18799 * Assign the rpi headers a physical rpi only if the driver
18800 * has not initialized those resources. A port reset only
18801 * needs the headers posted.
18803 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
18805 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18807 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
18808 if (rc != MBX_SUCCESS) {
18809 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18810 "2008 Error %d posting all rpi "
18818 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
18819 LPFC_RPI_RSRC_RDY);
18824 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
18825 * @phba: pointer to lpfc hba data structure.
18826 * @rpi_page: pointer to the rpi memory region.
18828 * This routine is invoked to post a single rpi header to the
18829 * HBA consistent with the SLI-4 interface spec. This memory region
18830 * maps up to 64 rpi context regions.
18834 * -ENOMEM - No available memory
18835 * -EIO - The mailbox failed to complete successfully.
18838 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
18840 LPFC_MBOXQ_t *mboxq;
18841 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
18843 uint32_t shdr_status, shdr_add_status;
18844 union lpfc_sli4_cfg_shdr *shdr;
18846 /* SLI4 ports that support extents do not require RPI headers. */
18847 if (!phba->sli4_hba.rpi_hdrs_in_use)
18849 if (phba->sli4_hba.extents_in_use)
18852 /* The port is notified of the header region via a mailbox command. */
18853 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18855 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18856 "2001 Unable to allocate memory for issuing "
18857 "SLI_CONFIG_SPECIAL mailbox command\n");
18861 /* Post all rpi memory regions to the port. */
18862 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
18863 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18864 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18865 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
18866 sizeof(struct lpfc_sli4_cfg_mhdr),
18867 LPFC_SLI4_MBX_EMBED);
18870 /* Post the physical rpi to the port for this rpi header. */
18871 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18872 rpi_page->start_rpi);
18873 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18874 hdr_tmpl, rpi_page->page_count);
18876 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18877 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
18878 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18879 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18880 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18881 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18882 mempool_free(mboxq, phba->mbox_mem_pool);
18883 if (shdr_status || shdr_add_status || rc) {
18884 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18885 "2514 POST_RPI_HDR mailbox failed with "
18886 "status x%x add_status x%x, mbx status x%x\n",
18887 shdr_status, shdr_add_status, rc);
18891 * The next_rpi stores the next logical module-64 rpi value used
18892 * to post physical rpis in subsequent rpi postings.
18894 spin_lock_irq(&phba->hbalock);
18895 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18896 spin_unlock_irq(&phba->hbalock);
18902 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
18903 * @phba: pointer to lpfc hba data structure.
18905 * This routine is invoked to allocate the next available rpi from the
18906 * driver's rpi bitmask. If the pool of unposted rpis runs low, this
18907 * routine also posts another SLI4_PAGE_SIZE rpi header memory region
18908 * to the port, consistent with the SLI-4 interface spec.
18911 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
18912 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
	unsigned long rpi;
	uint16_t max_rpi, rpi_limit;
	uint16_t rpi_remaining, lrpi = 0;
	struct lpfc_rpi_hdr *rpi_hdr;
	unsigned long iflag;

	/*
	 * Fetch the next logical rpi. Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	rpi_limit = phba->sli4_hba.next_rpi;

	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
	if (rpi >= rpi_limit)
		rpi = LPFC_RPI_ALLOC_ERROR;
	else {
		set_bit(rpi, phba->sli4_hba.rpi_bmask);
		phba->sli4_hba.max_cfg_param.rpi_used++;
		phba->sli4_hba.rpi_count++;
	}
	lpfc_printf_log(phba, KERN_INFO,
			LOG_NODE | LOG_DISCOVERY,
			"0001 Allocated rpi:x%x max:x%x lim:x%x\n",
			(int) rpi, max_rpi, rpi_limit);
	/*
	 * Don't try to allocate more rpi header regions if the device limit
	 * has been exhausted.
	 */
	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
	    (phba->sli4_hba.rpi_count >= max_rpi)) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * RPI header postings are not required for SLI4 ports capable of
	 * extents.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * If the driver is running low on rpi resources, allocate another
	 * page now. Note that the next_rpi value is used because
	 * it represents how many are actually in use whereas max_rpi notes
	 * how many are supported max by the device.
	 */
	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
		if (!rpi_hdr) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2002 Error Could not grow rpi "
					"count\n");
		} else {
			lrpi = rpi_hdr->start_rpi;
			rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
		}
	}

	return rpi;
}
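/*
 * Illustrative sketch (editorial addition, not driver code): rpi
 * allocation above is a classic bitmap allocator - find the first clear
 * bit below the current limit, set it, and count it as used; freeing
 * clears the bit again. A minimal user-space analogue over a
 * byte-addressed bitmap, with hypothetical demo_* names:
 */
#if 0	/* sketch only, excluded from compilation */
#include <stdint.h>

#define DEMO_ALLOC_ERROR 0xffffu

static uint16_t demo_alloc_id(uint8_t *bmask, uint16_t limit)
{
	uint16_t id;

	for (id = 0; id < limit; id++) {
		if (!(bmask[id / 8] & (1u << (id % 8)))) {
			bmask[id / 8] |= 1u << (id % 8);	/* set_bit */
			return id;
		}
	}
	return DEMO_ALLOC_ERROR;	/* no free ids below the limit */
}

static void demo_free_id(uint8_t *bmask, uint16_t id)
{
	bmask[id / 8] &= ~(1u << (id % 8));	/* clear_bit */
}
#endif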
/**
 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
static void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	/*
	 * If the rpi value indicates a prior unreg has already
	 * been done, skip the unreg.
	 */
	if (rpi == LPFC_RPI_ALLOC_ERROR)
		return;

	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
		phba->sli4_hba.rpi_count--;
		phba->sli4_hba.max_cfg_param.rpi_used--;
	} else {
		lpfc_printf_log(phba, KERN_INFO,
				LOG_NODE | LOG_DISCOVERY,
				"2016 rpi %x not inuse\n",
				rpi);
	}
}
/**
 * lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_rpi(phba, rpi);
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove the memory regions that
 * provided rpi tracking via a bitmask.
 **/
void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.rpi_bmask);
	kfree(phba->sli4_hba.rpi_ids);
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
}
/**
 * lpfc_sli4_resume_rpi - Resume an rpi on the port
 * @ndlp: pointer to lpfc nodelist data structure.
 * @cmpl: completion call-back.
 * @arg: data to load as mbox 'caller buffer information'
 *
 * This routine is invoked to issue a RESUME_RPI mailbox command to
 * resume I/O on a previously suspended rpi.
 **/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
		     void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_hba *phba = ndlp->phba;
	int rc;

	/* The port is notified of the header region via a mailbox command. */
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	/* If cmpl assigned, then this nlp_get pairs with
	 * lpfc_mbx_cmpl_resume_rpi.
	 *
	 * Else cmpl is NULL, then this nlp_get pairs with
	 * lpfc_sli_def_mbox_cmpl.
	 */
	if (!lpfc_nlp_get(ndlp)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2122 %s: Failed to get nlp ref\n",
				__func__);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	/* Post all rpi memory regions to the port. */
	lpfc_resume_rpi(mboxq, ndlp);
	if (cmpl) {
		mboxq->mbox_cmpl = cmpl;
		mboxq->ctx_buf = arg;
	} else
		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mboxq->ctx_ndlp = ndlp;
	mboxq->vport = ndlp->vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2010 Resume RPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		lpfc_nlp_put(ndlp);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
/**
 * lpfc_sli4_init_vpi - Initialize a vpi with the port
 * @vport: Pointer to the vport for which the vpi is being initialized
 *
 * This routine is invoked to activate a vpi with the port.
 *
 * Returns: 0 on success, -Evalue otherwise.
 **/
int
lpfc_sli4_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc = 0;
	int retval = MBX_SUCCESS;
	uint32_t mbox_tmo;
	struct lpfc_hba *phba = vport->phba;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	lpfc_init_vpi(phba, mboxq, vport->vpi);
	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2022 INIT VPI Mailbox failed "
				 "status %d, mbxStatus x%x\n", rc,
				 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		retval = -EIO;
	}
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, vport->phba->mbox_mem_pool);

	return retval;
}
/**
 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: Pointer to mailbox object.
 *
 * This routine is the completion handler for the ADD_FCF_RECORD mailbox
 * command. It checks the mailbox subheader status and frees the
 * nonembedded mailbox resources.
 **/
static void
lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	void *virt_addr;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
19158 virt_addr = mboxq->sge_array->addr[0];
19159 /* The IOCTL status is embedded in the mailbox subheader. */
19160 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
19161 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19162 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19164 if ((shdr_status || shdr_add_status) &&
19165 (shdr_status != STATUS_FCF_IN_USE))
19166 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19167 "2558 ADD_FCF_RECORD mailbox failed with "
19168 "status x%x add_status x%x\n",
19169 shdr_status, shdr_add_status);
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}
/**
 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the initialized fcf record to add.
 *
 * This routine is invoked to manually add a single FCF record. The caller
 * must pass a completely initialized FCF_Record. This routine takes
 * care of the nonembedded mailbox operations.
 **/
int
lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
{
	int rc = 0;
	LPFC_MBOXQ_t *mboxq;
	uint8_t *bytep;
	void *virt_addr;
	struct lpfc_mbx_sge sge;
	uint32_t alloc_len, req_len;
	uint32_t fcfindex;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2009 Failed to allocate mbox for ADD_FCF cmd\n");
		return -ENOMEM;
	}
	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
		  sizeof(uint32_t);
19204 /* Allocate DMA memory and set up the non-embedded mailbox command */
19205 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19206 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
19207 req_len, LPFC_SLI4_MBX_NEMBED);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2523 Allocated DMA memory size (x%x) is "
				"less than the requested DMA memory "
				"size (x%x)\n", alloc_len, req_len);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -ENOMEM;
	}
	/*
	 * Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
19222 virt_addr = mboxq->sge_array->addr[0];
	/*
	 * Configure the FCF record for FCFI 0. This is the driver's
	 * hardcoded default and gets used in nonFIP mode.
	 */
19227 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
19228 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
19229 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
	/*
	 * Copy the fcf_index and the FCF Record Data. The data starts after
	 * the FCoE header plus word10. The data copy needs to be endian
	 * correct.
	 */
	bytep += sizeof(uint32_t);
19237 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
19238 mboxq->vport = phba->pport;
19239 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2515 ADD_FCF_RECORD mailbox failed with "
				"status 0x%x\n", rc);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		rc = -EIO;
	} else
		rc = 0;

	return rc;
}
/**
 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record to write the default data.
 * @fcf_index: FCF table entry index.
 *
 * This routine is invoked to build the driver's default FCF record. The
 * values used are hardcoded. This routine handles memory initialization.
 **/
static void
lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
				struct fcf_record *fcf_record,
				uint16_t fcf_index)
{
19268 memset(fcf_record, 0, sizeof(struct fcf_record));
19269 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
19270 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
19271 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
19272 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
19273 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
19274 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
19275 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
19276 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
19277 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
19278 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
19279 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
19280 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
19281 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
19282 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
19283 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
19284 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
19285 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
	/* Set the VLAN bit map */
	if (phba->valid_vlan) {
		fcf_record->vlan_bitmap[phba->vlan_id / 8]
			= 1 << (phba->vlan_id % 8);
	}
}
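/*
 * Illustrative sketch (editorial addition, not driver code): the VLAN
 * bitmap write above places one bit per VLAN id, eight ids per byte.
 * Because the record was just memset() to zero, a plain assignment is
 * enough there; a reusable helper would OR the bit in instead:
 */
#if 0	/* sketch only, excluded from compilation */
#include <stdint.h>

static void demo_vlan_bitmap_set(uint8_t *bitmap, uint16_t vlan_id)
{
	bitmap[vlan_id / 8] |= (uint8_t)(1u << (vlan_id % 8));
}
#endif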
/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to scan the entire FCF table by reading FCF
 * record and processing it one at a time starting from the @fcf_index
 * for initial FCF discovery or fast FCF failover rediscovery.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
int
lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
	phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2000 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_scan;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_scan;
	}
19327 /* Issue the mailbox command asynchronously */
19328 mboxq->vport = phba->pport;
19329 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
19331 spin_lock_irq(&phba->hbalock);
19332 phba->hba_flag |= FCF_TS_INPROG;
19333 spin_unlock_irq(&phba->hbalock);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else {
		/* Reset eligible FCF count for new scan */
		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
			phba->fcf.eligible_fcf_cnt = 0;
		error = 0;
	}
fail_fcf_scan:
	if (error) {
		if (mboxq)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		/* FCF scan failed, clear FCF_TS_INPROG flag */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_TS_INPROG;
		spin_unlock_irq(&phba->hbalock);
	}
	return error;
}

/**
19357 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
19358 * @phba: pointer to lpfc hba data structure.
19359 * @fcf_index: FCF table entry offset.
19361 * This routine is invoked to read an FCF record indicated by @fcf_index
19362 * and to use it for FLOGI roundrobin FCF failover.
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
int
lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2763 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}

/**
19403 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
19404 * @phba: pointer to lpfc hba data structure.
19405 * @fcf_index: FCF table entry offset.
19407 * This routine is invoked to read an FCF record indicated by @fcf_index to
19408 * determine whether it's eligible for FLOGI roundrobin failover list.
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
int
lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2758 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}
/**
 * lpfc_check_next_fcf_pri_level
 * @phba: pointer to the lpfc_hba struct for this port.
 *
 * This routine is called from lpfc_sli4_fcf_rr_next_index_get when the
 * rr_bmask is empty. The FCF indices are put into the rr_bmask based on
 * their priority level, starting from the highest priority down to the
 * lowest. The most likely FCF candidate will be in the highest priority
 * group. When this routine is called, it searches the fcf_pri list for the
 * next lowest priority group and repopulates the rr_bmask with only those
 * fcf indices.
 *
 * Returns:
 * 1=success 0=failure
 **/
static int
lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
{
	uint16_t next_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	int rc;
	int ret = 0;
19470 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
19471 LPFC_SLI4_FCF_TBL_INDX_MAX);
19472 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19473 "3060 Last IDX %d\n", last_index);
19475 /* Verify the priority list has 2 or more entries */
19476 spin_lock_irq(&phba->hbalock);
19477 if (list_empty(&phba->fcf.fcf_pri_list) ||
19478 list_is_singular(&phba->fcf.fcf_pri_list)) {
19479 spin_unlock_irq(&phba->hbalock);
19480 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19481 "3061 Last IDX %d\n", last_index);
19482 return 0; /* Empty rr list */
19484 spin_unlock_irq(&phba->hbalock);
	next_fcf_pri = 0;
	/*
	 * Clear the rr_bmask and set all of the bits that are at this
	 * priority level.
	 */
	memset(phba->fcf.fcf_rr_bmask, 0,
	       sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
			continue;
		/*
		 * The 1st priority that has not FLOGI failed
		 * will be the highest.
		 */
		if (!next_fcf_pri)
			next_fcf_pri = fcf_pri->fcf_rec.priority;
		spin_unlock_irq(&phba->hbalock);
		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
			rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
			if (rc)
				return 0;
		}
		spin_lock_irq(&phba->hbalock);
	}
	/*
	 * If next_fcf_pri was not set above and the list is not empty then
	 * we have failed flogis on all of them. So reset flogi failed
	 * and start at the beginning.
	 */
	if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
			/*
			 * The 1st priority that has not FLOGI failed
			 * will be the highest.
			 */
			if (!next_fcf_pri)
				next_fcf_pri = fcf_pri->fcf_rec.priority;
			spin_unlock_irq(&phba->hbalock);
			if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
				rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
				if (rc)
					return 0;
			}
			spin_lock_irq(&phba->hbalock);
		}
	} else
		ret = 1;
	spin_unlock_irq(&phba->hbalock);

	return ret;
}
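/*
 * Illustrative sketch (editorial addition, not driver code): the
 * repopulation above picks the priority of the first non-failed entry
 * and admits only records sharing that priority. A standalone version
 * over a plain array, with hypothetical demo_* names:
 */
#if 0	/* sketch only, excluded from compilation */
#include <stddef.h>
#include <stdint.h>

struct demo_rec {
	uint16_t priority;
	int	 flogi_failed;
	int	 eligible;	/* out: set when admitted to the bmask */
};

static int demo_repopulate(struct demo_rec *recs, size_t n)
{
	uint16_t next_pri = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		if (recs[i].flogi_failed)
			continue;
		if (!next_pri)
			next_pri = recs[i].priority;	/* first non-failed */
		if (recs[i].priority == next_pri)
			recs[i].eligible = 1;
	}
	return next_pri != 0;	/* 1 = a priority group was admitted */
}
#endif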
/**
 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get the next eligible FCF record index in a round
 * robin fashion. If the next eligible FCF record index equals to the
 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
 * shall be returned, otherwise, the next eligible FCF record's index
 * shall be returned.
 **/
uint16_t
lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
	uint16_t next_fcf_index;

initial_priority:
	/* Search start from next bit of currently registered FCF index */
	next_fcf_index = phba->fcf.current_rec.fcf_indx;

next_priority:
	/* Determine the next fcf index to check */
	next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
				       LPFC_SLI4_FCF_TBL_INDX_MAX,
				       next_fcf_index);
	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		/*
		 * If we have wrapped then we need to clear the bits that
		 * have been tested so that we can detect when we should
		 * change the priority level.
		 */
		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
	}
	/* Check roundrobin failover list empty condition */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
	    next_fcf_index == phba->fcf.current_rec.fcf_indx) {
		/*
		 * If next fcf index is not found check if there are lower
		 * Priority level fcf's in the fcf_priority list.
		 * Set up the rr_bmask with all of the available fcf bits
		 * at that level and continue the selection process.
		 */
		if (lpfc_check_next_fcf_pri_level(phba))
			goto initial_priority;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2844 No roundrobin failover FCF available\n");

		return LPFC_FCOE_FCF_NEXT_NONE;
	}

	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
	    phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
	    LPFC_FCF_FLOGI_FAILED) {
		if (list_is_singular(&phba->fcf.fcf_pri_list))
			return LPFC_FCOE_FCF_NEXT_NONE;

		goto next_priority;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2845 Get next roundrobin failover FCF (x%x)\n",
			next_fcf_index);

	return next_fcf_index;
}
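/*
 * Illustrative sketch (editorial addition, not driver code): the
 * round-robin walk above is "advance one slot, find the next set bit,
 * wrap to bit 0 once, and give up when the search lands back on the
 * current index". The same shape in standalone C over a small bitmap,
 * with hypothetical demo_* names:
 */
#if 0	/* sketch only, excluded from compilation */
#include <stdint.h>

#define DEMO_TBL_MAX	64u
#define DEMO_NEXT_NONE	0xffffu

static unsigned int demo_find_next_bit(const uint8_t *bm, unsigned int max,
					unsigned int from)
{
	unsigned int i;

	for (i = from; i < max; i++)
		if (bm[i / 8] & (1u << (i % 8)))
			return i;
	return max;	/* not found */
}

static unsigned int demo_rr_next(const uint8_t *bm, unsigned int current)
{
	unsigned int next = (current + 1) % DEMO_TBL_MAX;

	next = demo_find_next_bit(bm, DEMO_TBL_MAX, next);
	if (next >= DEMO_TBL_MAX)		/* wrap around once */
		next = demo_find_next_bit(bm, DEMO_TBL_MAX, 0);
	if (next >= DEMO_TBL_MAX || next == current)
		return DEMO_NEXT_NONE;		/* list exhausted */
	return next;
}
#endif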
/**
 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index into the FCF table to 'set'
 *
 * This routine sets the FCF record index in to the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before setting the bit.
 *
 * Returns 0 if the index bit successfully set, otherwise, it returns
 * -EINVAL.
 **/
int
lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
{
19628 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19629 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19630 "2610 FCF (x%x) reached driver's book "
19631 "keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return -EINVAL;
	}
	/* Set the eligible FCF record index bmask */
	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2790 Set FCF (x%x) to roundrobin FCF failover "
			"bmask\n", fcf_index);

	return 0;
}

/**
19646 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
19647 * @phba: pointer to lpfc hba data structure.
19648 * @fcf_index: index into the FCF table to 'clear'
19650 * This routine clears the FCF record index from the eligible bmask for
19651 * roundrobin failover search. It checks to make sure that the index
19652 * does not go beyond the range of the driver allocated bmask dimension
 * before clearing the bit.
 **/
void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;

	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2762 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return;
	}
	/* Clear the eligible FCF record index bmask */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
				 list) {
		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
			list_del_init(&fcf_pri->list);
			break;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2791 Clear FCF (x%x) from roundrobin failover "
			"bmask\n", fcf_index);
}

/**
19684 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
19685 * @phba: pointer to lpfc hba data structure.
19686 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
19688 * This routine is the completion routine for the rediscover FCF table mailbox
19689 * command. If the mailbox command returned failure, it will try to stop the
19690 * FCF rediscover wait timer.
static void
lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	uint32_t shdr_status, shdr_add_status;

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19700 shdr_status = bf_get(lpfc_mbox_hdr_status,
19701 &redisc_fcf->header.cfg_shdr.response);
19702 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19703 &redisc_fcf->header.cfg_shdr.response);
19704 if (shdr_status || shdr_add_status) {
19705 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19706 "2746 Requesting for FCF rediscovery failed "
19707 "status x%x add_status x%x\n",
19708 shdr_status, shdr_add_status);
		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * CVL event triggered FCF rediscover request failed,
			 * last resort to re-try current registered FCF entry.
			 */
			lpfc_retry_pport_discovery(phba);
		} else {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * DEAD FCF event triggered FCF rediscover request
			 * failed, last resort to fail over as a link down
			 * to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		}
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2775 Start FCF rediscover quiescent timer\n");
		/*
		 * Start FCF rediscovery wait timer for pending FCF
		 * before rescan FCF record table.
		 */
		lpfc_fcf_redisc_wait_start_timer(phba);
	}

	mempool_free(mbox, phba->mbox_mem_pool);
}

/**
19743 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
19744 * @phba: pointer to lpfc hba data structure.
 * This routine is invoked to request for rediscovery of the entire FCF table
 * by the port.
 **/
int
lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	int rc, length;
19756 /* Cancel retry delay timers to all vports before FCF rediscover */
19757 lpfc_cancel_all_vport_retry_delay_timer(phba);
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2745 Failed to allocate mbox for "
				"requesting FCF rediscover.\n");
		return -ENOMEM;
	}
19767 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
19768 sizeof(struct lpfc_sli4_cfg_mhdr));
19769 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
19770 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
19771 length, LPFC_SLI4_MBX_EMBED);
19773 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19774 /* Set count to 0 for invalidating the entire FCF database */
19775 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
19777 /* Issue the mailbox command asynchronously */
19778 mbox->vport = phba->pport;
19779 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
19780 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

/**
19790 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
19791 * @phba: pointer to lpfc hba data structure.
19793 * This function is the failover routine as a last resort to the FCF DEAD
19794 * event when driver failed to perform fast FCF failover.
void
lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
{
	uint32_t link_state;

	/*
	 * Last resort as FCF DEAD event failover will treat this as
	 * a link down, but save the link state because we don't want
	 * it to be changed to Link Down unless it is already down.
	 */
	link_state = phba->link_state;
	lpfc_linkdown(phba);
	phba->link_state = link_state;
19810 /* Unregister FCF if no devices connected to it */
	lpfc_unregister_unused_fcf(phba);
}

/**
19815 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
19816 * @phba: pointer to lpfc hba data structure.
19817 * @rgn23_data: pointer to configure region 23 data.
19819 * This function gets SLI3 port configure region 23 data through memory dump
19820 * mailbox command. When it successfully retrieves data, the size of the data
19821 * will be returned, otherwise, 0 will be returned.
static uint32_t
lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint32_t offset = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2600 failed to allocate mailbox memory\n");
		return 0;
	}
	mb = &pmb->u.mb;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
19846 if (rc != MBX_SUCCESS) {
19847 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19848 "2601 failed to read config "
19849 "region 23, rc 0x%x Status 0x%x\n",
19850 rc, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/*
		 * dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
19860 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
19861 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
19863 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
19864 rgn23_data + offset,
19865 mb->un.varDmp.word_cnt);
19866 offset += mb->un.varDmp.word_cnt;
19867 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
	mempool_free(pmb, phba->mbox_mem_pool);
	return offset;
}

/**
19874 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
19875 * @phba: pointer to lpfc hba data structure.
19876 * @rgn23_data: pointer to configure region 23 data.
19878 * This function gets SLI4 port configure region 23 data through memory dump
19879 * mailbox command. When it successfully retrieves data, the size of the data
19880 * will be returned, otherwise, 0 will be returned.
static uint32_t
lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_dmabuf *mp = NULL;
	struct lpfc_mqe *mqe;
	uint32_t data_length = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3105 failed to allocate mailbox memory\n");
		return 0;
	}
	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
		goto out;
	mqe = &mboxq->u.mqe;
	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc)
		goto out;

	data_length = mqe->un.mb_words[5];
	if (data_length == 0)
		goto out;
	if (data_length > DMP_RGN23_SIZE) {
		data_length = 0;
		goto out;
	}

	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return data_length;
}

/**
19926 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
19927 * @phba: pointer to lpfc hba data structure.
 * This function reads region 23 and parses the TLV for port status to
 * decide if the user disabled the port. If the TLV indicates the
 * port is disabled, the hba_flag is set accordingly.
 **/
void
lpfc_sli_read_link_ste(struct lpfc_hba *phba)
{
19936 uint8_t *rgn23_data = NULL;
19937 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
19938 uint32_t offset = 0;
	/* Get adapter Region 23 data */
	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
	if (!rgn23_data)
		goto out;

	if (phba->sli_rev < LPFC_SLI_REV4)
		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
	else {
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
			goto out;
		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
	}

	if (!data_size)
		goto out;
19958 /* Check the region signature first */
19959 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
19960 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19961 "2619 Config region 23 has bad signature\n");
19966 /* Check the data structure version */
19967 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
19968 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19969 "2620 Config region 23 has bad version\n");
19974 /* Parse TLV entries in the region */
19975 while (offset < data_size) {
19976 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
19979 * If the TLV is not driver specific TLV or driver id is
19980 * not linux driver id, skip the record.
19982 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
19983 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
19984 (rgn23_data[offset + 3] != 0)) {
19985 offset += rgn23_data[offset + 1] * 4 + 4;
19989 /* Driver found a driver specific TLV in the config region */
19990 sub_tlv_len = rgn23_data[offset + 1] * 4;
19995 * Search for configured port state sub-TLV.
19997 while ((offset < data_size) &&
19998 (tlv_offset < sub_tlv_len)) {
19999 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
20004 if (rgn23_data[offset] != PORT_STE_TYPE) {
20005 offset += rgn23_data[offset + 1] * 4 + 4;
20006 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
20010 /* This HBA contains PORT_STE configured */
20011 if (!rgn23_data[offset + 2])
20012 phba->hba_flag |= LINK_DISABLED;
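/*
 * Illustrative sketch (editorial addition, not driver code): region 23
 * is a sequence of TLV records - one type byte, one length byte counted
 * in 4-byte words, then the payload - so skipping a record advances by
 * length * 4 + 4 bytes, exactly as above. A minimal standalone walker,
 * assuming that layout and hypothetical demo_* names:
 */
#if 0	/* sketch only, excluded from compilation */
#include <stdint.h>

#define DEMO_LAST_REC 0xff	/* hypothetical end-of-region marker */

static int demo_find_tlv(const uint8_t *data, uint32_t size, uint8_t want)
{
	uint32_t off = 0;

	while (off + 4 <= size) {
		uint8_t type = data[off];
		uint32_t len = (uint32_t)data[off + 1] * 4;

		if (type == DEMO_LAST_REC)
			break;
		if (type == want)
			return (int)off;	/* record header offset */
		off += len + 4;			/* skip header + payload */
	}
	return -1;				/* not found */
}
#endif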
/**
 * lpfc_wr_object - write an object to the firmware
 * @phba: HBA structure that indicates port to create a queue on.
 * @dmabuf_list: list of dmabufs to write to the port.
 * @size: the total byte value of the objects to write to the port.
 * @offset: the current offset to be used to start the transfer.
 *
 * This routine will create a wr_object mailbox command to send to the port.
 * The mailbox command will be constructed using the dma buffers described in
 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
 * BDEs that the embedded mailbox can support. The @offset variable will be
 * used to indicate the starting offset of the transfer and will also return
 * the offset after the write object mailbox has completed. @size is used to
 * determine the end of the object and whether the eof bit should be set.
 *
 * Return 0 if successful and offset will contain the new offset to use
 * for the next write.
 * Return negative value for error cases.
 **/
int
lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
	       uint32_t size, uint32_t *offset)
{
20044 uint32_t size, uint32_t *offset)
20046 struct lpfc_mbx_wr_object *wr_object;
20047 LPFC_MBOXQ_t *mbox;
20049 uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
20051 struct lpfc_dmabuf *dmabuf;
20052 uint32_t written = 0;
20053 bool check_change_status = false;
20055 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20059 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
20060 LPFC_MBOX_OPCODE_WRITE_OBJECT,
20061 sizeof(struct lpfc_mbx_wr_object) -
20062 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
20064 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
20065 wr_object->u.request.write_offset = *offset;
20066 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
20067 wr_object->u.request.object_name[0] =
20068 cpu_to_le32(wr_object->u.request.object_name[0]);
20069 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
20070 list_for_each_entry(dmabuf, dmabuf_list, list) {
		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
			break;
20073 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
20074 wr_object->u.request.bde[i].addrHigh =
20075 putPaddrHigh(dmabuf->phys);
		if (written + SLI4_PAGE_SIZE >= size) {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				(size - written);
			written += (size - written);
			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
			bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
			check_change_status = true;
		} else {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				SLI4_PAGE_SIZE;
			written += SLI4_PAGE_SIZE;
		}
		i++;
	}
	wr_object->u.request.bde_count = i;
20091 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
20092 if (!phba->sli4_hba.intr_enable)
20093 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
20095 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
20096 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
20098 /* The IOCTL status is embedded in the mailbox subheader. */
20099 shdr_status = bf_get(lpfc_mbox_hdr_status,
20100 &wr_object->header.cfg_shdr.response);
20101 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20102 &wr_object->header.cfg_shdr.response);
20103 if (check_change_status) {
20104 shdr_change_status = bf_get(lpfc_wr_object_change_status,
20105 &wr_object->u.response);
20107 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
20108 shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
			shdr_csf = bf_get(lpfc_wr_object_csf,
					  &wr_object->u.response);
			if (shdr_csf)
				shdr_change_status =
						LPFC_CHANGE_STATUS_PCI_RESET;
		}

		switch (shdr_change_status) {
		case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3198 Firmware write complete: System "
					"reboot required to instantiate\n");
			break;
		case (LPFC_CHANGE_STATUS_FW_RESET):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3199 Firmware write complete: Firmware"
					" reset required to instantiate\n");
			break;
		case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3200 Firmware write complete: Port "
					"Migration or PCI Reset required to "
					"instantiate\n");
			break;
		case (LPFC_CHANGE_STATUS_PCI_RESET):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3201 Firmware write complete: PCI "
					"Reset required to instantiate\n");
			break;
		default:
			break;
		}
	}
20142 if (!phba->sli4_hba.intr_enable)
20143 mempool_free(mbox, phba->mbox_mem_pool);
20144 else if (rc != MBX_TIMEOUT)
20145 mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3025 Write Object mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
		*offset = shdr_add_status;
	} else
		*offset += wr_object->u.response.actual_write_length;
	return rc;
}
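/*
 * Illustrative sketch (editorial addition, not driver code): the BDE
 * fill loop above slices @size bytes into SLI4_PAGE_SIZE chunks and
 * flags the chunk that completes the object (eof/eas). The same shape
 * in standalone C, with a hypothetical demo_bde type:
 */
#if 0	/* sketch only, excluded from compilation */
#include <stdint.h>

#define DEMO_PAGE_SIZE 4096u
#define DEMO_MAX_BDE   20u

struct demo_bde {
	uint32_t size;
	int	 eof;	/* set on the chunk that completes the object */
};

static uint32_t demo_fill_bdes(struct demo_bde *bde, uint32_t size)
{
	uint32_t written = 0, i = 0;

	while (i < DEMO_MAX_BDE && written < size) {
		if (written + DEMO_PAGE_SIZE >= size) {
			bde[i].size = size - written;	/* short final chunk */
			bde[i].eof = 1;
			written = size;
		} else {
			bde[i].size = DEMO_PAGE_SIZE;
			bde[i].eof = 0;
			written += DEMO_PAGE_SIZE;
		}
		i++;
	}
	return written;	/* bytes described by the i BDEs */
}
#endif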
/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 * and REG_VPI mailbox commands associated with the vport. This function
 * is called when the driver wants to restart discovery of the vport due to
 * a Clear Virtual Link event.
 **/
void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
20170 struct lpfc_hba *phba = vport->phba;
20171 LPFC_MBOXQ_t *mb, *nextmb;
20172 struct lpfc_dmabuf *mp;
20173 struct lpfc_nodelist *ndlp;
20174 struct lpfc_nodelist *act_mbx_ndlp = NULL;
20175 LIST_HEAD(mbox_cmd_list);
20176 uint8_t restart_loop;
20178 /* Clean up internally queued mailbox commands with the vport */
20179 spin_lock_irq(&phba->hbalock);
20180 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if (mb->vport != vport)
			continue;

		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
			(mb->u.mb.mbxCommand != MBX_REG_VPI))
			continue;

		list_move_tail(&mb->list, &mbox_cmd_list);
	}
20190 /* Clean up active mailbox command with the vport */
20191 mb = phba->sli.mbox_active;
20192 if (mb && (mb->vport == vport)) {
20193 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
20194 (mb->u.mb.mbxCommand == MBX_REG_VPI))
20195 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
20196 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20197 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20198 /* Put reference count for delayed processing */
20199 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
20200 /* Unregister the RPI when mailbox complete */
			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
		}
	}

	/* Cleanup any mailbox completions which are not yet processed */
	do {
		restart_loop = 0;
		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
			/*
			 * If this mailbox is already processed or it is
			 * for another vport ignore it.
			 */
			if ((mb->vport != vport) ||
				(mb->mbox_flag & LPFC_MBX_IMED_UNREG))
				continue;

			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
				(mb->u.mb.mbxCommand != MBX_REG_VPI))
				continue;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
				ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
				/* Unregister the RPI when mailbox complete */
				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
				restart_loop = 1;
				spin_unlock_irq(&phba->hbalock);
				spin_lock(&ndlp->lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(&ndlp->lock);
				spin_lock_irq(&phba->hbalock);
				break;
			}
		}
	} while (restart_loop);
20236 spin_unlock_irq(&phba->hbalock);
20238 /* Release the cleaned-up mailbox commands */
20239 while (!list_empty(&mbox_cmd_list)) {
20240 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			mb->ctx_buf = NULL;
			ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
			mb->ctx_ndlp = NULL;
			if (ndlp) {
				spin_lock(&ndlp->lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(&ndlp->lock);
				lpfc_nlp_put(ndlp);
			}
		}
		mempool_free(mb, phba->mbox_mem_pool);
	}
20260 /* Release the ndlp with the cleaned-up active mailbox command */
20261 if (act_mbx_ndlp) {
20262 spin_lock(&act_mbx_ndlp->lock);
20263 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20264 spin_unlock(&act_mbx_ndlp->lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}

/**
20270 * lpfc_drain_txq - Drain the txq
20271 * @phba: Pointer to HBA context object.
 * This function attempts to submit IOCBs on the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no SGLs. This congestion can occur with large
 * vport counts during node discovery.
 **/
uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
20283 LIST_HEAD(completions);
20284 struct lpfc_sli_ring *pring;
20285 struct lpfc_iocbq *piocbq = NULL;
20286 unsigned long iflags = 0;
20287 char *fail_msg = NULL;
20288 struct lpfc_sglq *sglq;
20289 union lpfc_wqe128 wqe;
20290 uint32_t txq_cnt = 0;
20291 struct lpfc_queue *wq;
	if (phba->link_flag & LS_MDS_LOOPBACK) {
		/* MDS WQE are posted only to first WQ*/
		wq = phba->sli4_hba.hdwq[0].io_wq;
		if (unlikely(!wq))
			return 0;
		pring = wq->pring;
	} else {
		wq = phba->sli4_hba.els_wq;
		if (unlikely(!wq))
			return 0;
		pring = lpfc_phba_elsring(phba);
	}

	if (unlikely(!pring) || list_empty(&pring->txq))
		return 0;
20309 spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry(piocbq, &pring->txq, list) {
		txq_cnt++;
	}

	if (txq_cnt > pring->txq_max)
		pring->txq_max = txq_cnt;
20317 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20319 while (!list_empty(&pring->txq)) {
20320 spin_lock_irqsave(&pring->ring_lock, iflags);
		piocbq = lpfc_sli_ringtx_get(phba, pring);
		if (!piocbq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2823 txq empty and txq_cnt is %d\n ",
				txq_cnt);
			break;
		}
		sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
		if (!sglq) {
			__lpfc_sli_ringtx_put(phba, pring, piocbq);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			break;
		}
		txq_cnt--;
		/* The xri and iocb resources secured,
		 * attempt to issue request
		 */
20341 piocbq->sli4_lxritag = sglq->sli4_lxritag;
20342 piocbq->sli4_xritag = sglq->sli4_xritag;
20343 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
20344 fail_msg = "to convert bpl to sgl";
20345 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
20346 fail_msg = "to convert iocb to wqe";
20347 else if (lpfc_sli4_wq_put(wq, &wqe))
20348 fail_msg = " - Wq is full";
		else
			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

		if (fail_msg) {
			/* Failed means we can't issue and need to cancel */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2822 IOCB failed %s iotag 0x%x "
					"xri 0x%x\n",
					fail_msg,
					piocbq->iotag, piocbq->sli4_xritag);
			list_add_tail(&piocbq->list, &completions);
		}
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	}
20364 /* Cancel all the IOCBs that cannot be issued */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return txq_cnt;
}

/**
20372 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
20373 * @phba: Pointer to HBA context object.
20374 * @pwqeq: Pointer to command WQE.
20375 * @sglq: Pointer to the scatter gather queue object.
20377 * This routine converts the bpl or bde that is in the WQE
20378 * to a sgl list for the sli4 hardware. The physical address
20379 * of the bpl/bde is converted back to a virtual address.
20380 * If the WQE contains a BPL then the list of BDE's is
20381 * converted to sli4_sge's. If the WQE contains a single
20382 * BDE then it is converted to a single sli_sge.
20383 * The WQE is still in cpu endianness so the contents of
20384 * the bpl can be used without byte swapping.
20386 * Returns valid XRI = Success, NO_XRI = Failure.
static uint16_t
lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
		 struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl = NULL;
	struct lpfc_dmabuf *dmabuf;
	union lpfc_wqe128 *wqe;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request list */
	int inbound = 0; /* number of sg reply entries inbound from firmware */
	uint32_t cmd;

	if (!pwqeq || !sglq)
		return xritag;
	sgl = (struct sli4_sge *)sglq->sgl;
	wqe = &pwqeq->wqe;
	pwqeq->iocb.ulpIoTag = pwqeq->iotag;

	cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
	if (cmd == CMD_XMIT_BLS_RSP64_WQE)
		return sglq->sli4_xritag;
	numBdes = pwqeq->rsvd2;
	if (numBdes) {
		/* The addrHigh and addrLow fields within the WQE
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (pwqeq->context3)
			dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
		else
			return xritag;

		bpl = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;
20429 for (i = 0; i < numBdes; i++) {
20430 /* Should already be byte swapped. */
20431 sgl->addr_hi = bpl->addrHigh;
20432 sgl->addr_lo = bpl->addrLow;
20434 sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
20439 /* swap the size field back to the cpu so we
20440 * can assign it to the sgl.
20442 bde.tus.w = le32_to_cpu(bpl->tus.w);
20443 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			switch (cmd) {
			case CMD_GEN_REQUEST64_WQE:
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
				break;
			case CMD_FCP_TRSP64_WQE:
				bf_set(lpfc_sli4_sge_offset, sgl, 0);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
				break;
			case CMD_FCP_TSEND64_WQE:
			case CMD_FCP_TRECEIVE64_WQE:
				bf_set(lpfc_sli4_sge_type, sgl,
				       bpl->tus.f.bdeFlags);
				if (i < 3)
					offset = 0;
				else
					offset += bde.tus.f.bdeSize;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				break;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
20481 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
20482 /* The addrHigh and addrLow fields of the BDE have not
20483 * been byteswapped yet so they need to be swapped
20484 * before putting them in the sgl.
20486 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
20487 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
20488 sgl->word2 = le32_to_cpu(sgl->word2);
20489 bf_set(lpfc_sli4_sge_last, sgl, 1);
20490 sgl->word2 = cpu_to_le32(sgl->word2);
20491 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
	}
	return sglq->sli4_xritag;
}
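/*
 * Illustrative sketch (editorial addition, not driver code): stripped
 * of the endianness and offset bookkeeping, the BPL-to-SGL conversion
 * above walks a descriptor list and marks only the final entry with the
 * "last" flag. The skeleton in standalone C, with hypothetical demo_*
 * types:
 */
#if 0	/* sketch only, excluded from compilation */
#include <stdint.h>

struct demo_bde { uint64_t addr; uint32_t len; };
struct demo_sge { uint64_t addr; uint32_t len; int last; };

static void demo_bpl_to_sgl(const struct demo_bde *bpl, struct demo_sge *sgl,
			    int n)
{
	int i;

	for (i = 0; i < n; i++) {
		sgl[i].addr = bpl[i].addr;
		sgl[i].len  = bpl[i].len;
		sgl[i].last = (i + 1 == n);	/* flag the final entry */
	}
}
#endif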
/**
 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
 * @phba: Pointer to HBA context object.
 * @qp: Pointer to HDW queue.
 * @pwqe: Pointer to command WQE.
 **/
int
lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
		    struct lpfc_iocbq *pwqe)
{
	union lpfc_wqe128 *wqe = &pwqe->wqe;
20507 struct lpfc_async_xchg_ctx *ctxp;
20508 struct lpfc_queue *wq;
20509 struct lpfc_sglq *sglq;
20510 struct lpfc_sli_ring *pring;
	unsigned long iflags;
	uint32_t ret = 0;
20514 /* NVME_LS and NVME_LS ABTS requests. */
20515 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
20516 pring = phba->sli4_hba.nvmels_wq->pring;
		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
					  qp, wq_access);
		sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
		if (!sglq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_BUSY;
		}
20524 pwqe->sli4_lxritag = sglq->sli4_lxritag;
20525 pwqe->sli4_xritag = sglq->sli4_xritag;
		if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_ERROR;
		}
20530 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
20531 pwqe->sli4_xritag);
		ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);

		lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
		return 0;
	}
20545 /* NVME_FCREQ and NVME_ABTS requests */
20546 if (pwqe->iocb_flag & LPFC_IO_NVME ||
20547 pwqe->iocb_flag & LPFC_IO_FCP) {
		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
		wq = qp->io_wq;
		pring = wq->pring;

		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
					  qp, wq_access);
		ret = lpfc_sli4_wq_put(wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);

		lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
		return 0;
	}
20568 /* NVMET requests */
20569 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
		wq = qp->io_wq;
		pring = wq->pring;

		ctxp = pwqe->context2;
		sglq = ctxp->ctxbuf->sglq;
		if (pwqe->sli4_xritag == NO_XRI) {
			pwqe->sli4_lxritag = sglq->sli4_lxritag;
			pwqe->sli4_xritag = sglq->sli4_xritag;
		}
		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
		       pwqe->sli4_xritag);
		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);

		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
					  qp, wq_access);
		ret = lpfc_sli4_wq_put(wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);

		lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
		return 0;
	}
	return WQE_ERROR;
}

/**
20601 * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
20602 * @phba: Pointer to HBA context object.
20603 * @cmdiocb: Pointer to driver command iocb object.
20604 * @cmpl: completion function.
20606 * Fill the appropriate fields for the abort WQE and call
20607 * internal routine lpfc_sli4_issue_wqe to send the WQE
20608 * This function is called with hbalock held and no ring_lock held.
 * RETURNS 0 - SUCCESS
 **/
int
lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			    void *cmpl)
{
	struct lpfc_vport *vport = cmdiocb->vport;
20618 struct lpfc_iocbq *abtsiocb = NULL;
20619 union lpfc_wqe128 *abtswqe;
20620 struct lpfc_io_buf *lpfc_cmd;
20621 int retval = IOCB_ERROR;
	u16 xritag = cmdiocb->sli4_xritag;

	/*
	 * The scsi command can not be in txq and it is in flight because the
	 * pCmd is still pointing at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */

	abtsiocb = __lpfc_sli_get_iocbq(phba);
	if (!abtsiocb)
		return WQE_NORESOURCE;
20634 /* Indicate the IO is being aborted by the driver. */
20635 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
20637 abtswqe = &abtsiocb->wqe;
20638 memset(abtswqe, 0, sizeof(*abtswqe));
20640 if (!lpfc_is_link_up(phba))
20641 bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
20642 bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
20643 abtswqe->abort_cmd.rsrvd5 = 0;
20644 abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
20645 bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
20646 bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
20647 bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
20648 bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
20649 bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
20650 bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
20652 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
20653 abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
20654 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
20655 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
20656 abtsiocb->iocb_flag |= LPFC_IO_FCP;
20657 if (cmdiocb->iocb_flag & LPFC_IO_NVME)
20658 abtsiocb->iocb_flag |= LPFC_IO_NVME;
20659 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
20660 abtsiocb->iocb_flag |= LPFC_IO_FOF;
20661 abtsiocb->vport = vport;
20662 abtsiocb->wqe_cmpl = cmpl;
20664 lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
20665 retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
20667 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
20668 "0359 Abort xri x%x, original iotag x%x, "
20669 "abort cmd iotag x%x retval x%x\n",
			 xritag, cmdiocb->iotag, abtsiocb->iotag, retval);

	if (retval) {
		cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
		__lpfc_sli_release_iocbq(phba, abtsiocb);
	}

	return retval;
}
20680 #ifdef LPFC_MXP_STAT
/**
 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
20683 * @phba: pointer to lpfc hba data structure.
20684 * @hwqid: belong to which HWQ.
20686 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
20687 * 15 seconds after a test case is running.
20689 * The user should call lpfc_debugfs_multixripools_write before running a test
20690 * case to clear stat_snapshot_taken. Then the user starts a test case. During
20691 * test case is running, stat_snapshot_taken is incremented by 1 every time when
20692 * this routine is called from heartbeat timer. When stat_snapshot_taken is
20693 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
20695 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
20697 struct lpfc_sli4_hdw_queue *qp;
20698 struct lpfc_multixri_pool *multixri_pool;
20699 struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_pbl_pool *pbl_pool;
	u32 txcmplq_cnt;

	qp = &phba->sli4_hba.hdwq[hwqid];
20704 multixri_pool = qp->p_multixri_pool;
	if (!multixri_pool)
		return;
20708 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
20709 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20710 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20711 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20713 multixri_pool->stat_pbl_count = pbl_pool->count;
20714 multixri_pool->stat_pvt_count = pvt_pool->count;
		multixri_pool->stat_busy_count = txcmplq_cnt;
	}

	multixri_pool->stat_snapshot_taken++;
}
#endif

/**
20723 * lpfc_adjust_pvt_pool_count - Adjust private pool count
20724 * @phba: pointer to lpfc hba data structure.
 * @hwqid: index of the HWQ whose private pool is adjusted.
 *
 * This routine moves some XRIs from private to public pool when the private
 * pool is not busy.
 **/
void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_multixri_pool *multixri_pool;
	u32 io_req_count;
	u32 prev_io_req_count;

	multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
	if (!multixri_pool)
		return;
	io_req_count = multixri_pool->io_req_count;
20740 prev_io_req_count = multixri_pool->prev_io_req_count;
20742 if (prev_io_req_count != io_req_count) {
20743 /* Private pool is busy */
		multixri_pool->prev_io_req_count = io_req_count;
	} else {
		/* Private pool is not busy.
		 * Move XRIs from private to public pool.
		 */
		lpfc_move_xri_pvt_to_pbl(phba, hwqid);
	}
}

/**
20754 * lpfc_adjust_high_watermark - Adjust high watermark
20755 * @phba: pointer to lpfc hba data structure.
 * @hwqid: index of the HWQ whose watermark is adjusted.
 *
 * This routine sets the high watermark to the number of outstanding XRIs,
 * but makes sure the new value is between xri_limit/2 and xri_limit.
 **/
void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
{
	u32 new_watermark;
	u32 watermark_max;
	u32 watermark_min;
	u32 xri_limit;
	u32 txcmplq_cnt;
	u32 abts_io_bufs;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_sli4_hdw_queue *qp;

	qp = &phba->sli4_hba.hdwq[hwqid];
	multixri_pool = qp->p_multixri_pool;
	if (!multixri_pool)
		return;
	xri_limit = multixri_pool->xri_limit;
20778 watermark_max = xri_limit;
20779 watermark_min = xri_limit / 2;
20781 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20782 abts_io_bufs = qp->abts_scsi_io_bufs;
20783 abts_io_bufs += qp->abts_nvme_io_bufs;
20785 new_watermark = txcmplq_cnt + abts_io_bufs;
20786 new_watermark = min(watermark_max, new_watermark);
20787 new_watermark = max(watermark_min, new_watermark);
20788 multixri_pool->pvt_pool.high_watermark = new_watermark;
20790 #ifdef LPFC_MXP_STAT
	multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
					  new_watermark);
#endif
}
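/*
 * Illustrative sketch (editorial addition, not driver code): the
 * watermark update above is a clamp of "current outstanding I/O" into
 * the range [xri_limit / 2, xri_limit]. As a standalone helper:
 */
#if 0	/* sketch only, excluded from compilation */
#include <stdint.h>

static uint32_t demo_clamp_watermark(uint32_t outstanding, uint32_t xri_limit)
{
	uint32_t wm = outstanding;

	if (wm > xri_limit)		/* never above the XRI limit */
		wm = xri_limit;
	if (wm < xri_limit / 2)		/* never below half the limit */
		wm = xri_limit / 2;
	return wm;
}
#endif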
/**
 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: index of the owning HWQ.
 *
 * This routine is called from the heartbeat timer when pvt_pool is idle.
 * All free XRIs are moved from private to public pool on hwqid with 2 steps.
 * The first step moves (all - low_watermark) amount of XRIs.
 * The second step moves the rest of the XRIs.
 **/
void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
{
20808 struct lpfc_pbl_pool *pbl_pool;
20809 struct lpfc_pvt_pool *pvt_pool;
20810 struct lpfc_sli4_hdw_queue *qp;
20811 struct lpfc_io_buf *lpfc_ncmd;
20812 struct lpfc_io_buf *lpfc_ncmd_next;
20813 unsigned long iflag;
	struct list_head tmp_list;
	u32 tmp_count;

	qp = &phba->sli4_hba.hdwq[hwqid];
20818 pbl_pool = &qp->p_multixri_pool->pbl_pool;
	pvt_pool = &qp->p_multixri_pool->pvt_pool;
	tmp_count = 0;
20822 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
20823 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
	if (pvt_pool->count > pvt_pool->low_watermark) {
		/* Step 1: move (all - low_watermark) from pvt_pool
		 * to pbl_pool
		 */

		/* Move low watermark of bufs from pvt_pool to tmp_list */
		INIT_LIST_HEAD(&tmp_list);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pvt_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list, &tmp_list);
			tmp_count++;
			if (tmp_count >= pvt_pool->low_watermark)
				break;
		}
20840 /* Move all bufs from pvt_pool to pbl_pool */
20841 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20843 /* Move all bufs from tmp_list to pvt_pool */
20844 list_splice(&tmp_list, &pvt_pool->list);
20846 pbl_pool->count += (pvt_pool->count - tmp_count);
20847 pvt_pool->count = tmp_count;
20849 /* Step 2: move the rest from pvt_pool to pbl_pool */
20850 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20851 pbl_pool->count += pvt_pool->count;
20852 pvt_pool->count = 0;
20855 spin_unlock(&pvt_pool->lock);
20856 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
/**
 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
 * @phba: pointer to lpfc hba data structure
 * @qp: pointer to HDW queue
 * @pbl_pool: specified public free XRI pool
 * @pvt_pool: specified private free XRI pool
 * @count: number of XRIs to move
 *
 * This routine tries to move some free common bufs from the specified
 * pbl_pool to the specified pvt_pool. It might move fewer than count XRIs
 * if there's not enough in the public pool.
 *
 * Return:
 *   true - if XRIs are successfully moved from the specified pbl_pool to the
 *          specified pvt_pool
 *   false - if the specified pbl_pool is empty or locked by someone else
 **/
static bool
_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
			  struct lpfc_pbl_pool *pbl_pool,
			  struct lpfc_pvt_pool *pvt_pool, u32 count)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	int ret;
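
	/* Only trylock the public pool: it may belong to another HWQ, and a
	 * contended lock is treated the same as an empty pool, so the caller
	 * can simply try the next HWQ's pool.
	 */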
	ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
	if (ret) {
		if (pbl_pool->count) {
			/* Move a batch of XRIs from public to private pool */
			lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
			list_for_each_entry_safe(lpfc_ncmd,
						 lpfc_ncmd_next,
						 &pbl_pool->list,
						 list) {
				list_move_tail(&lpfc_ncmd->list,
					       &pvt_pool->list);
				pvt_pool->count++;
				pbl_pool->count--;
				count--;
				if (count == 0)
					break;
			}

			spin_unlock(&pvt_pool->lock);
			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
			return true;
		}
		spin_unlock_irqrestore(&pbl_pool->lock, iflag);
	}

	return false;
}
/**
 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: index of the HWQ the pool belongs to.
 * @count: number of XRIs to move
 *
 * This routine tries to find some free common bufs in one of the public
 * pools with the Round Robin method. The search always starts from the local
 * hwqid, then from the next HWQ that was found last time (rrb_next_hwqid).
 * Once a public pool is found, a batch of free common bufs is moved to the
 * private pool on hwqid. It might move fewer than count XRIs if there's not
 * enough in the public pool.
 **/
void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
{
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_multixri_pool *next_multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_sli4_hdw_queue *qp;
	u32 next_hwqid;
	u32 hwq_count;
	int ret;

	qp = &phba->sli4_hba.hdwq[hwqid];
	multixri_pool = qp->p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;
	pbl_pool = &multixri_pool->pbl_pool;

	/* Check if local pbl_pool is available */
	ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
	if (ret) {
#ifdef LPFC_MXP_STAT
		multixri_pool->local_pbl_hit_count++;
#endif
		return;
	}
	hwq_count = phba->cfg_hdw_queue;

	/* Get the next hwqid which was found last time */
	next_hwqid = multixri_pool->rrb_next_hwqid;

	do {
		/* Go to next hwq */
		next_hwqid = (next_hwqid + 1) % hwq_count;

		next_multixri_pool =
			phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
		pbl_pool = &next_multixri_pool->pbl_pool;

		/* Check if the public free xri pool is available */
		ret = _lpfc_move_xri_pbl_to_pvt(
			phba, qp, pbl_pool, pvt_pool, count);

		/* Exit while-loop if success or all hwqid are checked */
	} while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);

	/* Starting point for the next time */
	multixri_pool->rrb_next_hwqid = next_hwqid;

	if (!ret) {
		/* stats: all public pools are empty */
		multixri_pool->pbl_empty_count++;
	}

#ifdef LPFC_MXP_STAT
	if (ret) {
		if (next_hwqid == hwqid)
			multixri_pool->local_pbl_hit_count++;
		else
			multixri_pool->other_pbl_hit_count++;
	}
#endif
}
/**
 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: index of the HWQ the pool belongs to.
 *
 * This routine gets a batch of XRIs from pbl_pool if pvt_pool is less than
 * the low watermark.
 **/
void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;

	multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;

	if (pvt_pool->count < pvt_pool->low_watermark)
		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
}
/**
 * lpfc_release_io_buf - Return one IO buf back to free pool
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_ncmd: IO buf to be returned.
 * @qp: the HWQ this buffer belongs to.
 *
 * This routine returns one IO buf back to the free pool. If this is an
 * urgent IO, the IO buf is returned to the expedite pool. If
 * cfg_xri_rebalancing==1, the IO buf is returned to pbl_pool or pvt_pool
 * based on watermark and xri_limit. If cfg_xri_rebalancing==0, the IO buf
 * is returned to lpfc_io_buf_list_put.
 **/
void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
			 struct lpfc_sli4_hdw_queue *qp)
{
	unsigned long iflag;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_epd_pool *epd_pool;
	u32 txcmplq_cnt;
	u32 xri_owned;
	u32 xri_limit;
	u32 abts_io_bufs;

	/* MUST zero fields if buffer is reused by another protocol */
	lpfc_ncmd->nvmeCmd = NULL;
	lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
	lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;

	if (phba->cfg_xpsgl && !phba->nvmet_support &&
	    !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
		lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);

	if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
		lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);

	if (phba->cfg_xri_rebalancing) {
		if (lpfc_ncmd->expedite) {
			/* Return to expedite pool */
			epd_pool = &phba->epd_pool;
			spin_lock_irqsave(&epd_pool->lock, iflag);
			list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
			epd_pool->count++;
			spin_unlock_irqrestore(&epd_pool->lock, iflag);
			return;
		}

		/* Avoid invalid access if an IO sneaks in and is being
		 * rejected just _after_ xri pools are destroyed in
		 * lpfc_offline. Nothing much can be done at this point.
		 */
		if (!qp->p_multixri_pool)
			return;

		pbl_pool = &qp->p_multixri_pool->pbl_pool;
		pvt_pool = &qp->p_multixri_pool->pvt_pool;

		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
		abts_io_bufs = qp->abts_scsi_io_bufs;
		abts_io_bufs += qp->abts_nvme_io_bufs;
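
		/* Everything this HWQ currently owns: free bufs parked in
		 * its private pool, IOs still active on the txcmplq, and
		 * outstanding SCSI/NVMe aborts.
		 */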
		xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
		xri_limit = qp->p_multixri_pool->xri_limit;

#ifdef LPFC_MXP_STAT
		if (xri_owned <= xri_limit)
			qp->p_multixri_pool->below_limit_count++;
		else
			qp->p_multixri_pool->above_limit_count++;
#endif

		/* XRI goes to either public or private free xri pool
		 * based on watermark and xri_limit
		 */
		if ((pvt_pool->count < pvt_pool->low_watermark) ||
		    (xri_owned < xri_limit &&
		     pvt_pool->count < pvt_pool->high_watermark)) {
			lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
						  qp, free_pvt_pool);
			list_add_tail(&lpfc_ncmd->list,
				      &pvt_pool->list);
			pvt_pool->count++;
			spin_unlock_irqrestore(&pvt_pool->lock, iflag);
		} else {
			lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
						  qp, free_pub_pool);
			list_add_tail(&lpfc_ncmd->list,
				      &pbl_pool->list);
			pbl_pool->count++;
			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
		}
	} else {
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
					  qp, free_xri);
		list_add_tail(&lpfc_ncmd->list,
			      &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs++;
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
				       iflag);
	}
}
/**
 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
 * @phba: pointer to lpfc hba data structure.
 * @qp: pointer to HDW queue
 * @pvt_pool: pointer to private pool data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 *
 * This routine tries to get one free IO buf from the private pool.
 *
 * Return:
 *   pointer to one free IO buf - if private pool is not empty
 *   NULL - if private pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
				  struct lpfc_sli4_hdw_queue *qp,
				  struct lpfc_pvt_pool *pvt_pool,
				  struct lpfc_nodelist *ndlp)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;

	lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &pvt_pool->list, list) {
		if (lpfc_test_rrq_active(
			phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
			continue;
		list_del(&lpfc_ncmd->list);
		pvt_pool->count--;
		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
		return lpfc_ncmd;
	}
	spin_unlock_irqrestore(&pvt_pool->lock, iflag);

	return NULL;
}
/**
 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine tries to get one free IO buf from the expedite pool.
 *
 * Return:
 *   pointer to one free IO buf - if expedite pool is not empty
 *   NULL - if expedite pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct lpfc_epd_pool *epd_pool;

	epd_pool = &phba->epd_pool;
	lpfc_ncmd = NULL;

	spin_lock_irqsave(&epd_pool->lock, iflag);
	if (epd_pool->count > 0) {
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &epd_pool->list, list) {
			list_del(&lpfc_ncmd->list);
			epd_pool->count--;
			break;
		}
	}
	spin_unlock_irqrestore(&epd_pool->lock, iflag);

	return lpfc_ncmd;
}
/**
 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
 * @phba: pointer to lpfc hba data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: index of the HWQ the buffer belongs to
 * @expedite: 1 means this request is urgent.
 *
 * This routine will do the following actions and then return a pointer to
 * one free IO buf.
 *
 * 1. If private free xri count is empty, move some XRIs from public to
 *    private pool.
 * 2. Get one XRI from private free xri pool.
 * 3. If we fail to get one from pvt_pool and this is an expedite request,
 *    get one free xri from expedite pool.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
 *
 * Return:
 *   pointer to one free IO buf - if private pool is not empty
 *   NULL - if private pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    int hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_io_buf *lpfc_ncmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_ncmd = NULL;
	multixri_pool = qp->p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;
	multixri_pool->io_req_count++;

	/* If pvt_pool is empty, move some XRIs from public to private pool */
	if (pvt_pool->count == 0)
		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);

	/* Get one XRI from private free xri pool */
	lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);

	if (lpfc_ncmd) {
		lpfc_ncmd->hdwq = qp;
		lpfc_ncmd->hdwq_no = hwqid;
	} else if (expedite) {
		/* If we fail to get one from pvt_pool and this is an expedite
		 * request, get one free xri from expedite pool.
		 */
		lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
	}

	return lpfc_ncmd;
}
static inline struct lpfc_io_buf *
lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;

	qp = &phba->sli4_hba.hdwq[idx];
	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
				 &qp->lpfc_io_buf_list_get, list) {
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_lxritag))
			continue;
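
		/* Skip buffers flagged as not yet posted to the port
		 * (LPFC_SBUF_NOT_POSTED); their XRIs are not usable for IO
		 * until posting succeeds.
		 */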
		if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
			continue;

		list_del_init(&lpfc_cmd->list);
		qp->get_io_bufs--;
		lpfc_cmd->hdwq = qp;
		lpfc_cmd->hdwq_no = idx;
		return lpfc_cmd;
	}
	return NULL;
}
/**
 * lpfc_get_io_buf - Get one IO buffer from free pool
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: index of the HWQ the buffer belongs to
 * @expedite: 1 means this request is urgent.
 *
 * This routine gets one IO buffer from the free pool. If
 * cfg_xri_rebalancing==1, it removes an IO buffer from the multiXRI pools.
 * If cfg_xri_rebalancing==0, it removes an IO buffer from the head of the
 * @hwqid io_buf_list and returns it to the caller.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    u32 hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag;
	struct lpfc_io_buf *lpfc_cmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_cmd = NULL;

	if (phba->cfg_xri_rebalancing)
		lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
			phba, ndlp, hwqid, expedite);
	else {
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
					  qp, alloc_xri_get);
		if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
			lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
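
		/* The free buffers are kept on two lists to cut lock
		 * contention: allocations drain the "get" list, completions
		 * refill the "put" list, and only when the get list runs dry
		 * is the put list spliced over below.
		 */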
		if (!lpfc_cmd) {
			lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
					  qp, alloc_xri_put);
			list_splice(&qp->lpfc_io_buf_list_put,
				    &qp->lpfc_io_buf_list_get);
			qp->get_io_bufs += qp->put_io_bufs;
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
			qp->put_io_bufs = 0;
			spin_unlock(&qp->io_buf_list_put_lock);
			if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
			    expedite)
				lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		}
		spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
	}

	return lpfc_cmd;
}
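
/* Typical pairing of the two entry points above, as used by the SCSI/NVMe
 * submission paths (an orientation sketch, not code from this file):
 *
 *	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, hwqid, expedite);
 *	if (!lpfc_cmd)
 *		return NULL;	// no free XRI; caller backs off and retries
 *	// ... build the WQE and issue the IO ...
 *	// later, from the completion path:
 *	lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
 */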
/**
 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to append the SGL chunk
 *
 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
 * and will allocate an SGL chunk if the pool is empty.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to sli4_hybrid_sgl - Success
 **/
struct sli4_hybrid_sgl *
lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	struct sli4_hybrid_sgl *allocated_sgl = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->sgl_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the sgl_list */
		list_for_each_entry_safe(list_entry, tmp,
					 buf_list, list_node) {
			list_move_tail(&list_entry->list_node,
				       &lpfc_buf->dma_sgl_xtra_list);
			break;
		}
	} else {
		/* allocate more */
		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
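		/* Allocate with the hdwq lock dropped; GFP_ATOMIC because
		 * this path can run in atomic IO context, kmalloc_node() to
		 * keep the chunk on the node servicing this hardware queue.
		 */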
		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
				   cpu_to_node(hdwq->io_wq->chann));
		if (!tmp) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8353 error kmalloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			return NULL;
		}

		tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
					      GFP_ATOMIC, &tmp->dma_phys_sgl);
		if (!tmp->dma_sgl) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8354 error pool_alloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			kfree(tmp);
			return NULL;
		}

		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
		list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
	}

	allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
					struct sli4_hybrid_sgl,
					list_node);

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

	return allocated_sgl;
}
/**
 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the SGL chunk
 *
 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
 *
 * Return codes:
 *   0 - Success
 *   -EINVAL - Error
 **/
int
lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
	int rc = 0;
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->sgl_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
		list_for_each_entry_safe(list_entry, tmp,
					 &lpfc_buf->dma_sgl_xtra_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       buf_list);
		}
	} else {
		rc = -EINVAL;
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
	return rc;
}
/**
 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to cleanup sgl buff resources on
 *
 * This routine frees all SGL chunks of hdwq's SGL chunk pool.
 *
 * Return codes:
 *   None
 **/
void
lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
		       struct lpfc_sli4_hdw_queue *hdwq)
{
	struct list_head *buf_list = &hdwq->sgl_list;
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	/* Free sgl pool */
	list_for_each_entry_safe(list_entry, tmp,
				 buf_list, list_node) {
		dma_pool_free(phba->lpfc_sg_dma_buf_pool,
			      list_entry->dma_sgl,
			      list_entry->dma_phys_sgl);
		list_del(&list_entry->list_node);
		kfree(list_entry);
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}
/**
 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
 *
 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
 * and will allocate a CMD/RSP buffer if the pool is empty.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to fcp_cmd_rsp_buf - Success
 **/
struct fcp_cmd_rsp_buf *
lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
{
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	struct fcp_cmd_rsp_buf *allocated_buf = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the list */
		list_for_each_entry_safe(list_entry, tmp,
					 buf_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       &lpfc_buf->dma_cmd_rsp_list);
			break;
		}
	} else {
		/* allocate more */
		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
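		/* Same allocate-outside-the-lock pattern as
		 * lpfc_get_sgl_per_hdwq() above.
		 */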
		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
				   cpu_to_node(hdwq->io_wq->chann));
		if (!tmp) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8355 error kmalloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			return NULL;
		}

		tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
					       GFP_ATOMIC,
					       &tmp->fcp_cmd_rsp_dma_handle);

		if (!tmp->fcp_cmnd) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8356 error pool_alloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			kfree(tmp);
			return NULL;
		}

		tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
						  sizeof(struct fcp_cmnd));
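
		/* cmd and rsp share one DMA allocation: the fcp_rsp sits
		 * immediately after the fcp_cmnd in the same pool buffer.
		 */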
		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
		list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
	}

	allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
					struct fcp_cmd_rsp_buf,
					list_node);

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

	return allocated_buf;
}
/**
 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the CMD/RSP buf
 *
 * This routine puts one CMD/RSP buffer into the executing CPU's CMD/RSP pool.
 *
 * Return codes:
 *   0 - Success
 *   -EINVAL - Error
 **/
int
lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
{
	int rc = 0;
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
		list_for_each_entry_safe(list_entry, tmp,
					 &lpfc_buf->dma_cmd_rsp_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       buf_list);
		}
	} else {
		rc = -EINVAL;
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
	return rc;
}
/**
 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to cleanup cmd rsp buff resources on
 *
 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
 *
 * Return codes:
 *   None
 **/
void
lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			       struct lpfc_sli4_hdw_queue *hdwq)
{
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	/* Free cmd_rsp buf pool */
	list_for_each_entry_safe(list_entry, tmp,
				 buf_list,
				 list_node) {
		dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
			      list_entry->fcp_cmnd,
			      list_entry->fcp_cmd_rsp_dma_handle);
		list_del(&list_entry->list_node);
		kfree(list_entry);
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}