/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
        LPFC_UNKNOWN_IOCB,
        LPFC_UNSOL_IOCB,
        LPFC_SOL_IOCB,
        LPFC_ABORT_IOCB
} lpfc_iocb_type;

/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
                                  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
                              uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
                                                         struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
                                          struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
                                    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
                                   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
                                     struct lpfc_queue *eq,
                                     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
                                    struct lpfc_queue *cq,
                                    struct lpfc_cqe *cqe);

union lpfc_wqe128 lpfc_iread_cmd_template;
union lpfc_wqe128 lpfc_iwrite_cmd_template;
union lpfc_wqe128 lpfc_icmnd_cmd_template;

IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
        return &iocbq->iocb;
}

/* Setup WQE templates for IOs */
void lpfc_wqe_cmd_template(void)
{
        union lpfc_wqe128 *wqe;

        /* IREAD template */
        wqe = &lpfc_iread_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - cmd_buff_len, payload_offset_len is zero */

        /* Word 4 - total_xfer_len is variable */

        /* Word 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
        bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
        bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */

        /* Word 9 - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
        bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
        bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
        bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

        /* Word 11 - pbde is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
        bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

        /* Word 12 - is zero */

        /* Word 13, 14, 15 - PBDE is variable */

        /* IWRITE template */
        wqe = &lpfc_iwrite_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - cmd_buff_len, payload_offset_len is zero */

        /* Word 4 - total_xfer_len is variable */

        /* Word 5 - initial_xfer_len is variable */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
        bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
        bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */

        /* Word 9 - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
        bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
        bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
        bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

        /* Word 11 - pbde is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
        bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

        /* Word 12 - is zero */

        /* Word 13, 14, 15 - PBDE is variable */

        /* ICMND template */
        wqe = &lpfc_icmnd_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - payload_offset_len is variable */

        /* Word 4, 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
        bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
        bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */

        /* Word 9 - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
        bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
        bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
        bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

        /* Word 11 */
        bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
        bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

        /* Word 12, 13, 14, 15 - is zero */
}
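
/*
 * Illustrative sketch (not part of the driver): an I/O submission path
 * would typically start from one of the templates initialized above and
 * fill in only the variable words, e.g. for an IREAD:
 *
 *      union lpfc_wqe128 wqe;
 *
 *      memcpy(&wqe, &lpfc_iread_cmd_template, sizeof(wqe));
 *      wqe.fcp_iread.total_xfer_len = xfer_len;            (Word 4)
 *      bf_set(wqe_xri_tag, &wqe.fcp_iread.wqe_com, xri);   (Word 6)
 *      bf_set(wqe_reqtag, &wqe.fcp_iread.wqe_com, iotag);  (Word 9)
 *
 * 'xfer_len', 'xri' and 'iotag' are placeholders for values computed by
 * the real submission paths (see lpfc_scsi.c / lpfc_nvme.c).
 */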

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of words required to be copied.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
        uint64_t *src = srcp;
        uint64_t *dest = destp;
        int i;

        for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
                *dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
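
/*
 * Note (illustrative): on big-endian and/or 32-bit builds the fallback
 * above resolves to lpfc_sli_pcimem_bcopy(), which copies 32 bits at a
 * time and performs the little-endian conversion per word, so callers
 * can use either path interchangeably.
 */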

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
        union lpfc_wqe *temp_wqe;
        struct lpfc_register doorbell;
        uint32_t host_index;
        uint32_t idx;
        uint32_t i = 0;
        uint8_t *tmp;
        u32 if_type;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;

        temp_wqe = lpfc_sli4_qe(q, q->host_index);

        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->host_index + 1) % q->entry_count);
        if (idx == q->hba_index) {
                q->WQ_overflow++;
                return -EBUSY;
        }
        q->WQ_posted++;
        /* set consumption flag every once in a while */
        if (!((q->host_index + 1) % q->notify_interval))
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
        else
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
        if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
                bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
        lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
        if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                /* write to DPP aperture taking advantage of Combined Writes */
                tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
                for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
                        __raw_writeq(*((uint64_t *)(tmp + i)),
                                     q->dpp_regaddr + i);
#else
                for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
                        __raw_writel(*((uint32_t *)(tmp + i)),
                                     q->dpp_regaddr + i);
#endif
        }
        /* ensure WQE bcopy and DPP flushed before doorbell write */
        wmb();

        /* Update the host index before invoking device */
        host_index = q->host_index;

        q->host_index = idx;

        /* Ring Doorbell */
        doorbell.word0 = 0;
        if (q->db_format == LPFC_DB_LIST_FORMAT) {
                if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                        bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
                               q->dpp_id);
                        bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
                               q->queue_id);
                } else {
                        bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

                        /* Leave bits <23:16> clear for if_type 6 dpp */
                        if_type = bf_get(lpfc_sli_intf_if_type,
                                         &q->phba->sli4_hba.sli_intf);
                        if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
                                bf_set(lpfc_wq_db_list_fm_index, &doorbell,
                                       host_index);
                }
        } else if (q->db_format == LPFC_DB_RING_FORMAT) {
                bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
                bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
        } else {
                return -EINVAL;
        }
        writel(doorbell.word0, q->db_regaddr);

        return 0;
}
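
/*
 * Sketch of a typical caller (illustrative only): lpfc_sli4_wq_put() is
 * invoked with the hbalock (or the owning ring lock) held, e.g.:
 *
 *      spin_lock_irqsave(&phba->hbalock, iflags);
 *      rc = lpfc_sli4_wq_put(wq, wqe);
 *      spin_unlock_irqrestore(&phba->hbalock, iflags);
 *
 * where 'wq' and 'wqe' are assumed to be a set-up work queue and a fully
 * built WQE; a non-zero 'rc' means the entry was not posted.
 */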

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers.
 **/
static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
        /* sanity check on queue memory */
        if (unlikely(!q))
                return;

        q->hba_index = index;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
        struct lpfc_mqe *temp_mqe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
        temp_mqe = lpfc_sli4_qe(q, q->host_index);

        /* If the host has not yet processed the next entry then we are done */
        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
                return -ENOMEM;
        lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
        /* Save off the mailbox pointer for completion */
        q->phba->mbox = (MAILBOX_t *)temp_mqe;

        /* Update the host index before invoking device */
        q->host_index = ((q->host_index + 1) % q->entry_count);

        /* Ring Doorbell */
        doorbell.word0 = 0;
        bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
        bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
        return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        /* Clear the mailbox pointer for completion */
        q->phba->mbox = NULL;
        q->hba_index = ((q->hba_index + 1) % q->entry_count);
        return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
        struct lpfc_eqe *eqe;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        eqe = lpfc_sli4_qe(q, q->host_index);

        /* If the next EQE is not valid then we are done */
        if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Speculative instructions were allowing a bcopy at the start
         * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
         * after our return, to copy data before the valid bit check above
         * was done. As such, some of the copied data was stale. The barrier
         * ensures the check is before any data is copied.
         */
        mb();
        return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                      uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm) {
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
                bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        }
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
}

static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
                        struct lpfc_eqe *eqe)
{
        if (!phba->sli4_hba.pc_sli4_params.eqav)
                bf_set_le32(lpfc_eqe_valid, eqe, 0);

        eq->host_index = ((eq->host_index + 1) % eq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
                eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}
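
/*
 * Note (illustrative): for eqav-capable hardware the EQE valid bit is not
 * cleared per entry; instead the polarity that counts as "valid" flips
 * each time host_index wraps to 0. E.g. with entry_count = 256, entries
 * are valid when their bit == 1 on the first pass through the ring and
 * when it == 0 on the second, which is why lpfc_sli4_eq_get() compares
 * against q->qe_valid rather than a constant.
 */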

static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
        struct lpfc_eqe *eqe = NULL;
        u32 eq_count = 0, cq_count = 0;
        struct lpfc_cqe *cqe = NULL;
        struct lpfc_queue *cq = NULL, *childq = NULL;
        uint16_t cqid;

        /* walk all the EQ entries and drop on the floor */
        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                /* Get the reference to the corresponding CQ */
                cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
                cq = NULL;

                list_for_each_entry(childq, &eq->child_list, list) {
                        if (childq->queue_id == cqid) {
                                cq = childq;
                                break;
                        }
                }
                /* If CQ is valid, iterate through it and drop all the CQEs */
                if (cq) {
                        cqe = lpfc_sli4_cq_get(cq);
                        while (cqe) {
                                __lpfc_sli4_consume_cqe(phba, cq, cqe);
                                cq_count++;
                                cqe = lpfc_sli4_cq_get(cq);
                        }
                        /* Clear and re-arm the CQ */
                        phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
                                                        LPFC_QUEUE_REARM);
                        cq_count = 0;
                }
                eq_count++;
                __lpfc_sli4_consume_eqe(phba, eq, eqe);
                eqe = lpfc_sli4_eq_get(eq);
        }

        /* Clear and re-arm the EQ */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}

static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
                     uint8_t rearm)
{
        struct lpfc_eqe *eqe;
        int count = 0, consumed = 0;

        if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
                goto rearm_and_exit;

        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
                __lpfc_sli4_consume_eqe(phba, eq, eqe);

                consumed++;
                if (!(++count % eq->max_proc_limit))
                        break;

                if (!(count % eq->notify_interval)) {
                        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
                                                        LPFC_QUEUE_NOARM);
                        consumed = 0;
                }

                eqe = lpfc_sli4_eq_get(eq);
        }
        eq->EQ_processed += count;

        /* Track the max number of EQEs processed in 1 intr */
        if (count > eq->EQ_max_eqe)
                eq->EQ_max_eqe = count;

        xchg(&eq->queue_claimed, 0);

rearm_and_exit:
        /* Always clear the EQ. */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

        return count;
}
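
/*
 * Sketch of a caller (assumption, for illustration): an interrupt handler
 * would normally drain and re-arm the EQ in a single call:
 *
 *      consumed = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_REARM);
 *
 * Passing LPFC_QUEUE_NOARM instead leaves the EQ disarmed, e.g. while
 * servicing it from a polling loop.
 */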

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
        struct lpfc_cqe *cqe;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        cqe = lpfc_sli4_qe(q, q->host_index);

        /* If the next CQE is not valid then we are done */
        if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Given what was seen in lpfc_sli4_eq_get() of speculative
         * instructions allowing action on content before valid bit checked,
         * add barrier here as well. May not be needed as "content" is a
         * single 32-bit entity here (vs multi word structure for cq's).
         */
        mb();
        return cqe;
}

static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                        struct lpfc_cqe *cqe)
{
        if (!phba->sli4_hba.pc_sli4_params.cqav)
                bf_set_le32(lpfc_cqe_valid, cqe, 0);

        cq->host_index = ((cq->host_index + 1) % cq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
                cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}

/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                      uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
        bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
               (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the
 * Receive Queue Doorbell to signal the HBA to start processing the
 * Receive Queue Entry. This function returns the index that the rqe was
 * copied to if successful. If no entries are available on @hq then this
 * function will return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
        struct lpfc_rqe *temp_hrqe;
        struct lpfc_rqe *temp_drqe;
        struct lpfc_register doorbell;
        int hq_put_index;
        int dq_put_index;

        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return -ENOMEM;
        hq_put_index = hq->host_index;
        dq_put_index = dq->host_index;
        temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
        temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
        if (hq_put_index != dq_put_index)
                return -EINVAL;
        /* If the host has not yet processed the next entry then we are done */
        if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
                return -EBUSY;
        lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
        lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

        /* Update the host index to point to the next slot */
        hq->host_index = ((hq_put_index + 1) % hq->entry_count);
        dq->host_index = ((dq_put_index + 1) % dq->entry_count);
        hq->RQ_buf_posted++;

        /* Ring The Header Receive Queue Doorbell */
        if (!(hq->host_index % hq->notify_interval)) {
                doorbell.word0 = 0;
                if (hq->db_format == LPFC_DB_RING_FORMAT) {
                        bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
                } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
                        bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_list_fm_index, &doorbell,
                               hq->host_index);
                        bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
                } else {
                        return -EINVAL;
                }
                writel(doorbell.word0, hq->db_regaddr);
        }
        return hq_put_index;
}
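
/*
 * Note (illustrative): receive buffers are always posted in pairs, one RQE
 * on the header queue and one on the data queue at the same index, which
 * is why the hq_put_index/dq_put_index match is enforced above. A caller
 * holding the hbalock would post roughly as:
 *
 *      rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 *      if (rc < 0)
 *              ... no room or mismatched queues; retry later ...
 */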

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return 0;

        if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
                return 0;
        hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
        dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
        return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
                           pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
                           pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        struct lpfc_iocbq *iocbq = NULL;

        lockdep_assert_held(&phba->hbalock);

        list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
        if (iocbq)
                phba->iocb_cnt++;
        if (phba->iocb_cnt > phba->iocb_max)
                phba->iocb_max = phba->iocb_cnt;
        return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
        return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
                    uint16_t xritag,
                    struct lpfc_node_rrq *rrq)
{
        struct lpfc_nodelist *ndlp = NULL;

        /* Lookup did to verify if did is still active on this vport */
        if (rrq->vport)
                ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

        /* The target DID could have been swapped (cable swap)
         * we should use the ndlp from the findnode if it is
         * available.
         */
        if ((!ndlp) && rrq->ndlp)
                ndlp = rrq->ndlp;

        if (!ndlp)
                goto out;

        if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
                rrq->send_rrq = 0;
                rrq->xritag = 0;
                rrq->rrq_stop_time = 0;
        }
out:
        mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if an RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. It checks whether
 * stop_time (ratov from setting rrq active) has been reached; if it
 * has and the send_rrq flag is set then it will call lpfc_send_rrq.
 * If the send_rrq flag is not set then it will just call the routine
 * to clear the rrq and free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long next_time;
        unsigned long iflags;
        LIST_HEAD(send_rrq);

        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
        next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        list_for_each_entry_safe(rrq, nextrrq,
                                 &phba->active_rrq_list, list) {
                if (time_after(jiffies, rrq->rrq_stop_time))
                        list_move(&rrq->list, &send_rrq);
                else if (time_before(rrq->rrq_stop_time, next_time))
                        next_time = rrq->rrq_stop_time;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        if ((!list_empty(&phba->active_rrq_list)) &&
            (!(phba->pport->load_flag & FC_UNLOADING)))
                mod_timer(&phba->rrq_tmr, next_time);
        list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
                list_del(&rrq->list);
                if (!rrq->send_rrq) {
                        /* this call will free the rrq */
                        lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
                } else if (lpfc_send_rrq(phba, rrq)) {
                        /* if we send the rrq then the completion handler
                         * will clear the bit in the xribitmap.
                         */
                        lpfc_clr_rrq_active(phba, rrq->xritag,
                                            rrq);
                }
        }
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;

        if (phba->sli_rev != LPFC_SLI_REV4)
                return NULL;
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport == vport && rrq->xritag == xri &&
                    rrq->nlp_DID == did) {
                        list_del(&rrq->list);
                        spin_unlock_irqrestore(&phba->hbalock, iflags);
                        return rrq;
                }
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;
        LIST_HEAD(rrq_list);

        if (phba->sli_rev != LPFC_SLI_REV4)
                return;
        if (!ndlp) {
                lpfc_sli4_vport_delete_els_xri_aborted(vport);
                lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
        }
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
                if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
                        list_move(&rrq->list, &rrq_list);
        spin_unlock_irqrestore(&phba->hbalock, iflags);

        list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
                list_del(&rrq->list);
                lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
        }
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                     uint16_t xritag)
{
        if (!ndlp)
                return 0;
        if (!ndlp->active_rrqs_xri_bitmap)
                return 0;
        if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                return 1;
        else
                return 0;
}
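
/*
 * Usage note (an assumption based on the bitmap semantics above): I/O
 * setup paths would check this before reusing an XRI against the same
 * target, e.g.:
 *
 *      if (lpfc_test_rrq_active(phba, ndlp, xritag))
 *              ... XRI still quarantined by an outstanding RRQ ...
 */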

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 if the rrq is activated for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
        unsigned long iflags;
        struct lpfc_node_rrq *rrq;
        int empty;

        if (!ndlp)
                return -EINVAL;

        if (!phba->cfg_enable_rrq)
                return -EINVAL;

        spin_lock_irqsave(&phba->hbalock, iflags);
        if (phba->pport->load_flag & FC_UNLOADING) {
                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
                goto out;
        }

        if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
                goto out;

        if (!ndlp->active_rrqs_xri_bitmap)
                goto out;

        if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                goto out;

        spin_unlock_irqrestore(&phba->hbalock, iflags);
        rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
        if (!rrq) {
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
                                " DID:0x%x Send:%d\n",
                                xritag, rxid, ndlp->nlp_DID, send_rrq);
                return -EINVAL;
        }
        if (phba->cfg_enable_rrq == 1)
                rrq->send_rrq = send_rrq;
        else
                rrq->send_rrq = 0;
        rrq->xritag = xritag;
        rrq->rrq_stop_time = jiffies +
                             msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        rrq->ndlp = ndlp;
        rrq->nlp_DID = ndlp->nlp_DID;
        rrq->vport = ndlp->vport;
        rrq->rxid = rxid;
        spin_lock_irqsave(&phba->hbalock, iflags);
        empty = list_empty(&phba->active_rrq_list);
        list_add_tail(&rrq->list, &phba->active_rrq_list);
        phba->hba_flag |= HBA_RRQ_ACTIVE;
        if (empty)
                lpfc_worker_wake_up(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return 0;
out:
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "2921 Can't set rrq active xri:0x%x rxid:0x%x"
                        " DID:0x%x Send:%d\n",
                        xritag, rxid, ndlp->nlp_DID, send_rrq);
        return -EINVAL;
}

/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not empty
 * then it is successful, it returns pointer to the newly allocated sglq
 * object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
        struct lpfc_sglq *sglq = NULL;
        struct lpfc_sglq *start_sglq = NULL;
        struct lpfc_io_buf *lpfc_cmd;
        struct lpfc_nodelist *ndlp;
        struct lpfc_sli_ring *pring = NULL;
        int found = 0;

        if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
                pring = phba->sli4_hba.nvmels_wq->pring;
        else
                pring = lpfc_phba_elsring(phba);

        lockdep_assert_held(&pring->ring_lock);

        if (piocbq->iocb_flag & LPFC_IO_FCP) {
                lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
                ndlp = lpfc_cmd->rdata->pnode;
        } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
                   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
                ndlp = piocbq->context_un.ndlp;
        } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
                if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
                        ndlp = NULL;
                else
                        ndlp = piocbq->context_un.ndlp;
        } else {
                ndlp = piocbq->context1;
        }

        spin_lock(&phba->sli4_hba.sgl_list_lock);
        list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
        start_sglq = sglq;
        while (!found) {
                if (!sglq)
                        break;
                if (ndlp && ndlp->active_rrqs_xri_bitmap &&
                    test_bit(sglq->sli4_lxritag,
                             ndlp->active_rrqs_xri_bitmap)) {
                        /* This xri has an rrq outstanding for this DID.
                         * put it back in the list and get another xri.
                         */
                        list_add_tail(&sglq->list, lpfc_els_sgl_list);
                        sglq = NULL;
                        list_remove_head(lpfc_els_sgl_list, sglq,
                                         struct lpfc_sglq, list);
                        if (sglq == start_sglq) {
                                list_add_tail(&sglq->list, lpfc_els_sgl_list);
                                sglq = NULL;
                                break;
                        } else
                                continue;
                }
                sglq->ndlp = ndlp;
                found = 1;
                phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
                sglq->state = SGL_ALLOCATED;
        }
        spin_unlock(&phba->sli4_hba.sgl_list_lock);
        return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_nvmet_sgl_list;
        struct lpfc_sglq *sglq = NULL;

        lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

        lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

        list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
        if (!sglq)
                return NULL;
        phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
        sglq->state = SGL_ALLOCATED;
        return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *iocbq = NULL;
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        iocbq = __lpfc_sli_get_iocbq(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object
 * to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
 * asserted held in the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        struct lpfc_sglq *sglq;
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
        unsigned long iflag = 0;
        struct lpfc_sli_ring *pring;

        if (iocbq->sli4_xritag == NO_XRI)
                sglq = NULL;
        else
                sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

        if (sglq) {
                if (iocbq->iocb_flag & LPFC_IO_NVMET) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_nvmet_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                        goto out;
                }

                pring = phba->sli4_hba.els_wq->pring;
                if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
                    (sglq->state != SGL_XRI_ABORTED)) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);

                        /* Check if we can get a reference on ndlp */
                        if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
                                sglq->ndlp = NULL;

                        list_add(&sglq->list,
                                 &phba->sli4_hba.lpfc_abts_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                } else {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);

                        /* Check if TXQ queue needs to be serviced */
                        if (!list_empty(&pring->txq))
                                lpfc_worker_wake_up(phba);
                }
        }

out:
        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_lxritag = NO_XRI;
        iocbq->sli4_xritag = NO_XRI;
        iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
                              LPFC_IO_NVME_LS);
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object to the
 * iocb pool. The iotag in the iocb object does not change for each
 * use of the iocb object. This function clears all other fields of
 * the iocb object when it is freed. The hbalock is asserted held in
 * the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_xritag = NO_XRI;
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        lockdep_assert_held(&phba->hbalock);

        phba->__lpfc_sli_release_iocbq(phba, iocbq);
        phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        unsigned long iflags;

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        spin_lock_irqsave(&phba->hbalock, iflags);
        __lpfc_sli_release_iocbq(phba, iocbq);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
                      uint32_t ulpstatus, uint32_t ulpWord4)
{
        struct lpfc_iocbq *piocb;

        while (!list_empty(iocblist)) {
                list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
                if (piocb->wqe_cmpl) {
                        if (piocb->iocb_flag & LPFC_IO_NVME)
                                lpfc_nvme_cancel_iocb(phba, piocb,
                                                      ulpstatus, ulpWord4);
                        else
                                lpfc_sli_release_iocbq(phba, piocb);
                } else if (piocb->iocb_cmpl) {
                        piocb->iocb.ulpStatus = ulpstatus;
                        piocb->iocb.un.ulpWord[4] = ulpWord4;
                        (piocb->iocb_cmpl) (phba, piocb, piocb);
                } else {
                        lpfc_sli_release_iocbq(phba, piocb);
                }
        }
}

/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
        lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

        if (iocb_cmnd > CMD_MAX_IOCB_CMD)
                return 0;

        switch (iocb_cmnd) {
        case CMD_XMIT_SEQUENCE_CR:
        case CMD_XMIT_SEQUENCE_CX:
        case CMD_XMIT_BCAST_CN:
        case CMD_XMIT_BCAST_CX:
        case CMD_ELS_REQUEST_CR:
        case CMD_ELS_REQUEST_CX:
        case CMD_CREATE_XRI_CR:
        case CMD_CREATE_XRI_CX:
        case CMD_GET_RPI_CN:
        case CMD_XMIT_ELS_RSP_CX:
        case CMD_GET_RPI_CR:
        case CMD_FCP_IWRITE_CR:
        case CMD_FCP_IWRITE_CX:
        case CMD_FCP_IREAD_CR:
        case CMD_FCP_IREAD_CX:
        case CMD_FCP_ICMND_CR:
        case CMD_FCP_ICMND_CX:
        case CMD_FCP_TSEND_CX:
        case CMD_FCP_TRSP_CX:
        case CMD_FCP_TRECEIVE_CX:
        case CMD_FCP_AUTO_TRSP_CX:
        case CMD_ADAPTER_MSG:
        case CMD_ADAPTER_DUMP:
        case CMD_XMIT_SEQUENCE64_CR:
        case CMD_XMIT_SEQUENCE64_CX:
        case CMD_XMIT_BCAST64_CN:
        case CMD_XMIT_BCAST64_CX:
        case CMD_ELS_REQUEST64_CR:
        case CMD_ELS_REQUEST64_CX:
        case CMD_FCP_IWRITE64_CR:
        case CMD_FCP_IWRITE64_CX:
        case CMD_FCP_IREAD64_CR:
        case CMD_FCP_IREAD64_CX:
        case CMD_FCP_ICMND64_CR:
        case CMD_FCP_ICMND64_CX:
        case CMD_FCP_TSEND64_CX:
        case CMD_FCP_TRSP64_CX:
        case CMD_FCP_TRECEIVE64_CX:
        case CMD_GEN_REQUEST64_CR:
        case CMD_GEN_REQUEST64_CX:
        case CMD_XMIT_ELS_RSP64_CX:
        case DSSCMD_IWRITE64_CR:
        case DSSCMD_IWRITE64_CX:
        case DSSCMD_IREAD64_CR:
        case DSSCMD_IREAD64_CX:
        case CMD_SEND_FRAME:
                type = LPFC_SOL_IOCB;
                break;
        case CMD_ABORT_XRI_CN:
        case CMD_ABORT_XRI_CX:
        case CMD_CLOSE_XRI_CN:
        case CMD_CLOSE_XRI_CX:
        case CMD_XRI_ABORTED_CX:
        case CMD_ABORT_MXRI64_CN:
        case CMD_XMIT_BLS_RSP64_CX:
                type = LPFC_ABORT_IOCB;
                break;
        case CMD_RCV_SEQUENCE_CX:
        case CMD_RCV_ELS_REQ_CX:
        case CMD_RCV_SEQUENCE64_CX:
        case CMD_RCV_ELS_REQ64_CX:
        case CMD_ASYNC_STATUS:
        case CMD_IOCB_RCV_SEQ64_CX:
        case CMD_IOCB_RCV_ELS64_CX:
        case CMD_IOCB_RCV_CONT64_CX:
        case CMD_IOCB_RET_XRI64_CX:
                type = LPFC_UNSOL_IOCB;
                break;
        case CMD_IOCB_XMIT_MSEQ64_CR:
        case CMD_IOCB_XMIT_MSEQ64_CX:
        case CMD_IOCB_RCV_SEQ_LIST64_CX:
        case CMD_IOCB_RCV_ELS_LIST64_CX:
        case CMD_IOCB_CLOSE_EXTENDED_CN:
        case CMD_IOCB_ABORT_EXTENDED_CN:
        case CMD_IOCB_RET_HBQE64_CN:
        case CMD_IOCB_FCP_IBIDIR64_CR:
        case CMD_IOCB_FCP_IBIDIR64_CX:
        case CMD_IOCB_FCP_ITASKMGT64_CX:
        case CMD_IOCB_LOGENTRY_CN:
        case CMD_IOCB_LOGENTRY_ASYNC_CN:
                printk("%s - Unhandled SLI-3 Command x%x\n",
                       __func__, iocb_cmnd);
                type = LPFC_UNKNOWN_IOCB;
                break;
        default:
                type = LPFC_UNKNOWN_IOCB;
                break;
        }

        return type;
}
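
/*
 * Illustrative call site (not verbatim driver code): a ring event handler
 * classifies each completed entry before dispatching it, e.g.:
 *
 *      type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
 *
 * with LPFC_SOL_IOCB routed to the originating completion handler,
 * LPFC_UNSOL_IOCB to the unsolicited-event path, and so on.
 */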

/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *pmbox;
        int i, rc, ret = 0;

        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb)
                return -ENOMEM;
        pmbox = &pmb->u.mb;
        phba->link_state = LPFC_INIT_MBX_CMDS;
        for (i = 0; i < psli->num_rings; i++) {
                lpfc_config_ring(phba, i, pmb);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "0446 Adapter failed to init (%d), "
                                        "mbxCmd x%x CFG_RING, mbxStatus x%x, "
                                        "ring %d\n",
                                        rc, pmbox->mbxCommand,
                                        pmbox->mbxStatus, i);
                        phba->link_state = LPFC_HBA_ERROR;
                        ret = -ENXIO;
                        break;
                }
        }
        mempool_free(pmb, phba->mbox_mem_pool);
        return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * The driver calls this function with the hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        struct lpfc_iocbq *piocb)
{
        if (phba->sli_rev == LPFC_SLI_REV4)
                lockdep_assert_held(&pring->ring_lock);
        else
                lockdep_assert_held(&phba->hbalock);

        BUG_ON(!piocb);

        list_add_tail(&piocb->list, &pring->txcmplq);
        piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
        pring->txcmplq_cnt++;

        if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
                BUG_ON(!piocb->vport);
                if (!(piocb->vport->load_flag & FC_UNLOADING))
                        mod_timer(&piocb->vport->els_tmofunc,
                                  jiffies +
                                  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
        }

        return 0;
}

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        struct lpfc_iocbq *cmd_iocb;

        lockdep_assert_held(&phba->hbalock);

        list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
        return cmd_iocb;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
        uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

        lockdep_assert_held(&phba->hbalock);

        if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
            (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
                pring->sli.sli3.next_cmdidx = 0;

        if (unlikely(pring->sli.sli3.local_getidx ==
                     pring->sli.sli3.next_cmdidx)) {

                pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

                if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "0315 Ring %d issue: portCmdGet %d "
                                        "is bigger than cmd ring %d\n",
                                        pring->ringno,
                                        pring->sli.sli3.local_getidx,
                                        max_cmd_idx);

                        phba->link_state = LPFC_HBA_ERROR;
                        /*
                         * All error attention handlers are posted to
                         * worker thread
                         */
                        phba->work_ha |= HA_ERATT;
                        phba->work_hs = HS_FFER3;

                        lpfc_worker_wake_up(phba);

                        return NULL;
                }

                if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
                        return NULL;
        }

        return lpfc_cmd_iocb(phba, pring);
}

/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        struct lpfc_iocbq **new_arr;
        struct lpfc_iocbq **old_arr;
        size_t new_len;
        struct lpfc_sli *psli = &phba->sli;
        uint16_t iotag;

        spin_lock_irq(&phba->hbalock);
        iotag = psli->last_iotag;
        if (++iotag < psli->iocbq_lookup_len) {
                psli->last_iotag = iotag;
                psli->iocbq_lookup[iotag] = iocbq;
                spin_unlock_irq(&phba->hbalock);
                iocbq->iotag = iotag;
                return iotag;
        } else if (psli->iocbq_lookup_len < (0xffff
                                             - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
                new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
                spin_unlock_irq(&phba->hbalock);
                new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
                                  GFP_KERNEL);
                if (new_arr) {
                        spin_lock_irq(&phba->hbalock);
                        old_arr = psli->iocbq_lookup;
                        if (new_len <= psli->iocbq_lookup_len) {
                                /* highly improbable case */
                                kfree(new_arr);
                                iotag = psli->last_iotag;
                                if (++iotag < psli->iocbq_lookup_len) {
                                        psli->last_iotag = iotag;
                                        psli->iocbq_lookup[iotag] = iocbq;
                                        spin_unlock_irq(&phba->hbalock);
                                        iocbq->iotag = iotag;
                                        return iotag;
                                }
                                spin_unlock_irq(&phba->hbalock);
                                return 0;
                        }
                        if (psli->iocbq_lookup)
                                memcpy(new_arr, old_arr,
                                       ((psli->last_iotag + 1) *
                                        sizeof(struct lpfc_iocbq *)));
                        psli->iocbq_lookup = new_arr;
                        psli->iocbq_lookup_len = new_len;
                        psli->last_iotag = iotag;
                        psli->iocbq_lookup[iotag] = iocbq;
                        spin_unlock_irq(&phba->hbalock);
                        iocbq->iotag = iotag;
                        kfree(old_arr);
                        return iotag;
                }
        } else
                spin_unlock_irq(&phba->hbalock);

        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                        "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
                        psli->last_iotag);

        return 0;
}
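
/*
 * Illustrative caller (assumption): iotags are assigned once, right after
 * an iocbq is allocated, and a zero return is treated as failure:
 *
 *      if (lpfc_sli_next_iotag(phba, iocbq) == 0)
 *              ... out of iotags; free the iocbq and bail out ...
 */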
1909 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
1910 * @phba: Pointer to HBA context object.
1911 * @pring: Pointer to driver SLI ring object.
1912 * @iocb: Pointer to iocb slot in the ring.
1913 * @nextiocb: Pointer to driver iocb object which needs to be
1914 * posted to firmware.
1916 * This function is called to post a new iocb to the firmware. This
1917 * function copies the new iocb to ring iocb slot and updates the
1918 * ring pointers. It adds the new iocb to the txcmplq if there is
1919 * a completion callback for this iocb; otherwise the function will free the
1920 * iocb object. The hbalock is asserted held in the code path calling this function.
1924 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1925 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1930 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
1933 if (pring->ringno == LPFC_ELS_RING) {
1934 lpfc_debugfs_slow_ring_trc(phba,
1935 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
1936 *(((uint32_t *) &nextiocb->iocb) + 4),
1937 *(((uint32_t *) &nextiocb->iocb) + 6),
1938 *(((uint32_t *) &nextiocb->iocb) + 7));
1942 * Issue iocb command to adapter
1944 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
1946 pring->stats.iocb_cmd++;
1949 * If there is no completion routine to call, we can release the
1950 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1951 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1953 if (nextiocb->iocb_cmpl)
1954 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
1956 __lpfc_sli_release_iocbq(phba, nextiocb);
1959 * Let the HBA know what IOCB slot will be the next one the
1960 * driver will put a command into.
1962 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1963 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
1967 * lpfc_sli_update_full_ring - Update the chip attention register
1968 * @phba: Pointer to HBA context object.
1969 * @pring: Pointer to driver SLI ring object.
1971 * The caller is not required to hold any lock for calling this function.
1972 * This function updates the chip attention bits for the ring to inform firmware
1973 * that there is pending work to be done for this ring and requests an
1974 * interrupt when there is space available in the ring. This function is
1975 * called when the driver is unable to post more iocbs to the ring due
1976 * to unavailability of space in the ring.
1979 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1981 int ringno = pring->ringno;
1983 pring->flag |= LPFC_CALL_RING_AVAILABLE;
1988 * Set R0ATT and R0CE_REQ for ring 'ringno' in the Chip Attention register.
1989 * The HBA will tell us when an IOCB entry is available.
1991 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1992 readl(phba->CAregaddr); /* flush */
1994 pring->stats.iocb_cmd_full++;
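/*
 * [Editor's note with a tiny sketch -- not driver code.] The Chip Attention
 * register packs four bits per ring, so ring N's bits are obtained by
 * shifting the ring-0 masks left by N*4, as in the writel() above. The
 * constants below are illustrative placeholders, not the real CA_* values.
 */
#include <stdio.h>

#define TOY_R0ATT	0x1	/* illustrative, not the real CA_R0ATT */
#define TOY_R0CE_REQ	0x2	/* illustrative, not the real CA_R0CE_REQ */

int main(void)
{
	for (int ringno = 0; ringno < 4; ringno++)
		printf("ring %d mask: 0x%08x\n", ringno,
		       (TOY_R0ATT | TOY_R0CE_REQ) << (ringno * 4));
	return 0;
}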
1998 * lpfc_sli_update_ring - Update chip attention register
1999 * @phba: Pointer to HBA context object.
2000 * @pring: Pointer to driver SLI ring object.
2002 * This function updates the chip attention register bit for the
2003 * given ring to inform the HBA that there is more work to be done
2004 * in this ring. The caller is not required to hold any lock.
2007 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2009 int ringno = pring->ringno;
2012 * Tell the HBA that there is work to do in this ring.
2014 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
2016 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
2017 readl(phba->CAregaddr); /* flush */
2022 * lpfc_sli_resume_iocb - Process iocbs in the txq
2023 * @phba: Pointer to HBA context object.
2024 * @pring: Pointer to driver SLI ring object.
2026 * This function is called with hbalock held to post pending iocbs
2027 * in the txq to the firmware. This function is called when the driver
2028 * detects space available in the ring.
2031 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2034 struct lpfc_iocbq *nextiocb;
2036 lockdep_assert_held(&phba->hbalock);
2040 * (a) there is anything on the txq to send
2041 * (b) the link is up
2042 * (c) link attention events can be processed (fcp ring only)
2043 * (d) IOCB processing is not blocked by the outstanding mbox command.
2046 if (lpfc_is_link_up(phba) &&
2047 (!list_empty(&pring->txq)) &&
2048 (pring->ringno != LPFC_FCP_RING ||
2049 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
2051 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2052 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
2053 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2056 lpfc_sli_update_ring(phba, pring);
2058 lpfc_sli_update_full_ring(phba, pring);
2065 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
2066 * @phba: Pointer to HBA context object.
2067 * @hbqno: HBQ number.
2069 * This function is called with hbalock held to get the next
2070 * available slot for the given HBQ. If there is a free slot
2071 * available for the HBQ, it will return a pointer to the next available
2072 * HBQ entry; else it will return NULL.
2074 static struct lpfc_hbq_entry *
2075 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
2077 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2079 lockdep_assert_held(&phba->hbalock);
2081 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
2082 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
2083 hbqp->next_hbqPutIdx = 0;
2085 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
2086 uint32_t raw_index = phba->hbq_get[hbqno];
2087 uint32_t getidx = le32_to_cpu(raw_index);
2089 hbqp->local_hbqGetIdx = getidx;
2091 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
2092 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2093 "1802 HBQ %d: local_hbqGetIdx "
2094 "%u is > than hbqp->entry_count %u\n",
2095 hbqno, hbqp->local_hbqGetIdx,
2098 phba->link_state = LPFC_HBA_ERROR;
2102 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
2106 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
2111 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
2112 * @phba: Pointer to HBA context object.
2114 * This function is called with no lock held to free all the
2115 * hbq buffers while uninitializing the SLI interface. It also
2116 * frees the HBQ buffers returned by the firmware but not yet
2117 * processed by the upper layers.
2120 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
2122 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2123 struct hbq_dmabuf *hbq_buf;
2124 unsigned long flags;
2127 hbq_count = lpfc_sli_hbq_count();
2128 /* Return all memory used by all HBQs */
2129 spin_lock_irqsave(&phba->hbalock, flags);
2130 for (i = 0; i < hbq_count; ++i) {
2131 list_for_each_entry_safe(dmabuf, next_dmabuf,
2132 &phba->hbqs[i].hbq_buffer_list, list) {
2133 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2134 list_del(&hbq_buf->dbuf.list);
2135 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2137 phba->hbqs[i].buffer_count = 0;
2140 /* Mark the HBQs not in use */
2141 phba->hbq_in_use = 0;
2142 spin_unlock_irqrestore(&phba->hbalock, flags);
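/*
 * [Editor's illustration -- not driver code.] A userspace model of the
 * teardown loop above: safe iteration is required because each node is
 * unlinked and freed while walking, and container_of recovers the enclosing
 * buffer from its embedded list member (as hbq_dmabuf is recovered from
 * dbuf.list). All names here are hypothetical.
 */
#include <stdlib.h>
#include <stddef.h>

struct list_node { struct list_node *next; };

struct toy_buf {
	int tag;
	struct list_node link;	/* embedded, like dbuf.list */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void free_all(struct list_node **head)
{
	struct list_node *pos = *head, *next;

	while (pos) {
		next = pos->next;	/* fetch before freeing */
		free(container_of(pos, struct toy_buf, link));
		pos = next;
	}
	*head = NULL;
}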
2146 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
2147 * @phba: Pointer to HBA context object.
2148 * @hbqno: HBQ number.
2149 * @hbq_buf: Pointer to HBQ buffer.
2151 * This function is called with the hbalock held to post a
2152 * hbq buffer to the firmware. If the function finds an empty
2153 * slot in the HBQ, it will post the buffer. The function will return
2154 * a pointer to the hbq entry if it successfully posts the buffer;
2155 * else it will return NULL.
2158 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2159 struct hbq_dmabuf *hbq_buf)
2161 lockdep_assert_held(&phba->hbalock);
2162 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2166 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2167 * @phba: Pointer to HBA context object.
2168 * @hbqno: HBQ number.
2169 * @hbq_buf: Pointer to HBQ buffer.
2171 * This function is called with the hbalock held to post a hbq buffer to the
2172 * firmware. If the function finds an empty slot in the HBQ, it will post the
2173 * buffer and place it on the hbq_buffer_list. The function will return zero if
2174 * it successfully posts the buffer; else it will return an error.
2177 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2178 struct hbq_dmabuf *hbq_buf)
2180 struct lpfc_hbq_entry *hbqe;
2181 dma_addr_t physaddr = hbq_buf->dbuf.phys;
2183 lockdep_assert_held(&phba->hbalock);
2184 /* Get next HBQ entry slot to use */
2185 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2187 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2189 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2190 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
2191 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2192 hbqe->bde.tus.f.bdeFlags = 0;
2193 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2194 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2196 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2197 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2199 readl(phba->hbq_put + hbqno);
2200 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2207 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2208 * @phba: Pointer to HBA context object.
2209 * @hbqno: HBQ number.
2210 * @hbq_buf: Pointer to HBQ buffer.
2212 * This function is called with the hbalock held to post an RQE to the SLI4
2213 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2214 * the hbq_buffer_list and return zero, otherwise it will return an error.
2217 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2218 struct hbq_dmabuf *hbq_buf)
2221 struct lpfc_rqe hrqe;
2222 struct lpfc_rqe drqe;
2223 struct lpfc_queue *hrq;
2224 struct lpfc_queue *drq;
2226 if (hbqno != LPFC_ELS_HBQ)
2228 hrq = phba->sli4_hba.hdr_rq;
2229 drq = phba->sli4_hba.dat_rq;
2231 lockdep_assert_held(&phba->hbalock);
2232 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2233 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2234 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2235 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2236 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2239 hbq_buf->tag = (rc | (hbqno << 16));
2240 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
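/*
 * [Editor's sketch -- not driver code.] The buffer-tag encoding used just
 * above: the RQ index returned by lpfc_sli4_rq_put() lives in the low 16
 * bits and the HBQ number in the high 16 bits, which is why lookups
 * elsewhere recover the queue with "tag >> 16". The macro names below are
 * hypothetical.
 */
#include <assert.h>
#include <stdint.h>

#define HBQ_TAG(hbqno, idx)	(((uint32_t)(hbqno) << 16) | (uint16_t)(idx))
#define HBQ_FROM_TAG(tag)	((tag) >> 16)
#define IDX_FROM_TAG(tag)	((tag) & 0xffff)

int main(void)
{
	uint32_t tag = HBQ_TAG(2, 42);	/* arbitrary hbq number and RQ index */

	assert(HBQ_FROM_TAG(tag) == 2);
	assert(IDX_FROM_TAG(tag) == 42);
	return 0;
}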
2244 /* HBQ for ELS and CT traffic. */
2245 static struct lpfc_hbq_init lpfc_els_hbq = {
2250 .ring_mask = (1 << LPFC_ELS_RING),
2257 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2262 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2263 * @phba: Pointer to HBA context object.
2264 * @hbqno: HBQ number.
2265 * @count: Number of HBQ buffers to be posted.
2267 * This function is called with no lock held to post more hbq buffers to the
2268 * given HBQ. The function returns the number of HBQ buffers successfully posted.
2272 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2274 uint32_t i, posted = 0;
2275 unsigned long flags;
2276 struct hbq_dmabuf *hbq_buffer;
2277 LIST_HEAD(hbq_buf_list);
2278 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2281 if ((phba->hbqs[hbqno].buffer_count + count) >
2282 lpfc_hbq_defs[hbqno]->entry_count)
2283 count = lpfc_hbq_defs[hbqno]->entry_count -
2284 phba->hbqs[hbqno].buffer_count;
2287 /* Allocate HBQ entries */
2288 for (i = 0; i < count; i++) {
2289 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2292 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2294 /* Check whether HBQ is still in use */
2295 spin_lock_irqsave(&phba->hbalock, flags);
2296 if (!phba->hbq_in_use)
2298 while (!list_empty(&hbq_buf_list)) {
2299 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2301 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2303 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2304 phba->hbqs[hbqno].buffer_count++;
2307 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2309 spin_unlock_irqrestore(&phba->hbalock, flags);
2312 spin_unlock_irqrestore(&phba->hbalock, flags);
2313 while (!list_empty(&hbq_buf_list)) {
2314 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2316 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2322 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2323 * @phba: Pointer to HBA context object.
2326 * This function posts more buffers to the HBQ. This function
2327 * is called with no lock held. The function returns the number of HBQ entries
2328 * successfully allocated.
2331 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2333 if (phba->sli_rev == LPFC_SLI_REV4)
2336 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2337 lpfc_hbq_defs[qno]->add_count);
2341 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2342 * @phba: Pointer to HBA context object.
2343 * @qno: HBQ queue number.
2345 * This function is called from SLI initialization code path with
2346 * no lock held to post initial HBQ buffers to firmware. The
2347 * function returns the number of HBQ entries successfully allocated.
2350 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2352 if (phba->sli_rev == LPFC_SLI_REV4)
2353 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2354 lpfc_hbq_defs[qno]->entry_count);
2356 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2357 lpfc_hbq_defs[qno]->init_count);
2361 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2363 * This function removes the first hbq buffer on an hbq list and returns a
2364 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2366 static struct hbq_dmabuf *
2367 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2369 struct lpfc_dmabuf *d_buf;
2371 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2374 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2378 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2379 * @phba: Pointer to HBA context object.
2382 * This function removes the first RQ buffer on an RQ buffer list and returns a
2383 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2385 static struct rqb_dmabuf *
2386 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2388 struct lpfc_dmabuf *h_buf;
2389 struct lpfc_rqb *rqbp;
2392 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2393 struct lpfc_dmabuf, list);
2396 rqbp->buffer_count--;
2397 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2401 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2402 * @phba: Pointer to HBA context object.
2403 * @tag: Tag of the hbq buffer.
2405 * This function searches for the hbq buffer associated with the given tag in
2406 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2407 * otherwise it returns NULL.
2409 static struct hbq_dmabuf *
2410 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2412 struct lpfc_dmabuf *d_buf;
2413 struct hbq_dmabuf *hbq_buf;
2417 if (hbqno >= LPFC_MAX_HBQS)
2420 spin_lock_irq(&phba->hbalock);
2421 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2422 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2423 if (hbq_buf->tag == tag) {
2424 spin_unlock_irq(&phba->hbalock);
2428 spin_unlock_irq(&phba->hbalock);
2429 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2430 "1803 Bad hbq tag. Data: x%x x%x\n",
2431 tag, phba->hbqs[tag >> 16].buffer_count);
2436 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2437 * @phba: Pointer to HBA context object.
2438 * @hbq_buffer: Pointer to HBQ buffer.
2440 * This function is called with the hbalock held. This function gives back
2441 * the hbq buffer to firmware. If the HBQ does not have space to
2442 * post the buffer, it will free the buffer.
2445 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2450 hbqno = hbq_buffer->tag >> 16;
2451 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2452 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2457 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2458 * @mbxCommand: mailbox command code.
2460 * This function is called by the mailbox event handler function to verify
2461 * that the completed mailbox command is a legitimate mailbox command. If the
2462 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2463 * and the mailbox event handler will take the HBA offline.
2466 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2470 switch (mbxCommand) {
2474 case MBX_WRITE_VPARMS:
2475 case MBX_RUN_BIU_DIAG:
2478 case MBX_CONFIG_LINK:
2479 case MBX_CONFIG_RING:
2480 case MBX_RESET_RING:
2481 case MBX_READ_CONFIG:
2482 case MBX_READ_RCONFIG:
2483 case MBX_READ_SPARM:
2484 case MBX_READ_STATUS:
2488 case MBX_READ_LNK_STAT:
2490 case MBX_UNREG_LOGIN:
2492 case MBX_DUMP_MEMORY:
2493 case MBX_DUMP_CONTEXT:
2496 case MBX_UPDATE_CFG:
2498 case MBX_DEL_LD_ENTRY:
2499 case MBX_RUN_PROGRAM:
2501 case MBX_SET_VARIABLE:
2502 case MBX_UNREG_D_ID:
2503 case MBX_KILL_BOARD:
2504 case MBX_CONFIG_FARP:
2507 case MBX_RUN_BIU_DIAG64:
2508 case MBX_CONFIG_PORT:
2509 case MBX_READ_SPARM64:
2510 case MBX_READ_RPI64:
2511 case MBX_REG_LOGIN64:
2512 case MBX_READ_TOPOLOGY:
2515 case MBX_LOAD_EXP_ROM:
2516 case MBX_ASYNCEVT_ENABLE:
2520 case MBX_PORT_CAPABILITIES:
2521 case MBX_PORT_IOV_CONTROL:
2522 case MBX_SLI4_CONFIG:
2523 case MBX_SLI4_REQ_FTRS:
2525 case MBX_UNREG_FCFI:
2530 case MBX_RESUME_RPI:
2531 case MBX_READ_EVENT_LOG_STATUS:
2532 case MBX_READ_EVENT_LOG:
2533 case MBX_SECURITY_MGMT:
2535 case MBX_ACCESS_VDATA:
2546 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2547 * @phba: Pointer to HBA context object.
2548 * @pmboxq: Pointer to mailbox command.
2550 * This is completion handler function for mailbox commands issued from
2551 * lpfc_sli_issue_mbox_wait function. This function is called by the
2552 * mailbox event handler function with no lock held. This function
2553 * will wake up the thread waiting on the completion pointed to by context3
2557 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2559 unsigned long drvr_flag;
2560 struct completion *pmbox_done;
2563 * If pmbox_done is NULL, the driver thread gave up waiting and
2564 * continued running.
2566 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2567 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2568 pmbox_done = (struct completion *)pmboxq->context3;
2570 complete(pmbox_done);
2571 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
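/*
 * [Editor's illustration -- not driver code.] What the wake side above does,
 * modeled in userspace: the waiter parks a completion pointer in the request,
 * and the completer sets a done flag and signals while holding the same lock
 * the waiter uses to clear the pointer, so a waiter that timed out is never
 * signaled on freed memory. All names are hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>

struct toy_completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

struct toy_mbox {
	pthread_mutex_t *hba_lock;	/* stands in for phba->hbalock */
	struct toy_completion *waiter;	/* stands in for context3 */
	bool wake_flag;			/* stands in for LPFC_MBX_WAKE */
};

static void toy_mbox_complete(struct toy_mbox *m)
{
	struct toy_completion *c;

	m->wake_flag = true;
	pthread_mutex_lock(m->hba_lock);
	c = m->waiter;			/* NULL if the waiter gave up */
	if (c) {
		pthread_mutex_lock(&c->lock);
		c->done = true;
		pthread_cond_signal(&c->cond);
		pthread_mutex_unlock(&c->lock);
	}
	pthread_mutex_unlock(m->hba_lock);
}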
2576 __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2578 unsigned long iflags;
2580 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2581 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2582 spin_lock_irqsave(&ndlp->lock, iflags);
2583 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2584 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2585 spin_unlock_irqrestore(&ndlp->lock, iflags);
2587 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2591 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2592 * @phba: Pointer to HBA context object.
2593 * @pmb: Pointer to mailbox object.
2595 * This function is the default mailbox completion handler. It
2596 * frees the memory resources associated with the completed mailbox
2597 * command. If the completed command is a REG_LOGIN mailbox command,
2598 * this function will issue an UNREG_LOGIN to reclaim the RPI.
2601 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2603 struct lpfc_vport *vport = pmb->vport;
2604 struct lpfc_dmabuf *mp;
2605 struct lpfc_nodelist *ndlp;
2606 struct Scsi_Host *shost;
2610 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
2613 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2618 * If a REG_LOGIN succeeded after the node was destroyed or the node
2619 * is in re-discovery, the driver needs to clean up the RPI.
2621 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2622 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2623 !pmb->u.mb.mbxStatus) {
2624 rpi = pmb->u.mb.un.varWords[0];
2625 vpi = pmb->u.mb.un.varRegLogin.vpi;
2626 if (phba->sli_rev == LPFC_SLI_REV4)
2627 vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2628 lpfc_unreg_login(phba, vpi, rpi, pmb);
2630 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2631 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2632 if (rc != MBX_NOT_FINISHED)
2636 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2637 !(phba->pport->load_flag & FC_UNLOADING) &&
2638 !pmb->u.mb.mbxStatus) {
2639 shost = lpfc_shost_from_vport(vport);
2640 spin_lock_irq(shost->host_lock);
2641 vport->vpi_state |= LPFC_VPI_REGISTERED;
2642 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2643 spin_unlock_irq(shost->host_lock);
2646 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2647 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2649 pmb->ctx_buf = NULL;
2650 pmb->ctx_ndlp = NULL;
2653 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2654 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2656 /* Check to see if there are any deferred events to process */
2660 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2661 "1438 UNREG cmpl deferred mbox x%x "
2662 "on NPort x%x Data: x%x x%x %px x%x x%x\n",
2663 ndlp->nlp_rpi, ndlp->nlp_DID,
2664 ndlp->nlp_flag, ndlp->nlp_defer_did,
2665 ndlp, vport->load_flag, kref_read(&ndlp->kref));
2667 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2668 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2669 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2670 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2671 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2673 __lpfc_sli_rpi_release(vport, ndlp);
2676 /* The unreg_login mailbox is complete and had a
2677 * reference that has to be released. The PLOGI
2681 pmb->ctx_ndlp = NULL;
2685 /* Check security permission status on INIT_LINK mailbox command */
2686 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2687 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2688 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2689 "2860 SLI authentication is required "
2690 "for INIT_LINK but has not done yet\n");
2692 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2693 lpfc_sli4_mbox_cmd_free(phba, pmb);
2695 mempool_free(pmb, phba->mbox_mem_pool);
2698 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2699 * @phba: Pointer to HBA context object.
2700 * @pmb: Pointer to mailbox object.
2702 * This function is the unreg rpi mailbox completion handler. It
2703 * frees the memory resources associated with the completed mailbox
2704 * command. An additional reference is put on the ndlp to prevent
2705 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2706 * the unreg mailbox command completes; this routine puts the reference back.
2711 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2713 struct lpfc_vport *vport = pmb->vport;
2714 struct lpfc_nodelist *ndlp;
2716 ndlp = pmb->ctx_ndlp;
2717 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2718 if (phba->sli_rev == LPFC_SLI_REV4 &&
2719 (bf_get(lpfc_sli_intf_if_type,
2720 &phba->sli4_hba.sli_intf) >=
2721 LPFC_SLI_INTF_IF_TYPE_2)) {
2724 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2725 "0010 UNREG_LOGIN vpi:%x "
2726 "rpi:%x DID:%x defer x%x flg x%x "
2728 vport->vpi, ndlp->nlp_rpi,
2729 ndlp->nlp_DID, ndlp->nlp_defer_did,
2732 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2734 /* Check to see if there are any deferred
2737 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2738 (ndlp->nlp_defer_did !=
2739 NLP_EVT_NOTHING_PENDING)) {
2741 vport, KERN_INFO, LOG_DISCOVERY,
2742 "4111 UNREG cmpl deferred "
2744 "NPort x%x Data: x%x x%px\n",
2745 ndlp->nlp_rpi, ndlp->nlp_DID,
2746 ndlp->nlp_defer_did, ndlp);
2747 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2748 ndlp->nlp_defer_did =
2749 NLP_EVT_NOTHING_PENDING;
2750 lpfc_issue_els_plogi(
2751 vport, ndlp->nlp_DID, 0);
2753 __lpfc_sli_rpi_release(vport, ndlp);
2761 mempool_free(pmb, phba->mbox_mem_pool);
2765 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2766 * @phba: Pointer to HBA context object.
2768 * This function is called with no lock held. This function processes all
2769 * the completed mailbox commands and gives them to the upper layers. The interrupt
2770 * service routine processes mailbox completion interrupt and adds completed
2771 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2772 * The worker thread calls lpfc_sli_handle_mb_event, which will return the
2773 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
2774 * function returns the mailbox commands to the upper layer by calling the
2775 * completion handler function of each mailbox.
2778 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2785 phba->sli.slistat.mbox_event++;
2787 /* Get all completed mailbox buffers into the cmplq */
2788 spin_lock_irq(&phba->hbalock);
2789 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2790 spin_unlock_irq(&phba->hbalock);
2792 /* Get a Mailbox buffer to setup mailbox commands for callback */
2794 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2800 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2802 lpfc_debugfs_disc_trc(pmb->vport,
2803 LPFC_DISC_TRC_MBOX_VPORT,
2804 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2805 (uint32_t)pmbox->mbxCommand,
2806 pmbox->un.varWords[0],
2807 pmbox->un.varWords[1]);
2810 lpfc_debugfs_disc_trc(phba->pport,
2812 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2813 (uint32_t)pmbox->mbxCommand,
2814 pmbox->un.varWords[0],
2815 pmbox->un.varWords[1]);
2820 * It is a fatal error if an unknown mbox command completes.
2822 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2824 /* Unknown mailbox command compl */
2825 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2826 "(%d):0323 Unknown Mailbox command "
2827 "x%x (x%x/x%x) Cmpl\n",
2828 pmb->vport ? pmb->vport->vpi :
2831 lpfc_sli_config_mbox_subsys_get(phba,
2833 lpfc_sli_config_mbox_opcode_get(phba,
2835 phba->link_state = LPFC_HBA_ERROR;
2836 phba->work_hs = HS_FFER3;
2837 lpfc_handle_eratt(phba);
2841 if (pmbox->mbxStatus) {
2842 phba->sli.slistat.mbox_stat_err++;
2843 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2844 /* Mbox cmd cmpl error - RETRYing */
2845 lpfc_printf_log(phba, KERN_INFO,
2847 "(%d):0305 Mbox cmd cmpl "
2848 "error - RETRYing Data: x%x "
2849 "(x%x/x%x) x%x x%x x%x\n",
2850 pmb->vport ? pmb->vport->vpi :
2853 lpfc_sli_config_mbox_subsys_get(phba,
2855 lpfc_sli_config_mbox_opcode_get(phba,
2858 pmbox->un.varWords[0],
2859 pmb->vport ? pmb->vport->port_state :
2860 LPFC_VPORT_UNKNOWN);
2861 pmbox->mbxStatus = 0;
2862 pmbox->mbxOwner = OWN_HOST;
2863 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2864 if (rc != MBX_NOT_FINISHED)
2869 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2870 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2871 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
2872 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2874 pmb->vport ? pmb->vport->vpi : 0,
2876 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2877 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2879 *((uint32_t *) pmbox),
2880 pmbox->un.varWords[0],
2881 pmbox->un.varWords[1],
2882 pmbox->un.varWords[2],
2883 pmbox->un.varWords[3],
2884 pmbox->un.varWords[4],
2885 pmbox->un.varWords[5],
2886 pmbox->un.varWords[6],
2887 pmbox->un.varWords[7],
2888 pmbox->un.varWords[8],
2889 pmbox->un.varWords[9],
2890 pmbox->un.varWords[10]);
2893 pmb->mbox_cmpl(phba, pmb);
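/*
 * [Editor's sketch -- not driver code.] The drain pattern at the top of this
 * handler: the whole pending list is moved to a private local list in one
 * short critical section (list_splice_init above), then processed with no
 * lock held. Userspace model with hypothetical names.
 */
#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; };

struct workq {
	pthread_mutex_t lock;
	struct node *head;	/* stands in for sli.mboxq_cmpl */
};

static void drain(struct workq *q, void (*handle)(struct node *))
{
	struct node *local, *n;

	pthread_mutex_lock(&q->lock);
	local = q->head;	/* splice: steal the whole list at once */
	q->head = NULL;
	pthread_mutex_unlock(&q->lock);

	while ((n = local)) {	/* process off-lock, in order */
		local = n->next;
		handle(n);
	}
}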
2899 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2900 * @phba: Pointer to HBA context object.
2901 * @pring: Pointer to driver SLI ring object.
2904 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
2905 * is set in the tag, the buffer was posted for a particular exchange and
2906 * the function will return the buffer without replacing it.
2907 * If the buffer is for unsolicited ELS or CT traffic, this function
2908 * returns the buffer and also posts another buffer to the firmware.
2910 static struct lpfc_dmabuf *
2911 lpfc_sli_get_buff(struct lpfc_hba *phba,
2912 struct lpfc_sli_ring *pring,
2915 struct hbq_dmabuf *hbq_entry;
2917 if (tag & QUE_BUFTAG_BIT)
2918 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2919 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2922 return &hbq_entry->dbuf;
2926 * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
2927 * containing a NVME LS request.
2928 * @phba: pointer to lpfc hba data structure.
2929 * @piocb: pointer to the iocbq struct representing the sequence starting frame.
2932 * This routine initially validates the NVME LS, validates there is a login
2933 * with the port that sent the LS, and then calls the appropriate nvme host
2934 * or target LS request handler.
2937 lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
2939 struct lpfc_nodelist *ndlp;
2940 struct lpfc_dmabuf *d_buf;
2941 struct hbq_dmabuf *nvmebuf;
2942 struct fc_frame_header *fc_hdr;
2943 struct lpfc_async_xchg_ctx *axchg = NULL;
2944 char *failwhy = NULL;
2945 uint32_t oxid, sid, did, fctl, size;
2948 d_buf = piocb->context2;
2950 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2951 fc_hdr = nvmebuf->hbuf.virt;
2952 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2953 sid = sli4_sid_from_fc_hdr(fc_hdr);
2954 did = sli4_did_from_fc_hdr(fc_hdr);
2955 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
2956 fc_hdr->fh_f_ctl[1] << 8 |
2957 fc_hdr->fh_f_ctl[2]);
2958 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
2960 lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
2963 if (phba->pport->load_flag & FC_UNLOADING) {
2964 failwhy = "Driver Unloading";
2965 } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
2966 failwhy = "NVME FC4 Disabled";
2967 } else if (!phba->nvmet_support && !phba->pport->localport) {
2968 failwhy = "No Localport";
2969 } else if (phba->nvmet_support && !phba->targetport) {
2970 failwhy = "No Targetport";
2971 } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
2972 failwhy = "Bad NVME LS R_CTL";
2973 } else if (unlikely((fctl & 0x00FF0000) !=
2974 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
2975 failwhy = "Bad NVME LS F_CTL";
2977 axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
2979 failwhy = "No CTX memory";
2982 if (unlikely(failwhy)) {
2983 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2984 "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
2985 sid, oxid, failwhy);
2989 /* validate the source of the LS is logged in */
2990 ndlp = lpfc_findnode_did(phba->pport, sid);
2992 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2993 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2994 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2995 "6216 NVME Unsol rcv: No ndlp: "
2996 "NPort_ID x%x oxid x%x\n",
3007 axchg->state = LPFC_NVME_STE_LS_RCV;
3008 axchg->entry_cnt = 1;
3009 axchg->rqb_buffer = (void *)nvmebuf;
3010 axchg->hdwq = &phba->sli4_hba.hdwq[0];
3011 axchg->payload = nvmebuf->dbuf.virt;
3012 INIT_LIST_HEAD(&axchg->list);
3014 if (phba->nvmet_support) {
3015 ret = lpfc_nvmet_handle_lsreq(phba, axchg);
3016 spin_lock_irq(&ndlp->lock);
3017 if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
3018 ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
3019 spin_unlock_irq(&ndlp->lock);
3021 /* This reference is a single occurrence to hold the
3022 * node valid until the nvmet transport calls
3025 if (!lpfc_nlp_get(ndlp))
3028 lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
3029 "6206 NVMET unsol ls_req ndlp %p "
3030 "DID x%x xflags x%x refcnt %d\n",
3031 ndlp, ndlp->nlp_DID,
3032 ndlp->fc4_xpt_flags,
3033 kref_read(&ndlp->kref));
3035 spin_unlock_irq(&ndlp->lock);
3038 ret = lpfc_nvme_handle_lsreq(phba, axchg);
3041 /* if zero, LS was successfully handled. If non-zero, LS not handled */
3046 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3047 "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
3048 "NVMe%s handler failed %d\n",
3050 (phba->nvmet_support) ? "T" : "I", ret);
3052 /* recycle receive buffer */
3053 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
3055 /* If start of new exchange, abort it */
3056 if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
3057 ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
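/*
 * [Editor's illustration -- not driver code.] The F_CTL handling above: the
 * three f_ctl bytes of the FC header form one 24-bit field, and the LS
 * validation checks that the frame is a complete single-sequence unit. The
 * flag values follow the FC-FS definitions used by the driver; the helper
 * name is hypothetical.
 */
#include <stdint.h>
#include <stdbool.h>

#define TOY_FC_FIRST_SEQ	0x200000	/* first sequence of exchange */
#define TOY_FC_END_SEQ		0x100000	/* last frame of sequence */
#define TOY_FC_SEQ_INIT		0x010000	/* transfer of seq initiative */

static bool ls_fctl_ok(const uint8_t f_ctl[3])
{
	uint32_t fctl = f_ctl[0] << 16 | f_ctl[1] << 8 | f_ctl[2];

	/* Same middle-byte check as the "Bad NVME LS F_CTL" test above. */
	return (fctl & 0x00FF0000) ==
	       (TOY_FC_FIRST_SEQ | TOY_FC_END_SEQ | TOY_FC_SEQ_INIT);
}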
3064 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
3065 * @phba: Pointer to HBA context object.
3066 * @pring: Pointer to driver SLI ring object.
3067 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
3068 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
3069 * @fch_type: the type for the first frame of the sequence.
3071 * This function is called with no lock held. This function uses the r_ctl and
3072 * type of the received sequence to find the correct callback function to call
3073 * to process the sequence.
3076 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3077 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
3084 lpfc_nvme_unsol_ls_handler(phba, saveq);
3090 /* Unsolicited Responses */
3091 if (pring->prt[0].profile) {
3092 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
3093 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
3097 /* We must search, based on rctl / type
3098 for the right routine */
3099 for (i = 0; i < pring->num_mask; i++) {
3100 if ((pring->prt[i].rctl == fch_r_ctl) &&
3101 (pring->prt[i].type == fch_type)) {
3102 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
3103 (pring->prt[i].lpfc_sli_rcv_unsol_event)
3104 (phba, pring, saveq);
3112 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
3113 * @phba: Pointer to HBA context object.
3114 * @pring: Pointer to driver SLI ring object.
3115 * @saveq: Pointer to the unsolicited iocb.
3117 * This function is called with no lock held by the ring event handler
3118 * when there is an unsolicited iocb posted to the response ring by the
3119 * firmware. This function gets the buffer associated with the iocbs
3120 * and calls the event handler for the ring. This function handles both
3121 * qring buffers and hbq buffers.
3122 * When the function returns 1, the caller can free the iocb object; otherwise
3123 * upper layer functions will free the iocb objects.
3126 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3127 struct lpfc_iocbq *saveq)
3131 uint32_t Rctl, Type;
3132 struct lpfc_iocbq *iocbq;
3133 struct lpfc_dmabuf *dmzbuf;
3135 irsp = &(saveq->iocb);
3137 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
3138 if (pring->lpfc_sli_rcv_async_status)
3139 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
3141 lpfc_printf_log(phba,
3144 "0316 Ring %d handler: unexpected "
3145 "ASYNC_STATUS iocb received evt_code "
3148 irsp->un.asyncstat.evt_code);
3152 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
3153 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
3154 if (irsp->ulpBdeCount > 0) {
3155 dmzbuf = lpfc_sli_get_buff(phba, pring,
3156 irsp->un.ulpWord[3]);
3157 lpfc_in_buf_free(phba, dmzbuf);
3160 if (irsp->ulpBdeCount > 1) {
3161 dmzbuf = lpfc_sli_get_buff(phba, pring,
3162 irsp->unsli3.sli3Words[3]);
3163 lpfc_in_buf_free(phba, dmzbuf);
3166 if (irsp->ulpBdeCount > 2) {
3167 dmzbuf = lpfc_sli_get_buff(phba, pring,
3168 irsp->unsli3.sli3Words[7]);
3169 lpfc_in_buf_free(phba, dmzbuf);
3175 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3176 if (irsp->ulpBdeCount != 0) {
3177 saveq->context2 = lpfc_sli_get_buff(phba, pring,
3178 irsp->un.ulpWord[3]);
3179 if (!saveq->context2)
3180 lpfc_printf_log(phba,
3183 "0341 Ring %d Cannot find buffer for "
3184 "an unsolicited iocb. tag 0x%x\n",
3186 irsp->un.ulpWord[3]);
3188 if (irsp->ulpBdeCount == 2) {
3189 saveq->context3 = lpfc_sli_get_buff(phba, pring,
3190 irsp->unsli3.sli3Words[7]);
3191 if (!saveq->context3)
3192 lpfc_printf_log(phba,
3195 "0342 Ring %d Cannot find buffer for an"
3196 " unsolicited iocb. tag 0x%x\n",
3198 irsp->unsli3.sli3Words[7]);
3200 list_for_each_entry(iocbq, &saveq->list, list) {
3201 irsp = &(iocbq->iocb);
3202 if (irsp->ulpBdeCount != 0) {
3203 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
3204 irsp->un.ulpWord[3]);
3205 if (!iocbq->context2)
3206 lpfc_printf_log(phba,
3209 "0343 Ring %d Cannot find "
3210 "buffer for an unsolicited iocb"
3211 ". tag 0x%x\n", pring->ringno,
3212 irsp->un.ulpWord[3]);
3214 if (irsp->ulpBdeCount == 2) {
3215 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
3216 irsp->unsli3.sli3Words[7]);
3217 if (!iocbq->context3)
3218 lpfc_printf_log(phba,
3221 "0344 Ring %d Cannot find "
3222 "buffer for an unsolicited "
3225 irsp->unsli3.sli3Words[7]);
3229 if (irsp->ulpBdeCount != 0 &&
3230 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3231 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3234 /* search continue save q for same XRI */
3235 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3236 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3237 saveq->iocb.unsli3.rcvsli3.ox_id) {
3238 list_add_tail(&saveq->list, &iocbq->list);
3244 list_add_tail(&saveq->clist,
3245 &pring->iocb_continue_saveq);
3246 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3247 list_del_init(&iocbq->clist);
3249 irsp = &(saveq->iocb);
3253 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3254 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3255 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3256 Rctl = FC_RCTL_ELS_REQ;
3259 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3260 Rctl = w5p->hcsw.Rctl;
3261 Type = w5p->hcsw.Type;
3263 /* Firmware Workaround */
3264 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3265 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3266 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3267 Rctl = FC_RCTL_ELS_REQ;
3269 w5p->hcsw.Rctl = Rctl;
3270 w5p->hcsw.Type = Type;
3274 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3275 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3276 "0313 Ring %d handler: unexpected Rctl x%x "
3277 "Type x%x received\n",
3278 pring->ringno, Rctl, Type);
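/*
 * [Editor's sketch -- not driver code.] The pring->prt[] dispatch used in
 * lpfc_complete_unsol_iocb above: the received sequence's R_CTL/TYPE pair is
 * matched against a small registration table and the first matching handler
 * is invoked. Standalone model; all names are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

struct unsol_handler {
	uint8_t rctl;
	uint8_t type;
	void (*fn)(void *seq);
};

/* Returns 1 if a handler matched, 0 otherwise (caller logs "unexpected"). */
static int dispatch_unsol(const struct unsol_handler *tbl, size_t n,
			  uint8_t rctl, uint8_t type, void *seq)
{
	for (size_t i = 0; i < n; i++) {
		if (tbl[i].rctl == rctl && tbl[i].type == type) {
			if (tbl[i].fn)
				tbl[i].fn(seq);
			return 1;
		}
	}
	return 0;
}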
3284 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
3285 * @phba: Pointer to HBA context object.
3286 * @pring: Pointer to driver SLI ring object.
3287 * @prspiocb: Pointer to response iocb object.
3289 * This function looks up the iocb_lookup table to get the command iocb
3290 * corresponding to the given response iocb using the iotag of the
3291 * response iocb. The driver calls this function with the hbalock held
3292 * for SLI3 ports or the ring lock held for SLI4 ports.
3293 * This function returns the command iocb object if it finds the command
3294 * iocb else returns NULL.
3296 static struct lpfc_iocbq *
3297 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3298 struct lpfc_sli_ring *pring,
3299 struct lpfc_iocbq *prspiocb)
3301 struct lpfc_iocbq *cmd_iocb = NULL;
3303 spinlock_t *temp_lock = NULL;
3304 unsigned long iflag = 0;
3306 if (phba->sli_rev == LPFC_SLI_REV4)
3307 temp_lock = &pring->ring_lock;
3309 temp_lock = &phba->hbalock;
3311 spin_lock_irqsave(temp_lock, iflag);
3312 iotag = prspiocb->iocb.ulpIoTag;
3314 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3315 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3316 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3317 /* remove from txcmpl queue list */
3318 list_del_init(&cmd_iocb->list);
3319 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3320 pring->txcmplq_cnt--;
3321 spin_unlock_irqrestore(temp_lock, iflag);
3326 spin_unlock_irqrestore(temp_lock, iflag);
3327 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3328 "0317 iotag x%x is out of "
3329 "range: max iotag x%x wd0 x%x\n",
3330 iotag, phba->sli.last_iotag,
3331 *(((uint32_t *) &prspiocb->iocb) + 7));
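/*
 * [Editor's illustration -- not driver code.] The lookup above in miniature:
 * the response carries the iotag that lpfc_sli_next_iotag assigned, and a
 * bounds check against last_iotag guards the array access before the command
 * context is recovered. Hypothetical names; locking elided for brevity.
 */
#include <stddef.h>
#include <stdint.h>

struct toy_cmd { int on_txcmplq; };

struct toy_sli {
	struct toy_cmd **lookup;
	uint16_t last_iotag;
};

static struct toy_cmd *cmd_from_iotag(struct toy_sli *s, uint16_t iotag)
{
	struct toy_cmd *cmd;

	if (iotag == 0 || iotag > s->last_iotag)
		return NULL;		/* out of range: log and bail */
	cmd = s->lookup[iotag];
	if (!cmd || !cmd->on_txcmplq)
		return NULL;		/* stale or already completed */
	cmd->on_txcmplq = 0;		/* the driver also unlinks from txcmplq */
	return cmd;
}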
3336 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3337 * @phba: Pointer to HBA context object.
3338 * @pring: Pointer to driver SLI ring object.
3341 * This function looks up the iocb_lookup table to get the command iocb
3342 * corresponding to the given iotag. The driver calls this function with
3343 * the ring lock held because this function is an SLI4 port only helper.
3344 * This function returns the command iocb object if it finds the command
3345 * iocb else returns NULL.
3347 static struct lpfc_iocbq *
3348 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3349 struct lpfc_sli_ring *pring, uint16_t iotag)
3351 struct lpfc_iocbq *cmd_iocb = NULL;
3352 spinlock_t *temp_lock = NULL;
3353 unsigned long iflag = 0;
3355 if (phba->sli_rev == LPFC_SLI_REV4)
3356 temp_lock = &pring->ring_lock;
3358 temp_lock = &phba->hbalock;
3360 spin_lock_irqsave(temp_lock, iflag);
3361 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3362 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3363 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3364 /* remove from txcmpl queue list */
3365 list_del_init(&cmd_iocb->list);
3366 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3367 pring->txcmplq_cnt--;
3368 spin_unlock_irqrestore(temp_lock, iflag);
3373 spin_unlock_irqrestore(temp_lock, iflag);
3374 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3375 "0372 iotag x%x lookup error: max iotag (x%x) "
3377 iotag, phba->sli.last_iotag,
3378 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3383 * lpfc_sli_process_sol_iocb - process solicited iocb completion
3384 * @phba: Pointer to HBA context object.
3385 * @pring: Pointer to driver SLI ring object.
3386 * @saveq: Pointer to the response iocb to be processed.
3388 * This function is called by the ring event handler for non-fcp
3389 * rings when there is a new response iocb in the response ring.
3390 * The caller is not required to hold any locks. This function
3391 * gets the command iocb associated with the response iocb and
3392 * calls the completion handler for the command iocb. If there
3393 * is no completion handler, the function will free the resources
3394 * associated with command iocb. If the response iocb is for
3395 * an already aborted command iocb, the status of the completion
3396 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3397 * This function always returns 1.
3400 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3401 struct lpfc_iocbq *saveq)
3403 struct lpfc_iocbq *cmdiocbp;
3405 unsigned long iflag;
3407 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3409 if (cmdiocbp->iocb_cmpl) {
3411 * If an ELS command failed send an event to mgmt
3414 if (saveq->iocb.ulpStatus &&
3415 (pring->ringno == LPFC_ELS_RING) &&
3416 (cmdiocbp->iocb.ulpCommand ==
3417 CMD_ELS_REQUEST64_CR))
3418 lpfc_send_els_failure_event(phba,
3422 * Post all ELS completions to the worker thread.
3423 * All other are passed to the completion callback.
3425 if (pring->ringno == LPFC_ELS_RING) {
3426 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3427 (cmdiocbp->iocb_flag &
3428 LPFC_DRIVER_ABORTED)) {
3429 spin_lock_irqsave(&phba->hbalock,
3431 cmdiocbp->iocb_flag &=
3432 ~LPFC_DRIVER_ABORTED;
3433 spin_unlock_irqrestore(&phba->hbalock,
3435 saveq->iocb.ulpStatus =
3436 IOSTAT_LOCAL_REJECT;
3437 saveq->iocb.un.ulpWord[4] =
3440 /* Firmware could still be in the process
3441 * of DMAing the payload, so don't free the
3442 * data buffer till after a heartbeat.
3444 spin_lock_irqsave(&phba->hbalock,
3446 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
3447 spin_unlock_irqrestore(&phba->hbalock,
3450 if (phba->sli_rev == LPFC_SLI_REV4) {
3451 if (saveq->iocb_flag &
3452 LPFC_EXCHANGE_BUSY) {
3453 /* Set cmdiocb flag for the
3454 * exchange busy so sgl (xri)
3455 * will not be released until
3456 * the abort xri is received
3460 &phba->hbalock, iflag);
3461 cmdiocbp->iocb_flag |=
3463 spin_unlock_irqrestore(
3464 &phba->hbalock, iflag);
3466 if (cmdiocbp->iocb_flag &
3467 LPFC_DRIVER_ABORTED) {
3469 * Clear LPFC_DRIVER_ABORTED
3470 * bit in case it was driver
3474 &phba->hbalock, iflag);
3475 cmdiocbp->iocb_flag &=
3476 ~LPFC_DRIVER_ABORTED;
3477 spin_unlock_irqrestore(
3478 &phba->hbalock, iflag);
3479 cmdiocbp->iocb.ulpStatus =
3480 IOSTAT_LOCAL_REJECT;
3481 cmdiocbp->iocb.un.ulpWord[4] =
3482 IOERR_ABORT_REQUESTED;
3484 * For SLI4, the rsp iocb contains
3485 * NO_XRI in sli_xritag, so it
3486 * shall not affect the sgl (xri)
3487 * release process.
3489 saveq->iocb.ulpStatus =
3490 IOSTAT_LOCAL_REJECT;
3491 saveq->iocb.un.ulpWord[4] =
3494 &phba->hbalock, iflag);
3496 LPFC_DELAY_MEM_FREE;
3497 spin_unlock_irqrestore(
3498 &phba->hbalock, iflag);
3502 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
3504 lpfc_sli_release_iocbq(phba, cmdiocbp);
3507 * Unknown initiating command based on the response iotag.
3508 * This could be the case on the ELS ring because of
3511 if (pring->ringno != LPFC_ELS_RING) {
3513 * Ring <ringno> handler: unexpected completion IoTag
3516 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3517 "0322 Ring %d handler: "
3518 "unexpected completion IoTag x%x "
3519 "Data: x%x x%x x%x x%x\n",
3521 saveq->iocb.ulpIoTag,
3522 saveq->iocb.ulpStatus,
3523 saveq->iocb.un.ulpWord[4],
3524 saveq->iocb.ulpCommand,
3525 saveq->iocb.ulpContext);
3533 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3534 * @phba: Pointer to HBA context object.
3535 * @pring: Pointer to driver SLI ring object.
3537 * This function is called from the iocb ring event handlers when
3538 * the put pointer is ahead of the get pointer for a ring. This function signals
3539 * an error attention condition to the worker thread and the worker
3540 * thread will transition the HBA to offline state.
3543 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3545 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3547 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3548 * rsp ring <portRspMax>
3550 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3551 "0312 Ring %d handler: portRspPut %d "
3552 "is bigger than rsp ring %d\n",
3553 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3554 pring->sli.sli3.numRiocb);
3556 phba->link_state = LPFC_HBA_ERROR;
3559 * All error attention handlers are posted to
3562 phba->work_ha |= HA_ERATT;
3563 phba->work_hs = HS_FFER3;
3565 lpfc_worker_wake_up(phba);
3571 * lpfc_poll_eratt - Error attention polling timer timeout handler
3572 * @t: Context to fetch pointer to address of HBA context object from.
3574 * This function is invoked by the Error Attention polling timer when the
3575 * timer times out. It will check the SLI Error Attention register for
3576 * possible attention events. If so, it will post an Error Attention event
3577 * and wake up worker thread to process it. Otherwise, it will set up the
3578 * Error Attention polling timer for the next poll.
3580 void lpfc_poll_eratt(struct timer_list *t)
3582 struct lpfc_hba *phba;
3584 uint64_t sli_intr, cnt;
3586 phba = from_timer(phba, t, eratt_poll);
3588 /* Here we also keep track of interrupts per second for the hba */
3589 sli_intr = phba->sli.slistat.sli_intr;
3591 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3592 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3595 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3597 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3598 do_div(cnt, phba->eratt_poll_interval);
3599 phba->sli.slistat.sli_ips = cnt;
3601 phba->sli.slistat.sli_prev_intr = sli_intr;
3603 /* Check chip HA register for error event */
3604 eratt = lpfc_sli_check_eratt(phba);
3607 /* Tell the worker thread there is work to do */
3608 lpfc_worker_wake_up(phba);
3610 /* Restart the timer for next eratt poll */
3611 mod_timer(&phba->eratt_poll,
3613 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
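/*
 * [Editor's sketch -- not driver code.] The interrupt-rate math above: when
 * the running counter has wrapped (prev > cur), the delta is computed across
 * the wrap point before do_div() turns it into interrupts per interval.
 * Plain C model with a hypothetical helper name.
 */
#include <stdint.h>

static uint64_t counter_delta(uint64_t prev, uint64_t cur)
{
	if (prev > cur)		/* counter wrapped since the last poll */
		return (UINT64_MAX - prev) + cur;
	return cur - prev;
}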
3619 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3620 * @phba: Pointer to HBA context object.
3621 * @pring: Pointer to driver SLI ring object.
3622 * @mask: Host attention register mask for this ring.
3624 * This function is called from the interrupt context when there is a ring
3625 * event for the fcp ring. The caller does not hold any lock.
3626 * The function processes each response iocb in the response ring until it
3627 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
3628 * LE bit set. The function will call the completion handler of the command iocb
3629 * if the response iocb indicates a completion for a command iocb or it is
3630 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3631 * function if this is an unsolicited iocb.
3632 * This routine presumes LPFC_FCP_RING handling and doesn't bother
3633 * to check it explicitly.
3636 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3637 struct lpfc_sli_ring *pring, uint32_t mask)
3639 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3640 IOCB_t *irsp = NULL;
3641 IOCB_t *entry = NULL;
3642 struct lpfc_iocbq *cmdiocbq = NULL;
3643 struct lpfc_iocbq rspiocbq;
3645 uint32_t portRspPut, portRspMax;
3647 lpfc_iocb_type type;
3648 unsigned long iflag;
3649 uint32_t rsp_cmpl = 0;
3651 spin_lock_irqsave(&phba->hbalock, iflag);
3652 pring->stats.iocb_event++;
3655 * The next available response entry should never exceed the maximum
3656 * entries. If it does, treat it as an adapter hardware error.
3658 portRspMax = pring->sli.sli3.numRiocb;
3659 portRspPut = le32_to_cpu(pgp->rspPutInx);
3660 if (unlikely(portRspPut >= portRspMax)) {
3661 lpfc_sli_rsp_pointers_error(phba, pring);
3662 spin_unlock_irqrestore(&phba->hbalock, iflag);
3665 if (phba->fcp_ring_in_use) {
3666 spin_unlock_irqrestore(&phba->hbalock, iflag);
3669 phba->fcp_ring_in_use = 1;
3672 while (pring->sli.sli3.rspidx != portRspPut) {
3674 * Fetch an entry off the ring and copy it into a local data
3675 * structure. The copy involves a byte-swap since the
3676 * network byte order and pci byte orders are different.
3678 entry = lpfc_resp_iocb(phba, pring);
3679 phba->last_completion_time = jiffies;
3681 if (++pring->sli.sli3.rspidx >= portRspMax)
3682 pring->sli.sli3.rspidx = 0;
3684 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3685 (uint32_t *) &rspiocbq.iocb,
3686 phba->iocb_rsp_size);
3687 INIT_LIST_HEAD(&(rspiocbq.list));
3688 irsp = &rspiocbq.iocb;
3690 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3691 pring->stats.iocb_rsp++;
3694 if (unlikely(irsp->ulpStatus)) {
3696 * If resource errors reported from HBA, reduce
3697 * queuedepths of the SCSI device.
3699 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3700 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3701 IOERR_NO_RESOURCES)) {
3702 spin_unlock_irqrestore(&phba->hbalock, iflag);
3703 phba->lpfc_rampdown_queue_depth(phba);
3704 spin_lock_irqsave(&phba->hbalock, iflag);
3707 /* Rsp ring <ringno> error: IOCB */
3708 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3709 "0336 Rsp Ring %d error: IOCB Data: "
3710 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3712 irsp->un.ulpWord[0],
3713 irsp->un.ulpWord[1],
3714 irsp->un.ulpWord[2],
3715 irsp->un.ulpWord[3],
3716 irsp->un.ulpWord[4],
3717 irsp->un.ulpWord[5],
3718 *(uint32_t *)&irsp->un1,
3719 *((uint32_t *)&irsp->un1 + 1));
3723 case LPFC_ABORT_IOCB:
3726 * Idle exchange closed via ABTS from port. No iocb
3727 * resources need to be recovered.
3729 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3730 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3731 "0333 IOCB cmd 0x%x"
3732 " processed. Skipping"
3738 spin_unlock_irqrestore(&phba->hbalock, iflag);
3739 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3741 spin_lock_irqsave(&phba->hbalock, iflag);
3742 if (unlikely(!cmdiocbq))
3744 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3745 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3746 if (cmdiocbq->iocb_cmpl) {
3747 spin_unlock_irqrestore(&phba->hbalock, iflag);
3748 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3750 spin_lock_irqsave(&phba->hbalock, iflag);
3753 case LPFC_UNSOL_IOCB:
3754 spin_unlock_irqrestore(&phba->hbalock, iflag);
3755 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3756 spin_lock_irqsave(&phba->hbalock, iflag);
3759 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3760 char adaptermsg[LPFC_MAX_ADPTMSG];
3761 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3762 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3764 dev_warn(&((phba->pcidev)->dev),
3766 phba->brd_no, adaptermsg);
3768 /* Unknown IOCB command */
3769 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3770 "0334 Unknown IOCB command "
3771 "Data: x%x, x%x x%x x%x x%x\n",
3772 type, irsp->ulpCommand,
3781 * The response IOCB has been processed. Update the ring
3782 * pointer in SLIM. If the port response put pointer has not
3783 * been updated, sync the pgp->rspPutInx and fetch the new port
3784 * response put pointer.
3786 writel(pring->sli.sli3.rspidx,
3787 &phba->host_gp[pring->ringno].rspGetInx);
3789 if (pring->sli.sli3.rspidx == portRspPut)
3790 portRspPut = le32_to_cpu(pgp->rspPutInx);
3793 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3794 pring->stats.iocb_rsp_full++;
3795 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3796 writel(status, phba->CAregaddr);
3797 readl(phba->CAregaddr);
3799 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3800 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3801 pring->stats.iocb_cmd_empty++;
3803 /* Force update of the local copy of cmdGetInx */
3804 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3805 lpfc_sli_resume_iocb(phba, pring);
3807 if ((pring->lpfc_sli_cmd_available))
3808 (pring->lpfc_sli_cmd_available) (phba, pring);
3812 phba->fcp_ring_in_use = 0;
3813 spin_unlock_irqrestore(&phba->hbalock, iflag);
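/*
 * [Editor's illustration -- not driver code.] The copy at the top of the
 * response loop above: entries are copied out of the ring word by word
 * through a byte-order conversion (the driver's lpfc_sli_pcimem_bcopy).
 * Modeled here as a little-endian-to-host conversion; the exact direction on
 * real hardware depends on the host CPU. Names are hypothetical.
 */
#include <stdint.h>
#include <stddef.h>

static uint32_t le32_to_host(uint32_t v)
{
	const union { uint32_t u; uint8_t b[4]; } probe = { .u = 1 };

	return probe.b[0] ? v :	/* little-endian host: no-op */
	       ((v >> 24) | ((v >> 8) & 0xff00) |
		((v & 0xff00) << 8) | (v << 24));
}

static void pcimem_word_copy(const uint32_t *src, uint32_t *dst, size_t bytes)
{
	for (size_t i = 0; i < bytes / sizeof(uint32_t); i++)
		dst[i] = le32_to_host(src[i]);
}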
3818 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3819 * @phba: Pointer to HBA context object.
3820 * @pring: Pointer to driver SLI ring object.
3821 * @rspiocbp: Pointer to driver response IOCB object.
3823 * This function is called from the worker thread when there is a slow-path
3824 * response IOCB to process. This function chains all the response iocbs until
3825 * seeing the iocb with the LE bit set. The function will call
3826 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3827 * completion of a command iocb. The function will call the
3828 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3829 * The function frees the resources or calls the completion handler if this
3830 * iocb is an abort completion. The function returns NULL when the response
3831 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3832 * this function shall chain the iocb on to the iocb_continueq and return the
3833 * response iocb passed in.
3835 static struct lpfc_iocbq *
3836 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3837 struct lpfc_iocbq *rspiocbp)
3839 struct lpfc_iocbq *saveq;
3840 struct lpfc_iocbq *cmdiocbp;
3841 struct lpfc_iocbq *next_iocb;
3842 IOCB_t *irsp = NULL;
3843 uint32_t free_saveq;
3844 uint8_t iocb_cmd_type;
3845 lpfc_iocb_type type;
3846 unsigned long iflag;
3849 spin_lock_irqsave(&phba->hbalock, iflag);
3850 /* First add the response iocb to the continueq list */
3851 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3852 pring->iocb_continueq_cnt++;
3854 /* Now, determine whether the list is completed for processing */
3855 irsp = &rspiocbp->iocb;
3858 * By default, the driver expects to free all resources
3859 * associated with this iocb completion.
3862 saveq = list_get_first(&pring->iocb_continueq,
3863 struct lpfc_iocbq, list);
3864 irsp = &(saveq->iocb);
3865 list_del_init(&pring->iocb_continueq);
3866 pring->iocb_continueq_cnt = 0;
3868 pring->stats.iocb_rsp++;
3871 * If resource errors reported from HBA, reduce
3872 * queuedepths of the SCSI device.
3874 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3875 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3876 IOERR_NO_RESOURCES)) {
3877 spin_unlock_irqrestore(&phba->hbalock, iflag);
3878 phba->lpfc_rampdown_queue_depth(phba);
3879 spin_lock_irqsave(&phba->hbalock, iflag);
3882 if (irsp->ulpStatus) {
3883 /* Rsp ring <ringno> error: IOCB */
3884 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3885 "0328 Rsp Ring %d error: "
3890 "x%x x%x x%x x%x\n",
3892 irsp->un.ulpWord[0],
3893 irsp->un.ulpWord[1],
3894 irsp->un.ulpWord[2],
3895 irsp->un.ulpWord[3],
3896 irsp->un.ulpWord[4],
3897 irsp->un.ulpWord[5],
3898 *(((uint32_t *) irsp) + 6),
3899 *(((uint32_t *) irsp) + 7),
3900 *(((uint32_t *) irsp) + 8),
3901 *(((uint32_t *) irsp) + 9),
3902 *(((uint32_t *) irsp) + 10),
3903 *(((uint32_t *) irsp) + 11),
3904 *(((uint32_t *) irsp) + 12),
3905 *(((uint32_t *) irsp) + 13),
3906 *(((uint32_t *) irsp) + 14),
3907 *(((uint32_t *) irsp) + 15));
3911 * Fetch the IOCB command type and call the correct completion
3912 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3913 * get freed back to the lpfc_iocb_list by the discovery
3916 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3917 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3920 spin_unlock_irqrestore(&phba->hbalock, iflag);
3921 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3922 spin_lock_irqsave(&phba->hbalock, iflag);
3925 case LPFC_UNSOL_IOCB:
3926 spin_unlock_irqrestore(&phba->hbalock, iflag);
3927 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3928 spin_lock_irqsave(&phba->hbalock, iflag);
3933 case LPFC_ABORT_IOCB:
3935 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
3936 spin_unlock_irqrestore(&phba->hbalock, iflag);
3937 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3939 spin_lock_irqsave(&phba->hbalock, iflag);
3942 /* Call the specified completion routine */
3943 if (cmdiocbp->iocb_cmpl) {
3944 spin_unlock_irqrestore(&phba->hbalock,
3946 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3948 spin_lock_irqsave(&phba->hbalock,
3951 __lpfc_sli_release_iocbq(phba,
3956 case LPFC_UNKNOWN_IOCB:
3957 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3958 char adaptermsg[LPFC_MAX_ADPTMSG];
3959 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3960 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3961 MAX_MSG_DATA);
3962 dev_warn(&((phba->pcidev)->dev),
3963 "lpfc%d: %s\n",
3964 phba->brd_no, adaptermsg);
3965 } else {
3966 /* Unknown IOCB command */
3967 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3968 "0335 Unknown IOCB "
3969 "command Data: x%x "
3980 list_for_each_entry_safe(rspiocbp, next_iocb,
3981 &saveq->list, list) {
3982 list_del_init(&rspiocbp->list);
3983 __lpfc_sli_release_iocbq(phba, rspiocbp);
3985 __lpfc_sli_release_iocbq(phba, saveq);
3989 spin_unlock_irqrestore(&phba->hbalock, iflag);
3990 return rspiocbp;
3991 }
3994 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3995 * @phba: Pointer to HBA context object.
3996 * @pring: Pointer to driver SLI ring object.
3997 * @mask: Host attention register mask for this ring.
3999 * This routine invokes the actual slow-path ring event handler through the
4000 * API jump-table function pointer in the lpfc_hba struct.
4001 */
4002 void
4003 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
4004 struct lpfc_sli_ring *pring, uint32_t mask)
4006 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
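/*
 * Hedged sketch (the binding itself lives in the driver's API-table setup
 * code, not in this excerpt): the jump-table pointer invoked above is bound
 * once per SLI revision, roughly as:
 *
 *	switch (dev_grp) {
 *	case LPFC_PCI_DEV_LP:	// SLI3-era HBAs
 *		phba->lpfc_sli_handle_slow_ring_event =
 *				lpfc_sli_handle_slow_ring_event_s3;
 *		break;
 *	case LPFC_PCI_DEV_OC:	// SLI4 HBAs
 *		phba->lpfc_sli_handle_slow_ring_event =
 *				lpfc_sli_handle_slow_ring_event_s4;
 *		break;
 *	}
 */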
4010 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
4011 * @phba: Pointer to HBA context object.
4012 * @pring: Pointer to driver SLI ring object.
4013 * @mask: Host attention register mask for this ring.
4015 * This function is called from the worker thread when there is a ring event
4016 * for non-FCP rings. The caller does not hold any lock. The function
4017 * removes each response iocb from the response ring and calls the handle
4018 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4019 */
4020 static void
4021 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
4022 struct lpfc_sli_ring *pring, uint32_t mask)
4024 struct lpfc_pgp *pgp;
4026 IOCB_t *irsp = NULL;
4027 struct lpfc_iocbq *rspiocbp = NULL;
4028 uint32_t portRspPut, portRspMax;
4029 unsigned long iflag;
4032 pgp = &phba->port_gp[pring->ringno];
4033 spin_lock_irqsave(&phba->hbalock, iflag);
4034 pring->stats.iocb_event++;
4036 /*
4037 * The next available response entry should never exceed the maximum
4038 * entries. If it does, treat it as an adapter hardware error.
4039 */
4040 portRspMax = pring->sli.sli3.numRiocb;
4041 portRspPut = le32_to_cpu(pgp->rspPutInx);
4042 if (portRspPut >= portRspMax) {
4044 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
4045 * rsp ring <portRspMax>
4046 */
4047 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4048 "0303 Ring %d handler: portRspPut %d "
4049 "is bigger than rsp ring %d\n",
4050 pring->ringno, portRspPut, portRspMax);
4052 phba->link_state = LPFC_HBA_ERROR;
4053 spin_unlock_irqrestore(&phba->hbalock, iflag);
4055 phba->work_hs = HS_FFER3;
4056 lpfc_handle_eratt(phba);
4062 while (pring->sli.sli3.rspidx != portRspPut) {
4063 /*
4064 * Build a completion list and call the appropriate handler.
4065 * The process is to get the next available response iocb, get
4066 * a free iocb from the list, copy the response data into the
4067 * free iocb, insert to the continuation list, and update the
4068 * next response index to slim. This process makes response
4069 * iocbs in the ring available to DMA as fast as possible but
4070 * pays a penalty for a copy operation. Since the iocb is
4071 * only 32 bytes, this penalty is considered small relative to
4072 * the PCI reads for register values and a slim write. When
4073 * the ulpLe field is set, the entire Command has been
4074 * received.
4075 */
4076 entry = lpfc_resp_iocb(phba, pring);
4078 phba->last_completion_time = jiffies;
4079 rspiocbp = __lpfc_sli_get_iocbq(phba);
4080 if (rspiocbp == NULL) {
4081 printk(KERN_ERR "%s: out of buffers! Failing "
4082 "completion.\n", __func__);
4086 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
4087 phba->iocb_rsp_size);
4088 irsp = &rspiocbp->iocb;
4090 if (++pring->sli.sli3.rspidx >= portRspMax)
4091 pring->sli.sli3.rspidx = 0;
4093 if (pring->ringno == LPFC_ELS_RING) {
4094 lpfc_debugfs_slow_ring_trc(phba,
4095 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
4096 *(((uint32_t *) irsp) + 4),
4097 *(((uint32_t *) irsp) + 6),
4098 *(((uint32_t *) irsp) + 7));
4101 writel(pring->sli.sli3.rspidx,
4102 &phba->host_gp[pring->ringno].rspGetInx);
4104 spin_unlock_irqrestore(&phba->hbalock, iflag);
4105 /* Handle the response IOCB */
4106 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
4107 spin_lock_irqsave(&phba->hbalock, iflag);
4109 /*
4110 * If the port response put pointer has not been updated, sync
4111 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
4112 * response put pointer.
4113 */
4114 if (pring->sli.sli3.rspidx == portRspPut) {
4115 portRspPut = le32_to_cpu(pgp->rspPutInx);
4117 } /* while (pring->sli.sli3.rspidx != portRspPut) */
4119 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
4120 /* At least one response entry has been freed */
4121 pring->stats.iocb_rsp_full++;
4122 /* SET RxRE_RSP in Chip Att register */
4123 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4124 writel(status, phba->CAregaddr);
4125 readl(phba->CAregaddr); /* flush */
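/*
 * Note: the readl() above is the usual posted-write flush; reading any
 * register back from the adapter forces the preceding writel() out of the
 * PCI bridge's write buffers before execution continues.
 */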
4127 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4128 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4129 pring->stats.iocb_cmd_empty++;
4131 /* Force update of the local copy of cmdGetInx */
4132 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4133 lpfc_sli_resume_iocb(phba, pring);
4135 if ((pring->lpfc_sli_cmd_available))
4136 (pring->lpfc_sli_cmd_available) (phba, pring);
4140 spin_unlock_irqrestore(&phba->hbalock, iflag);
4145 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
4146 * @phba: Pointer to HBA context object.
4147 * @pring: Pointer to driver SLI ring object.
4148 * @mask: Host attention register mask for this ring.
4150 * This function is called from the worker thread when there is a pending
4151 * ELS response iocb on the driver internal slow-path response iocb worker
4152 * queue. The caller does not hold any lock. The function will remove each
4153 * response iocb from the response worker queue and calls the handle
4154 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4157 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4158 struct lpfc_sli_ring *pring, uint32_t mask)
4160 struct lpfc_iocbq *irspiocbq;
4161 struct hbq_dmabuf *dmabuf;
4162 struct lpfc_cq_event *cq_event;
4163 unsigned long iflag;
4166 spin_lock_irqsave(&phba->hbalock, iflag);
4167 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
4168 spin_unlock_irqrestore(&phba->hbalock, iflag);
4169 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4170 /* Get the response iocb from the head of work queue */
4171 spin_lock_irqsave(&phba->hbalock, iflag);
4172 list_remove_head(&phba->sli4_hba.sp_queue_event,
4173 cq_event, struct lpfc_cq_event, list);
4174 spin_unlock_irqrestore(&phba->hbalock, iflag);
4176 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4177 case CQE_CODE_COMPL_WQE:
4178 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4179 cq_event);
4180 /* Translate ELS WCQE to response IOCBQ */
4181 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
4182 irspiocbq);
4183 if (irspiocbq)
4184 lpfc_sli_sp_handle_rspiocb(phba, pring,
4185 irspiocbq);
4186 break;
4188 case CQE_CODE_RECEIVE:
4189 case CQE_CODE_RECEIVE_V1:
4190 dmabuf = container_of(cq_event, struct hbq_dmabuf,
4191 cq_event);
4192 lpfc_sli4_handle_received_buffer(phba, dmabuf);
4193 break;
4199 /* Limit the number of events to 64 to avoid soft lockups */
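/*
 * Hedged sketch of the bound referred to above (the loop epilogue is elided
 * in this excerpt): a local counter caps the drain, on the order of
 *
 *	if (count++ > 64)
 *		break;
 *
 * with "count" initialized to zero before the while loop.
 */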
4206 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
4207 * @phba: Pointer to HBA context object.
4208 * @pring: Pointer to driver SLI ring object.
4210 * This function aborts all iocbs in the given ring and frees all the iocb
4211 * objects in txq. This function issues an abort iocb for all the iocb commands
4212 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4213 * the return of this function. The caller is not required to hold any locks.
4216 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4218 LIST_HEAD(completions);
4219 struct lpfc_iocbq *iocb, *next_iocb;
4221 if (pring->ringno == LPFC_ELS_RING) {
4222 lpfc_fabric_abort_hba(phba);
4225 /* Error everything on txq and txcmplq.
4226 * First do the txq.
4227 */
4228 if (phba->sli_rev >= LPFC_SLI_REV4) {
4229 spin_lock_irq(&pring->ring_lock);
4230 list_splice_init(&pring->txq, &completions);
4232 spin_unlock_irq(&pring->ring_lock);
4234 spin_lock_irq(&phba->hbalock);
4235 /* Next issue ABTS for everything on the txcmplq */
4236 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4237 lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
4238 spin_unlock_irq(&phba->hbalock);
4240 spin_lock_irq(&phba->hbalock);
4241 list_splice_init(&pring->txq, &completions);
4244 /* Next issue ABTS for everything on the txcmplq */
4245 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4246 lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
4247 spin_unlock_irq(&phba->hbalock);
4249 /* Make sure HBA is alive */
4250 lpfc_issue_hb_tmo(phba);
4252 /* Cancel all the IOCBs from the completions list */
4253 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4254 IOERR_SLI_ABORTED);
4255 }
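/*
 * For reference, a hedged sketch of what lpfc_sli_cancel_iocbs() does with
 * the list handed to it (simplified; the real routine also releases iocbs
 * that carry no completion handler):
 *
 *	while (!list_empty(iocblist)) {
 *		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
 *		piocb->iocb.ulpStatus = ulpstatus;	// e.g. IOSTAT_LOCAL_REJECT
 *		piocb->iocb.un.ulpWord[4] = ulpWord4;	// e.g. IOERR_SLI_ABORTED
 *		(piocb->iocb_cmpl)(phba, piocb, piocb);
 *	}
 */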
4258 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4259 * @phba: Pointer to HBA context object.
4261 * This function aborts all iocbs in FCP rings and frees all the iocb
4262 * objects in txq. This function issues an abort iocb for all the iocb commands
4263 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4264 * the return of this function. The caller is not required to hold any locks.
4267 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4269 struct lpfc_sli *psli = &phba->sli;
4270 struct lpfc_sli_ring *pring;
4273 /* Look on all the FCP Rings for the iotag */
4274 if (phba->sli_rev >= LPFC_SLI_REV4) {
4275 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4276 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4277 lpfc_sli_abort_iocb_ring(phba, pring);
4280 pring = &psli->sli3_ring[LPFC_FCP_RING];
4281 lpfc_sli_abort_iocb_ring(phba, pring);
4286 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
4287 * @phba: Pointer to HBA context object.
4289 * This function flushes all iocbs in the IO ring and frees all the iocb
4290 * objects in txq and txcmplq. This function will not issue abort iocbs
4291 * for all the iocb commands in txcmplq; they are simply returned with
4292 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
4293 * slot has been permanently disabled.
4296 lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4300 struct lpfc_sli *psli = &phba->sli;
4301 struct lpfc_sli_ring *pring;
4303 struct lpfc_iocbq *piocb, *next_iocb;
4305 spin_lock_irq(&phba->hbalock);
4306 if (phba->hba_flag & HBA_IOQ_FLUSH ||
4307 !phba->sli4_hba.hdwq) {
4308 spin_unlock_irq(&phba->hbalock);
4311 /* Indicate the I/O queues are flushed */
4312 phba->hba_flag |= HBA_IOQ_FLUSH;
4313 spin_unlock_irq(&phba->hbalock);
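/*
 * Side note: HBA_IOQ_FLUSH does double duty; the I/O submission paths test
 * it before issuing new commands, so once it is set here, new requests are
 * failed back to the upper layer until the flag is cleared on
 * re-initialization.
 */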
4315 /* Look on all the FCP Rings for the iotag */
4316 if (phba->sli_rev >= LPFC_SLI_REV4) {
4317 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4318 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4320 spin_lock_irq(&pring->ring_lock);
4321 /* Retrieve everything on txq */
4322 list_splice_init(&pring->txq, &txq);
4323 list_for_each_entry_safe(piocb, next_iocb,
4324 &pring->txcmplq, list)
4325 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4326 /* Retrieve everything on the txcmplq */
4327 list_splice_init(&pring->txcmplq, &txcmplq);
4329 pring->txcmplq_cnt = 0;
4330 spin_unlock_irq(&pring->ring_lock);
4333 lpfc_sli_cancel_iocbs(phba, &txq,
4334 IOSTAT_LOCAL_REJECT,
4335 IOERR_SLI_DOWN);
4336 /* Flush the txcmplq */
4337 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4338 IOSTAT_LOCAL_REJECT,
4339 IOERR_SLI_DOWN);
4340 }
4341 } else {
4342 pring = &psli->sli3_ring[LPFC_FCP_RING];
4344 spin_lock_irq(&phba->hbalock);
4345 /* Retrieve everything on txq */
4346 list_splice_init(&pring->txq, &txq);
4347 list_for_each_entry_safe(piocb, next_iocb,
4348 &pring->txcmplq, list)
4349 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4350 /* Retrieve everything on the txcmplq */
4351 list_splice_init(&pring->txcmplq, &txcmplq);
4353 pring->txcmplq_cnt = 0;
4354 spin_unlock_irq(&phba->hbalock);
4357 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4358 IOERR_SLI_DOWN);
4359 /* Flush the txcmplq */
4360 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4361 IOERR_SLI_DOWN);
4362 }
4363 }
4366 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4367 * @phba: Pointer to HBA context object.
4368 * @mask: Bit mask to be checked.
4370 * This function reads the host status register and compares it
4371 * with the provided bit mask to check if the HBA completed
4372 * the restart. This function will wait in a loop for the
4373 * HBA to complete restart. If the HBA does not restart within
4374 * 15 iterations, the function will reset the HBA again. The
4375 * function returns 1 when the HBA fails to restart; otherwise
4376 * it returns zero.
4377 */
4378 static int
4379 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4385 /* Read the HBA Host Status Register */
4386 if (lpfc_readl(phba->HSregaddr, &status))
4389 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
4392 * Check status register every 100ms for 5 retries, then every
4393 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
4394 * check every 2.5 sec for 4 more retries.
4395 * Break out of the loop if errors occurred during init.
4396 */
4397 while (((status & mask) != mask) &&
4398 !(status & HS_FFERM) &&
4410 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4411 lpfc_sli_brdrestart(phba);
4413 /* Read the HBA Host Status Register */
4414 if (lpfc_readl(phba->HSregaddr, &status)) {
4420 /* Check to see if any errors occurred during init */
4421 if ((status & HS_FFERM) || (i >= 20)) {
4422 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4423 "2751 Adapter failed to restart, "
4424 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4426 readl(phba->MBslimaddr + 0xa8),
4427 readl(phba->MBslimaddr + 0xac));
4428 phba->link_state = LPFC_HBA_ERROR;
4436 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4437 * @phba: Pointer to HBA context object.
4438 * @mask: Bit mask to be checked.
4440 * This function checks the host status register to check if the HBA is
4441 * ready. This function will wait in a loop for the HBA to become ready.
4442 * If the HBA is not ready, the function will reset the HBA PCI
4443 * function again. The function returns 1 when the HBA fails to become
4444 * ready; otherwise it returns zero.
4445 */
4446 static int
4447 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4452 /* Read the HBA Host Status Register */
4453 status = lpfc_sli4_post_status_check(phba);
4456 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4457 lpfc_sli_brdrestart(phba);
4458 status = lpfc_sli4_post_status_check(phba);
4461 /* Check to see if any errors occurred during init */
4462 if (status) {
4463 phba->link_state = LPFC_HBA_ERROR;
4464 retval = 1;
4465 } else
4466 phba->sli4_hba.intr_enable = 0;
4472 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4473 * @phba: Pointer to HBA context object.
4474 * @mask: Bit mask to be checked.
4476 * This routine invokes the actual SLI3 or SLI4 hba readiness check routine
4477 * through the API jump-table function pointer in the lpfc_hba struct.
4478 */
4479 int
4480 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4482 return phba->lpfc_sli_brdready(phba, mask);
4485 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4488 * lpfc_reset_barrier - Make HBA ready for HBA reset
4489 * @phba: Pointer to HBA context object.
4491 * This function is called before resetting an HBA. This function is called
4492 * with hbalock held and requests HBA to quiesce DMAs before a reset.
4494 void lpfc_reset_barrier(struct lpfc_hba *phba)
4496 uint32_t __iomem *resp_buf;
4497 uint32_t __iomem *mbox_buf;
4498 volatile uint32_t mbox;
4499 uint32_t hc_copy, ha_copy, resp_data;
4503 lockdep_assert_held(&phba->hbalock);
4505 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4506 if (hdrtype != 0x80 ||
4507 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4508 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4512 * Tell the other part of the chip to suspend temporarily all
4515 resp_buf = phba->MBslimaddr;
4517 /* Disable the error attention */
4518 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4520 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4521 readl(phba->HCregaddr); /* flush */
4522 phba->link_flag |= LS_IGNORE_ERATT;
4524 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4526 if (ha_copy & HA_ERATT) {
4527 /* Clear Chip error bit */
4528 writel(HA_ERATT, phba->HAregaddr);
4529 phba->pport->stopped = 1;
4533 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4534 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4536 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4537 mbox_buf = phba->MBslimaddr;
4538 writel(mbox, mbox_buf);
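/*
 * The chip acknowledges the suspend request by writing back the one's
 * complement of BARRIER_TEST_PATTERN into the response word; the polling
 * loops below wait for exactly that handshake.
 */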
4540 for (i = 0; i < 50; i++) {
4541 if (lpfc_readl((resp_buf + 1), &resp_data))
4543 if (resp_data != ~(BARRIER_TEST_PATTERN))
4549 if (lpfc_readl((resp_buf + 1), &resp_data))
4551 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4552 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4553 phba->pport->stopped)
4559 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4561 for (i = 0; i < 500; i++) {
4562 if (lpfc_readl(resp_buf, &resp_data))
4564 if (resp_data != mbox)
4573 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4575 if (!(ha_copy & HA_ERATT))
4581 if (readl(phba->HAregaddr) & HA_ERATT) {
4582 writel(HA_ERATT, phba->HAregaddr);
4583 phba->pport->stopped = 1;
4587 phba->link_flag &= ~LS_IGNORE_ERATT;
4588 writel(hc_copy, phba->HCregaddr);
4589 readl(phba->HCregaddr); /* flush */
4593 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4594 * @phba: Pointer to HBA context object.
4596 * This function issues a kill_board mailbox command and waits for
4597 * the error attention interrupt. This function is called for stopping
4598 * the firmware processing. The caller is not required to hold any
4599 * locks. This function calls lpfc_hba_down_post function to free
4600 * any pending commands after the kill. The function returns 1 if it
4601 * fails to kill the board; otherwise it returns 0.
4602 */
4603 int
4604 lpfc_sli_brdkill(struct lpfc_hba *phba)
4606 struct lpfc_sli *psli;
4616 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4617 "0329 Kill HBA Data: x%x x%x\n",
4618 phba->pport->port_state, psli->sli_flag);
4620 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4624 /* Disable the error attention */
4625 spin_lock_irq(&phba->hbalock);
4626 if (lpfc_readl(phba->HCregaddr, &status)) {
4627 spin_unlock_irq(&phba->hbalock);
4628 mempool_free(pmb, phba->mbox_mem_pool);
4631 status &= ~HC_ERINT_ENA;
4632 writel(status, phba->HCregaddr);
4633 readl(phba->HCregaddr); /* flush */
4634 phba->link_flag |= LS_IGNORE_ERATT;
4635 spin_unlock_irq(&phba->hbalock);
4637 lpfc_kill_board(phba, pmb);
4638 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4639 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4641 if (retval != MBX_SUCCESS) {
4642 if (retval != MBX_BUSY)
4643 mempool_free(pmb, phba->mbox_mem_pool);
4644 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4645 "2752 KILL_BOARD command failed retval %d\n",
4647 spin_lock_irq(&phba->hbalock);
4648 phba->link_flag &= ~LS_IGNORE_ERATT;
4649 spin_unlock_irq(&phba->hbalock);
4653 spin_lock_irq(&phba->hbalock);
4654 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4655 spin_unlock_irq(&phba->hbalock);
4657 mempool_free(pmb, phba->mbox_mem_pool);
4659 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4660 * attention every 100ms for 3 seconds. If we don't get ERATT after
4661 * 3 seconds we still set HBA_ERROR state because the status of the
4662 * board is now undefined.
4664 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4666 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4668 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4672 del_timer_sync(&psli->mbox_tmo);
4673 if (ha_copy & HA_ERATT) {
4674 writel(HA_ERATT, phba->HAregaddr);
4675 phba->pport->stopped = 1;
4677 spin_lock_irq(&phba->hbalock);
4678 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4679 psli->mbox_active = NULL;
4680 phba->link_flag &= ~LS_IGNORE_ERATT;
4681 spin_unlock_irq(&phba->hbalock);
4683 lpfc_hba_down_post(phba);
4684 phba->link_state = LPFC_HBA_ERROR;
4686 return ha_copy & HA_ERATT ? 0 : 1;
4690 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4691 * @phba: Pointer to HBA context object.
4693 * This function resets the HBA by writing HC_INITFF to the control
4694 * register. After the HBA resets, this function resets all the iocb ring
4695 * indices. This function disables PCI layer parity checking during
4696 * resets.
4697 * This function returns 0 always.
4698 * The caller is not required to hold any locks.
4701 lpfc_sli_brdreset(struct lpfc_hba *phba)
4703 struct lpfc_sli *psli;
4704 struct lpfc_sli_ring *pring;
4711 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4712 "0325 Reset HBA Data: x%x x%x\n",
4713 (phba->pport) ? phba->pport->port_state : 0,
4716 /* perform board reset */
4717 phba->fc_eventTag = 0;
4718 phba->link_events = 0;
4719 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
4721 phba->pport->fc_myDID = 0;
4722 phba->pport->fc_prevDID = 0;
4725 /* Turn off parity checking and serr during the physical reset */
4726 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
4729 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4730 (cfg_value &
4731 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4733 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4735 /* Now toggle INITFF bit in the Host Control Register */
4736 writel(HC_INITFF, phba->HCregaddr);
4738 readl(phba->HCregaddr); /* flush */
4739 writel(0, phba->HCregaddr);
4740 readl(phba->HCregaddr); /* flush */
4742 /* Restore PCI cmd register */
4743 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4745 /* Initialize relevant SLI info */
4746 for (i = 0; i < psli->num_rings; i++) {
4747 pring = &psli->sli3_ring[i];
4749 pring->sli.sli3.rspidx = 0;
4750 pring->sli.sli3.next_cmdidx = 0;
4751 pring->sli.sli3.local_getidx = 0;
4752 pring->sli.sli3.cmdidx = 0;
4753 pring->missbufcnt = 0;
4756 phba->link_state = LPFC_WARM_START;
4761 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4762 * @phba: Pointer to HBA context object.
4764 * This function resets a SLI4 HBA. This function disables PCI layer
4765 * parity checking while resetting the device. The caller is not
4766 * required to hold any locks.
4768 * This function returns 0 on success else returns negative error code.
4771 lpfc_sli4_brdreset(struct lpfc_hba *phba)
4773 struct lpfc_sli *psli = &phba->sli;
4778 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4779 "0295 Reset HBA Data: x%x x%x x%x\n",
4780 phba->pport->port_state, psli->sli_flag,
4783 /* perform board reset */
4784 phba->fc_eventTag = 0;
4785 phba->link_events = 0;
4786 phba->pport->fc_myDID = 0;
4787 phba->pport->fc_prevDID = 0;
4789 spin_lock_irq(&phba->hbalock);
4790 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4791 phba->fcf.fcf_flag = 0;
4792 spin_unlock_irq(&phba->hbalock);
4794 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4795 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4796 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4800 /* Now physically reset the device */
4801 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4802 "0389 Performing PCI function reset!\n");
4804 /* Turn off parity checking and serr during the physical reset */
4805 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
4806 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4807 "3205 PCI read Config failed\n");
4811 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4812 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4814 /* Perform FCoE PCI function reset before freeing queue memory */
4815 rc = lpfc_pci_function_reset(phba);
4817 /* Restore PCI cmd register */
4818 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4824 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4825 * @phba: Pointer to HBA context object.
4827 * This function is called in the SLI initialization code path to
4828 * restart the HBA. The caller is not required to hold any lock.
4829 * This function writes MBX_RESTART mailbox command to the SLIM and
4830 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4831 * function to free any pending commands. The function enables
4832 * POST only during the first initialization. The function returns zero.
4833 * The function does not guarantee that the MBX_RESTART mailbox
4834 * command has completed before it returns.
4837 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4840 struct lpfc_sli *psli;
4841 volatile uint32_t word0;
4842 void __iomem *to_slim;
4843 uint32_t hba_aer_enabled;
4845 spin_lock_irq(&phba->hbalock);
4847 /* Take PCIe device Advanced Error Reporting (AER) state */
4848 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4853 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4854 "0337 Restart HBA Data: x%x x%x\n",
4855 (phba->pport) ? phba->pport->port_state : 0,
4859 mb = (MAILBOX_t *) &word0;
4860 mb->mbxCommand = MBX_RESTART;
4863 lpfc_reset_barrier(phba);
4865 to_slim = phba->MBslimaddr;
4866 writel(*(uint32_t *) mb, to_slim);
4867 readl(to_slim); /* flush */
4869 /* Only skip post after fc_ffinit is completed */
4870 if (phba->pport && phba->pport->port_state)
4871 word0 = 1; /* This is really setting up word1 */
4873 word0 = 0; /* This is really setting up word1 */
4874 to_slim = phba->MBslimaddr + sizeof (uint32_t);
4875 writel(*(uint32_t *) mb, to_slim);
4876 readl(to_slim); /* flush */
4878 lpfc_sli_brdreset(phba);
4880 phba->pport->stopped = 0;
4881 phba->link_state = LPFC_INIT_START;
4883 spin_unlock_irq(&phba->hbalock);
4885 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4886 psli->stats_start = ktime_get_seconds();
4888 /* Give the INITFF and Post time to settle. */
4891 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4892 if (hba_aer_enabled)
4893 pci_disable_pcie_error_reporting(phba->pcidev);
4895 lpfc_hba_down_post(phba);
4901 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4902 * @phba: Pointer to HBA context object.
4904 * This function is called in the SLI initialization code path to restart
4905 * a SLI4 HBA. The caller is not required to hold any lock.
4906 * At the end of the function, it calls lpfc_hba_down_post function to
4907 * free any pending commands.
4910 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4912 struct lpfc_sli *psli = &phba->sli;
4913 uint32_t hba_aer_enabled;
4917 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4918 "0296 Restart HBA Data: x%x x%x\n",
4919 phba->pport->port_state, psli->sli_flag);
4921 /* Take PCIe device Advanced Error Reporting (AER) state */
4922 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4924 rc = lpfc_sli4_brdreset(phba);
4926 phba->link_state = LPFC_HBA_ERROR;
4927 goto hba_down_queue;
4930 spin_lock_irq(&phba->hbalock);
4931 phba->pport->stopped = 0;
4932 phba->link_state = LPFC_INIT_START;
4934 spin_unlock_irq(&phba->hbalock);
4936 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4937 psli->stats_start = ktime_get_seconds();
4939 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4940 if (hba_aer_enabled)
4941 pci_disable_pcie_error_reporting(phba->pcidev);
4944 lpfc_hba_down_post(phba);
4945 lpfc_sli4_queue_destroy(phba);
4951 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4952 * @phba: Pointer to HBA context object.
4954 * This routine invokes the actual SLI3 or SLI4 hba restart routine through
4955 * the API jump-table function pointer in the lpfc_hba struct.
4956 */
4957 int
4958 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4960 return phba->lpfc_sli_brdrestart(phba);
4964 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4965 * @phba: Pointer to HBA context object.
4967 * This function is called after an HBA restart to wait for successful
4968 * restart of the HBA. Successful restart of the HBA is indicated by
4969 * the HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after
4970 * 15 iterations, the function will restart the HBA again. The function
4971 * returns zero if the HBA successfully restarted, else it returns a
4972 * negative error code.
4973 */
4974 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4976 uint32_t status, i = 0;
4978 /* Read the HBA Host Status Register */
4979 if (lpfc_readl(phba->HSregaddr, &status))
4982 /* Check status register to see what current state is */
4984 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4986 /* Check every 10ms for 10 retries, then every 100ms for 90
4987 * retries, then every 1 sec for 50 retries, for a total of
4988 * ~60 seconds before resetting the board again and checking
4989 * every 1 sec for 50 retries. Up to 60 seconds before the
4990 * board is ready is required for Falcon FIPS zeroization to
4991 * complete; any board reset in between restarts the
4992 * zeroization and further delays board readiness.
4993 */
4994 if (i++ >= 200) {
4995 /* Adapter failed to init, timeout, status reg
4997 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4998 "0436 Adapter failed to init, "
4999 "timeout, status reg x%x, "
5000 "FW Data: A8 x%x AC x%x\n", status,
5001 readl(phba->MBslimaddr + 0xa8),
5002 readl(phba->MBslimaddr + 0xac));
5003 phba->link_state = LPFC_HBA_ERROR;
5007 /* Check to see if any errors occurred during init */
5008 if (status & HS_FFERM) {
5009 /* ERROR: During chipset initialization */
5010 /* Adapter failed to init, chipset, status reg
5012 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5013 "0437 Adapter failed to init, "
5014 "chipset, status reg x%x, "
5015 "FW Data: A8 x%x AC x%x\n", status,
5016 readl(phba->MBslimaddr + 0xa8),
5017 readl(phba->MBslimaddr + 0xac));
5018 phba->link_state = LPFC_HBA_ERROR;
5031 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5032 lpfc_sli_brdrestart(phba);
5034 /* Read the HBA Host Status Register */
5035 if (lpfc_readl(phba->HSregaddr, &status))
5039 /* Check to see if any errors occurred during init */
5040 if (status & HS_FFERM) {
5041 /* ERROR: During chipset initialization */
5042 /* Adapter failed to init, chipset, status reg <status> */
5043 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5044 "0438 Adapter failed to init, chipset, "
5046 "FW Data: A8 x%x AC x%x\n", status,
5047 readl(phba->MBslimaddr + 0xa8),
5048 readl(phba->MBslimaddr + 0xac));
5049 phba->link_state = LPFC_HBA_ERROR;
5053 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
5055 /* Clear all interrupt enable conditions */
5056 writel(0, phba->HCregaddr);
5057 readl(phba->HCregaddr); /* flush */
5059 /* setup host attn register */
5060 writel(0xffffffff, phba->HAregaddr);
5061 readl(phba->HAregaddr); /* flush */
5066 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
5068 * This function calculates and returns the number of HBQs required to be
5069 * configured.
5070 */
5071 static int
5072 lpfc_sli_hbq_count(void)
5074 return ARRAY_SIZE(lpfc_hbq_defs);
5078 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
5080 * This function adds the number of hbq entries in every HBQ to get
5081 * the total number of hbq entries required for the HBA and returns
5082 * the result.
5083 */
5084 static int
5085 lpfc_sli_hbq_entry_count(void)
5087 int hbq_count = lpfc_sli_hbq_count();
5091 for (i = 0; i < hbq_count; ++i)
5092 count += lpfc_hbq_defs[i]->entry_count;
5097 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
5099 * This function calculates amount of memory required for all hbq entries
5100 * to be configured and returns the total memory required.
5103 lpfc_sli_hbq_size(void)
5105 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
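/*
 * Worked example (the counts and entry size here are illustrative
 * assumptions, not necessarily the values in lpfc_hbq_defs): two HBQs of
 * 256 and 16 entries with a 16-byte struct lpfc_hbq_entry would need
 * (256 + 16) * 16 = 4352 bytes.
 */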
5109 * lpfc_sli_hbq_setup - configure and initialize HBQs
5110 * @phba: Pointer to HBA context object.
5112 * This function is called during the SLI initialization to configure
5113 * all the HBQs and post buffers to the HBQ. The caller is not
5114 * required to hold any locks. This function returns zero if successful;
5115 * else it returns a negative error code.
5118 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
5120 int hbq_count = lpfc_sli_hbq_count();
5124 uint32_t hbq_entry_index;
5126 /* Get a Mailbox buffer to setup mailbox
5127 * commands for HBA initialization
5129 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5136 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
5137 phba->link_state = LPFC_INIT_MBX_CMDS;
5138 phba->hbq_in_use = 1;
5140 hbq_entry_index = 0;
5141 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
5142 phba->hbqs[hbqno].next_hbqPutIdx = 0;
5143 phba->hbqs[hbqno].hbqPutIdx = 0;
5144 phba->hbqs[hbqno].local_hbqGetIdx = 0;
5145 phba->hbqs[hbqno].entry_count =
5146 lpfc_hbq_defs[hbqno]->entry_count;
5147 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
5148 hbq_entry_index, pmb);
5149 hbq_entry_index += phba->hbqs[hbqno].entry_count;
5151 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
5152 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
5153 mbxStatus <status>, ring <num> */
5155 lpfc_printf_log(phba, KERN_ERR,
5156 LOG_SLI | LOG_VPORT,
5157 "1805 Adapter failed to init. "
5158 "Data: x%x x%x x%x\n",
5160 pmbox->mbxStatus, hbqno);
5162 phba->link_state = LPFC_HBA_ERROR;
5163 mempool_free(pmb, phba->mbox_mem_pool);
5167 phba->hbq_count = hbq_count;
5169 mempool_free(pmb, phba->mbox_mem_pool);
5171 /* Initially populate or replenish the HBQs */
5172 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5173 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
5178 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
5179 * @phba: Pointer to HBA context object.
5181 * This function is called during the SLI initialization to configure
5182 * all the HBQs and post buffers to the HBQ. The caller is not
5183 * required to hold any locks. This function returns zero if successful;
5184 * else it returns a negative error code.
5187 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5189 phba->hbq_in_use = 1;
5190 /*
5191 * Specific case when the MDS diagnostics is enabled and supported.
5192 * The receive buffer count is truncated to manage the incoming
5193 * data.
5194 */
5195 if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5196 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5197 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5199 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5200 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5201 phba->hbq_count = 1;
5202 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
5203 /* Initially populate or replenish the HBQs */
5208 * lpfc_sli_config_port - Issue config port mailbox command
5209 * @phba: Pointer to HBA context object.
5210 * @sli_mode: sli mode - 2/3
5212 * This function is called by the sli initialization code path
5213 * to issue config_port mailbox command. This function restarts the
5214 * HBA firmware and issues a config_port mailbox command to configure
5215 * the SLI interface in the sli mode specified by sli_mode
5216 * variable. The caller is not required to hold any locks.
5217 * The function returns 0 if successful, else it returns a negative error
5218 * code.
5219 */
5221 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
5224 uint32_t resetcount = 0, rc = 0, done = 0;
5226 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5228 phba->link_state = LPFC_HBA_ERROR;
5232 phba->sli_rev = sli_mode;
5233 while (resetcount < 2 && !done) {
5234 spin_lock_irq(&phba->hbalock);
5235 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5236 spin_unlock_irq(&phba->hbalock);
5237 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5238 lpfc_sli_brdrestart(phba);
5239 rc = lpfc_sli_chipset_init(phba);
5243 spin_lock_irq(&phba->hbalock);
5244 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5245 spin_unlock_irq(&phba->hbalock);
5248 /* Call pre CONFIG_PORT mailbox command initialization. A
5249 * value of 0 means the call was successful. Any other
5250 * nonzero value is a failure, but if ERESTART is returned,
5251 * the driver may reset the HBA and try again.
5253 rc = lpfc_config_port_prep(phba);
5254 if (rc == -ERESTART) {
5255 phba->link_state = LPFC_LINK_UNKNOWN;
5260 phba->link_state = LPFC_INIT_MBX_CMDS;
5261 lpfc_config_port(phba, pmb);
5262 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5263 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5264 LPFC_SLI3_HBQ_ENABLED |
5265 LPFC_SLI3_CRP_ENABLED |
5266 LPFC_SLI3_DSS_ENABLED);
5267 if (rc != MBX_SUCCESS) {
5268 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5269 "0442 Adapter failed to init, mbxCmd x%x "
5270 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5271 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5272 spin_lock_irq(&phba->hbalock);
5273 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5274 spin_unlock_irq(&phba->hbalock);
5277 /* Allow asynchronous mailbox command to go through */
5278 spin_lock_irq(&phba->hbalock);
5279 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5280 spin_unlock_irq(&phba->hbalock);
5283 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5284 (pmb->u.mb.un.varCfgPort.gasabt == 0))
5285 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5286 "3110 Port did not grant ASABT\n");
5291 goto do_prep_failed;
5293 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5294 if (!pmb->u.mb.un.varCfgPort.cMA) {
5296 goto do_prep_failed;
5298 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5299 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5300 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5301 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5302 phba->max_vpi : phba->max_vports;
5306 if (pmb->u.mb.un.varCfgPort.gerbm)
5307 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5308 if (pmb->u.mb.un.varCfgPort.gcrp)
5309 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5311 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5312 phba->port_gp = phba->mbox->us.s3_pgp.port;
5314 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5315 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5316 phba->cfg_enable_bg = 0;
5317 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5318 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5319 "0443 Adapter did not grant "
5324 phba->hbq_get = NULL;
5325 phba->port_gp = phba->mbox->us.s2.port;
5329 mempool_free(pmb, phba->mbox_mem_pool);
5335 * lpfc_sli_hba_setup - SLI initialization function
5336 * @phba: Pointer to HBA context object.
5338 * This function is the main SLI initialization function. This function
5339 * is called by the HBA initialization code, HBA reset code and HBA
5340 * error attention handler code. Caller is not required to hold any
5341 * locks. This function issues config_port mailbox command to configure
5342 * the SLI, sets up the iocb rings and HBQ rings. In the end the function
5343 * calls the config_port_post function to issue init_link mailbox
5344 * command and to start the discovery. The function will return zero
5345 * if successful, else it will return negative error code.
5348 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5354 /* Enable ISR already does config_port because of config_msi mbx */
5355 if (phba->hba_flag & HBA_NEEDS_CFG_PORT) {
5356 rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
5359 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
5361 phba->fcp_embed_io = 0; /* SLI4 FC support only */
5363 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5364 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5365 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5367 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5368 "2709 This device supports "
5369 "Advanced Error Reporting (AER)\n");
5370 spin_lock_irq(&phba->hbalock);
5371 phba->hba_flag |= HBA_AER_ENABLED;
5372 spin_unlock_irq(&phba->hbalock);
5374 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5375 "2708 This device does not support "
5376 "Advanced Error Reporting (AER): %d\n",
5378 phba->cfg_aer_support = 0;
5382 if (phba->sli_rev == 3) {
5383 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5384 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5386 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5387 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5388 phba->sli3_options = 0;
5391 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5392 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5393 phba->sli_rev, phba->max_vpi);
5394 rc = lpfc_sli_ring_map(phba);
5397 goto lpfc_sli_hba_setup_error;
5399 /* Initialize VPIs. */
5400 if (phba->sli_rev == LPFC_SLI_REV3) {
5402 * The VPI bitmask and physical ID array are allocated
5403 * and initialized once only - at driver load. A port
5404 * reset doesn't need to reinitialize this memory.
5406 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5407 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5408 phba->vpi_bmask = kcalloc(longs,
5409 sizeof(unsigned long),
5410 GFP_KERNEL);
5411 if (!phba->vpi_bmask) {
5413 goto lpfc_sli_hba_setup_error;
5416 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5417 sizeof(uint16_t),
5418 GFP_KERNEL);
5419 if (!phba->vpi_ids) {
5420 kfree(phba->vpi_bmask);
5422 goto lpfc_sli_hba_setup_error;
5424 for (i = 0; i < phba->max_vpi; i++)
5425 phba->vpi_ids[i] = i;
5430 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5431 rc = lpfc_sli_hbq_setup(phba);
5433 goto lpfc_sli_hba_setup_error;
5435 spin_lock_irq(&phba->hbalock);
5436 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5437 spin_unlock_irq(&phba->hbalock);
5439 rc = lpfc_config_port_post(phba);
5441 goto lpfc_sli_hba_setup_error;
5445 lpfc_sli_hba_setup_error:
5446 phba->link_state = LPFC_HBA_ERROR;
5447 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5448 "0445 Firmware initialization failed\n");
5453 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5454 * @phba: Pointer to HBA context object.
5456 * This function issues a dump mailbox command to read config region
5457 * 23, parses the records in the region, and populates the driver
5458 * with the FCoE parameters found there.
5461 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5463 LPFC_MBOXQ_t *mboxq;
5464 struct lpfc_dmabuf *mp;
5465 struct lpfc_mqe *mqe;
5466 uint32_t data_length;
5469 /* Program the default value of vlan_id and fc_map */
5470 phba->valid_vlan = 0;
5471 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5472 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5473 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5475 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5479 mqe = &mboxq->u.mqe;
5480 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5482 goto out_free_mboxq;
5485 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5486 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5488 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5489 "(%d):2571 Mailbox cmd x%x Status x%x "
5490 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5491 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5492 "CQ: x%x x%x x%x x%x\n",
5493 mboxq->vport ? mboxq->vport->vpi : 0,
5494 bf_get(lpfc_mqe_command, mqe),
5495 bf_get(lpfc_mqe_status, mqe),
5496 mqe->un.mb_words[0], mqe->un.mb_words[1],
5497 mqe->un.mb_words[2], mqe->un.mb_words[3],
5498 mqe->un.mb_words[4], mqe->un.mb_words[5],
5499 mqe->un.mb_words[6], mqe->un.mb_words[7],
5500 mqe->un.mb_words[8], mqe->un.mb_words[9],
5501 mqe->un.mb_words[10], mqe->un.mb_words[11],
5502 mqe->un.mb_words[12], mqe->un.mb_words[13],
5503 mqe->un.mb_words[14], mqe->un.mb_words[15],
5504 mqe->un.mb_words[16], mqe->un.mb_words[50],
5506 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5507 mboxq->mcqe.trailer);
5510 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5513 goto out_free_mboxq;
5515 data_length = mqe->un.mb_words[5];
5516 if (data_length > DMP_RGN23_SIZE) {
5517 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5520 goto out_free_mboxq;
5523 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5524 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5529 mempool_free(mboxq, phba->mbox_mem_pool);
5534 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5535 * @phba: pointer to lpfc hba data structure.
5536 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5537 * @vpd: pointer to the memory to hold resulting port vpd data.
5538 * @vpd_size: On input, the number of bytes allocated to @vpd.
5539 * On output, the number of data bytes in @vpd.
5541 * This routine executes a READ_REV SLI4 mailbox command. In
5542 * addition, this routine gets the port vpd data.
5546 * -ENOMEM - could not allocate memory.
5549 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5550 uint8_t *vpd, uint32_t *vpd_size)
5554 struct lpfc_dmabuf *dmabuf;
5555 struct lpfc_mqe *mqe;
5557 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5562 * Get a DMA buffer for the vpd data resulting from the READ_REV
5565 dma_size = *vpd_size;
5566 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5567 &dmabuf->phys, GFP_KERNEL);
5568 if (!dmabuf->virt) {
5574 * The SLI4 implementation of READ_REV conflicts at word1,
5575 * bits 31:16 and SLI4 adds vpd functionality not present
5576 * in SLI3. This code corrects the conflicts.
5578 lpfc_read_rev(phba, mboxq);
5579 mqe = &mboxq->u.mqe;
5580 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5581 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5582 mqe->un.read_rev.word1 &= 0x0000FFFF;
5583 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5584 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5586 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5588 dma_free_coherent(&phba->pcidev->dev, dma_size,
5589 dmabuf->virt, dmabuf->phys);
5595 * The available vpd length cannot be bigger than the
5596 * DMA buffer passed to the port. Catch the less than
5597 * case and update the caller's size.
5599 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5600 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5602 memcpy(vpd, dmabuf->virt, *vpd_size);
5604 dma_free_coherent(&phba->pcidev->dev, dma_size,
5605 dmabuf->virt, dmabuf->phys);
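/*
 * Hedged usage sketch (caller code lives elsewhere in the driver): the
 * caller supplies a kernel buffer and lets this routine shrink vpd_size to
 * the number of valid bytes, e.g.
 *
 *	uint32_t vpd_size = SLI4_PAGE_SIZE;	// buffer size is an assumption
 *	uint8_t *vpd = kzalloc(vpd_size, GFP_KERNEL);
 *
 *	if (vpd && !lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size))
 *		parse_vpd(vpd, vpd_size);	// hypothetical consumer
 */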
5611 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5612 * @phba: pointer to lpfc hba data structure.
5614 * This routine retrieves the SLI4 device controller attributes (link
5615 * type, link number and BIOS version) for the PCI function it is bound to.
5619 * otherwise - failed to retrieve controller attributes
5622 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5624 LPFC_MBOXQ_t *mboxq;
5625 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5626 struct lpfc_controller_attribute *cntl_attr;
5627 void *virtaddr = NULL;
5628 uint32_t alloclen, reqlen;
5629 uint32_t shdr_status, shdr_add_status;
5630 union lpfc_sli4_cfg_shdr *shdr;
5633 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5637 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5638 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5639 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5640 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5641 LPFC_SLI4_MBX_NEMBED);
5643 if (alloclen < reqlen) {
5644 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5645 "3084 Allocated DMA memory size (%d) is "
5646 "less than the requested DMA memory size "
5647 "(%d)\n", alloclen, reqlen);
5649 goto out_free_mboxq;
5651 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5652 virtaddr = mboxq->sge_array->addr[0];
5653 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5654 shdr = &mbx_cntl_attr->cfg_shdr;
5655 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5656 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5657 if (shdr_status || shdr_add_status || rc) {
5658 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5659 "3085 Mailbox x%x (x%x/x%x) failed, "
5660 "rc:x%x, status:x%x, add_status:x%x\n",
5661 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5662 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5663 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5664 rc, shdr_status, shdr_add_status);
5666 goto out_free_mboxq;
5669 cntl_attr = &mbx_cntl_attr->cntl_attr;
5670 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5671 phba->sli4_hba.lnk_info.lnk_tp =
5672 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5673 phba->sli4_hba.lnk_info.lnk_no =
5674 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5676 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
5677 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
5678 sizeof(phba->BIOSVersion));
5680 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5681 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
5682 phba->sli4_hba.lnk_info.lnk_tp,
5683 phba->sli4_hba.lnk_info.lnk_no,
5686 if (rc != MBX_TIMEOUT) {
5687 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5688 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5690 mempool_free(mboxq, phba->mbox_mem_pool);
5696 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5697 * @phba: pointer to lpfc hba data structure.
5699 * This routine retrieves the SLI4 device physical port name for the PCI
5700 * function this driver instance is attached to.
5704 * otherwise - failed to retrieve physical port name
5707 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5709 LPFC_MBOXQ_t *mboxq;
5710 struct lpfc_mbx_get_port_name *get_port_name;
5711 uint32_t shdr_status, shdr_add_status;
5712 union lpfc_sli4_cfg_shdr *shdr;
5713 char cport_name = 0;
5716 /* We assume nothing at this point */
5717 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5718 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5720 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5723 /* obtain link type and link number via READ_CONFIG */
5724 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5725 lpfc_sli4_read_config(phba);
5726 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5727 goto retrieve_ppname;
5729 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5730 rc = lpfc_sli4_get_ctl_attr(phba);
5732 goto out_free_mboxq;
5735 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5736 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5737 sizeof(struct lpfc_mbx_get_port_name) -
5738 sizeof(struct lpfc_sli4_cfg_mhdr),
5739 LPFC_SLI4_MBX_EMBED);
5740 get_port_name = &mboxq->u.mqe.un.get_port_name;
5741 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5742 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5743 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5744 phba->sli4_hba.lnk_info.lnk_tp);
5745 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5746 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5747 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5748 if (shdr_status || shdr_add_status || rc) {
5749 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5750 "3087 Mailbox x%x (x%x/x%x) failed: "
5751 "rc:x%x, status:x%x, add_status:x%x\n",
5752 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5753 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5754 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5755 rc, shdr_status, shdr_add_status);
5757 goto out_free_mboxq;
5759 switch (phba->sli4_hba.lnk_info.lnk_no) {
5760 case LPFC_LINK_NUMBER_0:
5761 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5762 &get_port_name->u.response);
5763 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5765 case LPFC_LINK_NUMBER_1:
5766 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5767 &get_port_name->u.response);
5768 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5770 case LPFC_LINK_NUMBER_2:
5771 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5772 &get_port_name->u.response);
5773 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5775 case LPFC_LINK_NUMBER_3:
5776 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5777 &get_port_name->u.response);
5778 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5784 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5785 phba->Port[0] = cport_name;
5786 phba->Port[1] = '\0';
5787 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5788 "3091 SLI get port name: %s\n", phba->Port);
5792 if (rc != MBX_TIMEOUT) {
5793 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5794 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5796 mempool_free(mboxq, phba->mbox_mem_pool);
5802 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5803 * @phba: pointer to lpfc hba data structure.
5805 * This routine is called to explicitly arm the SLI4 device's completion
5806 * and event queues.
5807 */
5808 void
5809 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5812 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
5813 struct lpfc_sli4_hdw_queue *qp;
5814 struct lpfc_queue *eq;
5816 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
5817 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
5818 if (sli4_hba->nvmels_cq)
5819 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
5822 if (sli4_hba->hdwq) {
5823 /* Loop thru all Hardware Queues */
5824 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
5825 qp = &sli4_hba->hdwq[qidx];
5826 /* ARM the corresponding CQ */
5827 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
5831 /* Loop thru all IRQ vectors */
5832 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
5833 eq = sli4_hba->hba_eq_hdl[qidx].eq;
5834 /* ARM the corresponding EQ */
5835 sli4_hba->sli4_write_eq_db(phba, eq,
5836 0, LPFC_QUEUE_REARM);
5840 if (phba->nvmet_support) {
5841 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5842 sli4_hba->sli4_write_cq_db(phba,
5843 sli4_hba->nvmet_cqset[qidx], 0,
5844 LPFC_QUEUE_REARM);
5850 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5851 * @phba: Pointer to HBA context object.
5852 * @type: The resource extent type.
5853 * @extnt_count: buffer to hold port available extent count.
5854 * @extnt_size: buffer to hold element count per extent.
5856 * This function queries the port and retrieves the number of available
5857 * extents and their size for a particular extent type.
5859 * Returns: 0 if successful. Nonzero otherwise.
5862 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5863 uint16_t *extnt_count, uint16_t *extnt_size)
5868 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5871 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5875 /* Find out how many extents are available for this resource type */
5876 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5877 sizeof(struct lpfc_sli4_cfg_mhdr));
5878 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5879 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5880 length, LPFC_SLI4_MBX_EMBED);
5882 /* Send an extents count of 0 - the GET doesn't use it. */
5883 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5884 LPFC_SLI4_MBX_EMBED);
5890 if (!phba->sli4_hba.intr_enable)
5891 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5893 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5894 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5901 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5902 if (bf_get(lpfc_mbox_hdr_status,
5903 &rsrc_info->header.cfg_shdr.response)) {
5904 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5905 "2930 Failed to get resource extents "
5906 "Status 0x%x Add'l Status 0x%x\n",
5907 bf_get(lpfc_mbox_hdr_status,
5908 &rsrc_info->header.cfg_shdr.response),
5909 bf_get(lpfc_mbox_hdr_add_status,
5910 &rsrc_info->header.cfg_shdr.response));
5915 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5916 &rsrc_info->u.rsp);
5917 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5918 &rsrc_info->u.rsp);
5920 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5921 "3162 Retrieved extents type-%d from port: count:%d, "
5922 "size:%d\n", type, *extnt_count, *extnt_size);
5925 mempool_free(mbox, phba->mbox_mem_pool);
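/*
 * Hedged usage sketch: callers typically size an ID array or bitmap from
 * the (count, size) pair reported here, e.g.
 *
 *	uint16_t cnt, sz;
 *
 *	if (!lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI,
 *					    &cnt, &sz))
 *		total_ids = cnt * sz;	// "total_ids" is illustrative
 */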
5930 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5931 * @phba: Pointer to HBA context object.
5932 * @type: The extent type to check.
5934 * This function reads the current available extents from the port and checks
5935 * if the extent count or extent size has changed since the last access.
5936 * Callers use this routine post port reset to understand if there is an
5937 * extent reprovisioning requirement.
5940 * -Error: error indicates problem.
5941 * 1: Extent count or size has changed.
5945 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5947 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5948 uint16_t size_diff, rsrc_ext_size;
5950 struct lpfc_rsrc_blks *rsrc_entry;
5951 struct list_head *rsrc_blk_list = NULL;
5955 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5962 case LPFC_RSC_TYPE_FCOE_RPI:
5963 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5965 case LPFC_RSC_TYPE_FCOE_VPI:
5966 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5968 case LPFC_RSC_TYPE_FCOE_XRI:
5969 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5971 case LPFC_RSC_TYPE_FCOE_VFI:
5972 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5978 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5980 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5984 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5991 * lpfc_sli4_cfg_post_extnts - Allocate a block of resource extents
5992 * @phba: Pointer to HBA context object.
5993 * @extnt_cnt: number of available extents.
5994 * @type: the extent type (rpi, xri, vfi, vpi).
5995 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5996 * @mbox: pointer to the caller's allocated mailbox structure.
5998 * This function executes the extents allocation request. It also
5999 * takes care of the amount of memory needed to allocate or get the
6000 * allocated extents. It is the caller's responsibility to evaluate
6001 * the response.
6002 *
6003 * Returns:
6004 * -Error: Error value describes the condition found.
6008 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
6009 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
6014 uint32_t alloc_len, mbox_tmo;
6016 /* Calculate the total requested length of the dma memory */
6017 req_len = extnt_cnt * sizeof(uint16_t);
6020 * Calculate the size of an embedded mailbox. The uint32_t
6021 * accounts for the extents-specific word.
6023 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6027 * Presume the allocation and response will fit into an embedded
6028 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6030 *emb = LPFC_SLI4_MBX_EMBED;
6031 if (req_len > emb_len) {
6032 req_len = extnt_cnt * sizeof(uint16_t) +
6033 sizeof(union lpfc_sli4_cfg_shdr) +
6035 *emb = LPFC_SLI4_MBX_NEMBED;
6038 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6039 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
6041 if (alloc_len < req_len) {
6042 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6043 "2982 Allocated DMA memory size (x%x) is "
6044 "less than the requested DMA memory "
6045 "size (x%x)\n", alloc_len, req_len);
6048 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
6052 if (!phba->sli4_hba.intr_enable)
6053 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6055 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6056 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
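/*
 * Illustrative sketch (not part of the driver): the embedded-vs-
 * non-embedded sizing decision above, in isolation. emb_len is the
 * payload room left in a MAILBOX_t after the config header and the
 * extents-specific word; anything larger must go non-embedded via an
 * external SGE. The helper name is hypothetical.
 */
static bool example_fits_embedded_mbox(uint16_t extnt_cnt)
{
	uint32_t req_len = extnt_cnt * sizeof(uint16_t);
	uint32_t emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
			   sizeof(uint32_t);

	return req_len <= emb_len;
}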
6065 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
6066 * @phba: Pointer to HBA context object.
6067 * @type: The resource extent type to allocate.
6069 * This function allocates the number of elements for the specified resource type.
6073 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
6076 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
6077 uint16_t rsrc_id, rsrc_start, j, k;
6080 unsigned long longs;
6081 unsigned long *bmask;
6082 struct lpfc_rsrc_blks *rsrc_blks;
6085 struct lpfc_id_range *id_array = NULL;
6086 void *virtaddr = NULL;
6087 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6088 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6089 struct list_head *ext_blk_list;
6091 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6097 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
6098 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6099 "3009 No available Resource Extents "
6100 "for resource type 0x%x: Count: 0x%x, "
6101 "Size 0x%x\n", type, rsrc_cnt,
6106 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
6107 "2903 Post resource extents type-0x%x: "
6108 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
6110 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6114 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
6121 * Figure out where the response is located. Then get local pointers
6122 * to the response data. The port does not guarantee a response for
6123 * the full extent count requested, so update the local variable with
6124 * the allocated count from the port.
6126 if (emb == LPFC_SLI4_MBX_EMBED) {
6127 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6128 id_array = &rsrc_ext->u.rsp.id[0];
6129 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6131 virtaddr = mbox->sge_array->addr[0];
6132 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6133 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6134 id_array = &n_rsrc->id;
6137 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
6138 rsrc_id_cnt = rsrc_cnt * rsrc_size;
6141 * Based on the resource size and count, correct the base and max resource values.
6144 length = sizeof(struct lpfc_rsrc_blks);
6146 case LPFC_RSC_TYPE_FCOE_RPI:
6147 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6148 sizeof(unsigned long),
6150 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6154 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6157 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6158 kfree(phba->sli4_hba.rpi_bmask);
6164 * The next_rpi was initialized with the maximum available
6165 * count but the port may allocate a smaller number. Catch
6166 * that case and update the next_rpi.
6168 phba->sli4_hba.next_rpi = rsrc_id_cnt;
6170 /* Initialize local ptrs for common extent processing later. */
6171 bmask = phba->sli4_hba.rpi_bmask;
6172 ids = phba->sli4_hba.rpi_ids;
6173 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6175 case LPFC_RSC_TYPE_FCOE_VPI:
6176 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6178 if (unlikely(!phba->vpi_bmask)) {
6182 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6184 if (unlikely(!phba->vpi_ids)) {
6185 kfree(phba->vpi_bmask);
6190 /* Initialize local ptrs for common extent processing later. */
6191 bmask = phba->vpi_bmask;
6192 ids = phba->vpi_ids;
6193 ext_blk_list = &phba->lpfc_vpi_blk_list;
6195 case LPFC_RSC_TYPE_FCOE_XRI:
6196 phba->sli4_hba.xri_bmask = kcalloc(longs,
6197 sizeof(unsigned long),
6199 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6203 phba->sli4_hba.max_cfg_param.xri_used = 0;
6204 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6207 if (unlikely(!phba->sli4_hba.xri_ids)) {
6208 kfree(phba->sli4_hba.xri_bmask);
6213 /* Initialize local ptrs for common extent processing later. */
6214 bmask = phba->sli4_hba.xri_bmask;
6215 ids = phba->sli4_hba.xri_ids;
6216 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6218 case LPFC_RSC_TYPE_FCOE_VFI:
6219 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6220 sizeof(unsigned long),
6222 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6226 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6229 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6230 kfree(phba->sli4_hba.vfi_bmask);
6235 /* Initialize local ptrs for common extent processing later. */
6236 bmask = phba->sli4_hba.vfi_bmask;
6237 ids = phba->sli4_hba.vfi_ids;
6238 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6241 /* Unsupported Opcode. Fail call. */
6245 ext_blk_list = NULL;
6250 * Complete initializing the extent configuration with the
6251 * allocated ids assigned to this function. The bitmask serves
6252 * as an index into the array and manages the available ids. The
6253 * array just stores the ids communicated to the port via the wqes.
6255 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6257 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6260 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6263 rsrc_blks = kzalloc(length, GFP_KERNEL);
6264 if (unlikely(!rsrc_blks)) {
6270 rsrc_blks->rsrc_start = rsrc_id;
6271 rsrc_blks->rsrc_size = rsrc_size;
6272 list_add_tail(&rsrc_blks->list, ext_blk_list);
6273 rsrc_start = rsrc_id;
6274 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6275 phba->sli4_hba.io_xri_start = rsrc_start +
6276 lpfc_sli4_get_iocb_cnt(phba);
6279 while (rsrc_id < (rsrc_start + rsrc_size)) {
6284 /* Entire word processed. Get next word. */
6289 lpfc_sli4_mbox_cmd_free(phba, mbox);
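/*
 * Illustrative sketch (not part of the driver): how the bitmask/id
 * array pair built above is typically consumed. The bitmask tracks
 * which slots are in use; the parallel array maps a slot index to the
 * port-assigned id. The helper name is hypothetical, and a real
 * allocator would take the appropriate lock.
 */
static int example_alloc_rpi_slot(struct lpfc_hba *phba, uint16_t max_rpi)
{
	unsigned long idx;

	idx = find_first_zero_bit(phba->sli4_hba.rpi_bmask, max_rpi);
	if (idx >= max_rpi)
		return -ENOSPC;			/* all ids in use */
	set_bit(idx, phba->sli4_hba.rpi_bmask);
	return phba->sli4_hba.rpi_ids[idx];	/* id used on the wire */
}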
6296 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6297 * @phba: Pointer to HBA context object.
6298 * @type: the extent's type.
6300 * This function deallocates all extents of a particular resource type.
6301 * SLI4 does not allow for deallocating a particular extent range. It
6302 * is the caller's responsibility to release all kernel memory resources.
6305 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6308 uint32_t length, mbox_tmo = 0;
6310 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6311 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6313 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6318 * This function sends an embedded mailbox because it only sends
6319 * the resource type. All extents of this type are released by the port.
6322 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6323 sizeof(struct lpfc_sli4_cfg_mhdr));
6324 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6325 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6326 length, LPFC_SLI4_MBX_EMBED);
6328 /* Send an extents count of 0 - the dealloc doesn't use it. */
6329 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6330 LPFC_SLI4_MBX_EMBED);
6335 if (!phba->sli4_hba.intr_enable)
6336 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6338 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6339 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6346 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6347 if (bf_get(lpfc_mbox_hdr_status,
6348 &dealloc_rsrc->header.cfg_shdr.response)) {
6349 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6350 "2919 Failed to release resource extents "
6351 "for type %d - Status 0x%x Add'l Status 0x%x. "
6352 "Resource memory not released.\n",
6354 bf_get(lpfc_mbox_hdr_status,
6355 &dealloc_rsrc->header.cfg_shdr.response),
6356 bf_get(lpfc_mbox_hdr_add_status,
6357 &dealloc_rsrc->header.cfg_shdr.response));
6362 /* Release kernel memory resources for the specific type. */
6364 case LPFC_RSC_TYPE_FCOE_VPI:
6365 kfree(phba->vpi_bmask);
6366 kfree(phba->vpi_ids);
6367 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6368 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6369 &phba->lpfc_vpi_blk_list, list) {
6370 list_del_init(&rsrc_blk->list);
6373 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6375 case LPFC_RSC_TYPE_FCOE_XRI:
6376 kfree(phba->sli4_hba.xri_bmask);
6377 kfree(phba->sli4_hba.xri_ids);
6378 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6379 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6380 list_del_init(&rsrc_blk->list);
6384 case LPFC_RSC_TYPE_FCOE_VFI:
6385 kfree(phba->sli4_hba.vfi_bmask);
6386 kfree(phba->sli4_hba.vfi_ids);
6387 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6388 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6389 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6390 list_del_init(&rsrc_blk->list);
6394 case LPFC_RSC_TYPE_FCOE_RPI:
6395 /* RPI bitmask and physical id array are cleaned up earlier. */
6396 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6397 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6398 list_del_init(&rsrc_blk->list);
6406 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6409 mempool_free(mbox, phba->mbox_mem_pool);
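/*
 * Illustrative sketch (not part of the driver): the safe-iteration
 * teardown used above for each extent block list, reduced to one
 * helper. list_for_each_entry_safe() permits deletion of the current
 * entry while walking. The helper name is hypothetical.
 */
static void example_free_rsrc_blk_list(struct list_head *blk_list)
{
	struct lpfc_rsrc_blks *blk, *next;

	list_for_each_entry_safe(blk, next, blk_list, list) {
		list_del_init(&blk->list);
		kfree(blk);
	}
}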
6414 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6419 len = sizeof(struct lpfc_mbx_set_feature) -
6420 sizeof(struct lpfc_sli4_cfg_mhdr);
6421 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6422 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6423 LPFC_SLI4_MBX_EMBED);
6426 case LPFC_SET_UE_RECOVERY:
6427 bf_set(lpfc_mbx_set_feature_UER,
6428 &mbox->u.mqe.un.set_feature, 1);
6429 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6430 mbox->u.mqe.un.set_feature.param_len = 8;
6432 case LPFC_SET_MDS_DIAGS:
6433 bf_set(lpfc_mbx_set_feature_mds,
6434 &mbox->u.mqe.un.set_feature, 1);
6435 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6436 &mbox->u.mqe.un.set_feature, 1);
6437 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6438 mbox->u.mqe.un.set_feature.param_len = 8;
6440 case LPFC_SET_DUAL_DUMP:
6441 bf_set(lpfc_mbx_set_feature_dd,
6442 &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6443 bf_set(lpfc_mbx_set_feature_ddquery,
6444 &mbox->u.mqe.un.set_feature, 0);
6445 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6446 mbox->u.mqe.un.set_feature.param_len = 4;
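/*
 * Illustrative sketch (not part of the driver): the polled usage
 * pattern for lpfc_set_features() above, matching how the setup path
 * later in this file issues it. The helper name is hypothetical.
 */
static int example_enable_mds_diags(struct lpfc_hba *phba,
				    LPFC_MBOXQ_t *mboxq)
{
	lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
	return lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
}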
6454 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6455 * @phba: Pointer to HBA context object.
6457 * Disable FW logging into host memory on the adapter. To
6458 * be done before reading logs from the host memory.
6461 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6463 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6465 spin_lock_irq(&phba->hbalock);
6466 ras_fwlog->state = INACTIVE;
6467 spin_unlock_irq(&phba->hbalock);
6469 /* Disable FW logging to host memory */
6470 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6471 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6473 /* Wait 10ms for firmware to stop using DMA buffer */
6474 usleep_range(10 * 1000, 20 * 1000);
6478 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6479 * @phba: Pointer to HBA context object.
6481 * This function is called to free memory allocated for RAS FW logging
6482 * support in the driver.
6485 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6487 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6488 struct lpfc_dmabuf *dmabuf, *next;
6490 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6491 list_for_each_entry_safe(dmabuf, next,
6492 &ras_fwlog->fwlog_buff_list,
6494 list_del(&dmabuf->list);
6495 dma_free_coherent(&phba->pcidev->dev,
6496 LPFC_RAS_MAX_ENTRY_SIZE,
6497 dmabuf->virt, dmabuf->phys);
6502 if (ras_fwlog->lwpd.virt) {
6503 dma_free_coherent(&phba->pcidev->dev,
6504 sizeof(uint32_t) * 2,
6505 ras_fwlog->lwpd.virt,
6506 ras_fwlog->lwpd.phys);
6507 ras_fwlog->lwpd.virt = NULL;
6510 spin_lock_irq(&phba->hbalock);
6511 ras_fwlog->state = INACTIVE;
6512 spin_unlock_irq(&phba->hbalock);
6516 * lpfc_sli4_ras_dma_alloc: Allocate DMA memory for FW logging support
6517 * @phba: Pointer to HBA context object.
6518 * @fwlog_buff_count: Count of buffers to be created.
6520 * This routine allocates DMA memory for the Log Write Position Data
6521 * [LWPD] and for the log buffers that are posted to the adapter for
6522 * FW log updates. The buffer count is calculated from the module
6523 * parameter ras_fwlog_buffsize; each buffer posted to FW is 64K.
6527 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6528 uint32_t fwlog_buff_count)
6530 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6531 struct lpfc_dmabuf *dmabuf;
6534 /* Initialize List */
6535 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6537 /* Allocate memory for the LWPD */
6538 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6539 sizeof(uint32_t) * 2,
6540 &ras_fwlog->lwpd.phys,
6542 if (!ras_fwlog->lwpd.virt) {
6543 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6544 "6185 LWPD Memory Alloc Failed\n");
6549 ras_fwlog->fw_buffcount = fwlog_buff_count;
6550 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6551 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6555 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6556 "6186 Memory Alloc failed FW logging");
6560 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6561 LPFC_RAS_MAX_ENTRY_SIZE,
6562 &dmabuf->phys, GFP_KERNEL);
6563 if (!dmabuf->virt) {
6566 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6567 "6187 DMA Alloc Failed FW logging");
6570 dmabuf->buffer_tag = i;
6571 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6576 lpfc_sli4_ras_dma_free(phba);
6582 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6583 * @phba: pointer to lpfc hba data structure.
6584 * @pmb: pointer to the driver internal queue element for mailbox command.
6586 * Completion handler for driver's RAS MBX command to the device.
6589 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6592 union lpfc_sli4_cfg_shdr *shdr;
6593 uint32_t shdr_status, shdr_add_status;
6594 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6598 shdr = (union lpfc_sli4_cfg_shdr *)
6599 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6600 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6601 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6603 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6604 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6605 "6188 FW LOG mailbox "
6606 "completed with status x%x add_status x%x,"
6607 " mbx status x%x\n",
6608 shdr_status, shdr_add_status, mb->mbxStatus);
6610 ras_fwlog->ras_hwsupport = false;
6614 spin_lock_irq(&phba->hbalock);
6615 ras_fwlog->state = ACTIVE;
6616 spin_unlock_irq(&phba->hbalock);
6617 mempool_free(pmb, phba->mbox_mem_pool);
6622 /* Free RAS DMA memory */
6623 lpfc_sli4_ras_dma_free(phba);
6624 mempool_free(pmb, phba->mbox_mem_pool);
6628 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
6629 * @phba: pointer to lpfc hba data structure.
6630 * @fwlog_level: Logging verbosity level.
6631 * @fwlog_enable: Enable/Disable logging.
6633 * Initialize memory and post mailbox command to enable FW logging in host memory.
6637 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6638 uint32_t fwlog_level,
6639 uint32_t fwlog_enable)
6641 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6642 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6643 struct lpfc_dmabuf *dmabuf;
6645 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6648 spin_lock_irq(&phba->hbalock);
6649 ras_fwlog->state = INACTIVE;
6650 spin_unlock_irq(&phba->hbalock);
6652 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6653 phba->cfg_ras_fwlog_buffsize);
6654 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
6657 * If re-enabling FW logging support, use the earlier-allocated
6658 * DMA buffers while posting the MBX command.
6660 if (!ras_fwlog->lwpd.virt) {
6661 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6663 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6664 "6189 FW Log Memory Allocation Failed");
6669 /* Setup Mailbox command */
6670 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6672 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6673 "6190 RAS MBX Alloc Failed");
6678 ras_fwlog->fw_loglevel = fwlog_level;
6679 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6680 sizeof(struct lpfc_sli4_cfg_mhdr));
6682 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6683 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6684 len, LPFC_SLI4_MBX_EMBED);
6686 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6687 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6689 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6690 ras_fwlog->fw_loglevel);
6691 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6692 ras_fwlog->fw_buffcount);
6693 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6694 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6696 /* Update DMA buffer address */
6697 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6698 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6700 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6701 putPaddrLow(dmabuf->phys);
6703 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6704 putPaddrHigh(dmabuf->phys);
6707 /* Update LWPD address */
6708 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6709 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
6711 spin_lock_irq(&phba->hbalock);
6712 ras_fwlog->state = REG_INPROGRESS;
6713 spin_unlock_irq(&phba->hbalock);
6714 mbox->vport = phba->pport;
6715 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
6717 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6719 if (rc == MBX_NOT_FINISHED) {
6720 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6721 "6191 FW-Log Mailbox failed. "
6722 "status %d mbxStatus : x%x", rc,
6723 bf_get(lpfc_mqe_status, &mbox->u.mqe));
6724 mempool_free(mbox, phba->mbox_mem_pool);
6731 lpfc_sli4_ras_dma_free(phba);
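/*
 * Illustrative sketch (not part of the driver): the buffer-count
 * arithmetic used above. Assuming LPFC_RAS_MIN_BUFF_POST_SIZE is 256K
 * and LPFC_RAS_MAX_ENTRY_SIZE is 64K (values believed to match the
 * driver headers), a module parameter of 1 yields four 64K buffers.
 */
static uint32_t example_fwlog_entry_count(uint32_t cfg_ras_fwlog_buffsize)
{
	uint32_t fwlog_buffsize = LPFC_RAS_MIN_BUFF_POST_SIZE *
				  cfg_ras_fwlog_buffsize;

	return fwlog_buffsize / LPFC_RAS_MAX_ENTRY_SIZE;
}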
6737 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
6738 * @phba: Pointer to HBA context object.
6740 * Check if RAS is supported on the adapter and initialize it.
6743 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6745 /* Check RAS FW Log needs to be enabled or not */
6746 if (lpfc_check_fwlog_support(phba))
6749 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6750 LPFC_RAS_ENABLE_LOGGING);
6754 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
6755 * @phba: Pointer to HBA context object.
6757 * This function allocates all SLI4 resource identifiers.
6760 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6762 int i, rc, error = 0;
6763 uint16_t count, base;
6764 unsigned long longs;
6766 if (!phba->sli4_hba.rpi_hdrs_in_use)
6767 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6768 if (phba->sli4_hba.extents_in_use) {
6770 * The port supports resource extents. The XRI, VPI, VFI, RPI
6771 * resource extent count must be read and allocated before
6772 * provisioning the resource id arrays.
6774 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6775 LPFC_IDX_RSRC_RDY) {
6777 * Extent-based resources are set - the driver could
6778 * be in a port reset. Figure out if any corrective
6779 * actions need to be taken.
6781 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6782 LPFC_RSC_TYPE_FCOE_VFI);
6785 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6786 LPFC_RSC_TYPE_FCOE_VPI);
6789 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6790 LPFC_RSC_TYPE_FCOE_XRI);
6793 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6794 LPFC_RSC_TYPE_FCOE_RPI);
6799 * It's possible that the number of resources
6800 * provided to this port instance changed between
6801 * resets. Detect this condition and reallocate
6802 * resources. Otherwise, there is no action.
6805 lpfc_printf_log(phba, KERN_INFO,
6806 LOG_MBOX | LOG_INIT,
6807 "2931 Detected extent resource "
6808 "change. Reallocating all "
6810 rc = lpfc_sli4_dealloc_extent(phba,
6811 LPFC_RSC_TYPE_FCOE_VFI);
6812 rc = lpfc_sli4_dealloc_extent(phba,
6813 LPFC_RSC_TYPE_FCOE_VPI);
6814 rc = lpfc_sli4_dealloc_extent(phba,
6815 LPFC_RSC_TYPE_FCOE_XRI);
6816 rc = lpfc_sli4_dealloc_extent(phba,
6817 LPFC_RSC_TYPE_FCOE_RPI);
6822 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6826 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6830 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6834 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6837 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6842 * The port does not support resource extents. The XRI, VPI,
6843 * VFI, RPI resource ids were determined from READ_CONFIG.
6844 * Just allocate the bitmasks and provision the resource id
6845 * arrays. If a port reset is active, the resources don't
6846 * need any action - just exit.
6848 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6849 LPFC_IDX_RSRC_RDY) {
6850 lpfc_sli4_dealloc_resource_identifiers(phba);
6851 lpfc_sli4_remove_rpis(phba);
6854 count = phba->sli4_hba.max_cfg_param.max_rpi;
6856 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6857 "3279 Invalid provisioning of "
6862 base = phba->sli4_hba.max_cfg_param.rpi_base;
6863 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6864 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6865 sizeof(unsigned long),
6867 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6871 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
6873 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6875 goto free_rpi_bmask;
6878 for (i = 0; i < count; i++)
6879 phba->sli4_hba.rpi_ids[i] = base + i;
6882 count = phba->sli4_hba.max_cfg_param.max_vpi;
6884 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6885 "3280 Invalid provisioning of "
6890 base = phba->sli4_hba.max_cfg_param.vpi_base;
6891 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6892 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6894 if (unlikely(!phba->vpi_bmask)) {
6898 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
6900 if (unlikely(!phba->vpi_ids)) {
6902 goto free_vpi_bmask;
6905 for (i = 0; i < count; i++)
6906 phba->vpi_ids[i] = base + i;
6909 count = phba->sli4_hba.max_cfg_param.max_xri;
6911 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6912 "3281 Invalid provisioning of "
6917 base = phba->sli4_hba.max_cfg_param.xri_base;
6918 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6919 phba->sli4_hba.xri_bmask = kcalloc(longs,
6920 sizeof(unsigned long),
6922 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6926 phba->sli4_hba.max_cfg_param.xri_used = 0;
6927 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
6929 if (unlikely(!phba->sli4_hba.xri_ids)) {
6931 goto free_xri_bmask;
6934 for (i = 0; i < count; i++)
6935 phba->sli4_hba.xri_ids[i] = base + i;
6938 count = phba->sli4_hba.max_cfg_param.max_vfi;
6940 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6941 "3282 Invalid provisioning of "
6946 base = phba->sli4_hba.max_cfg_param.vfi_base;
6947 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6948 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6949 sizeof(unsigned long),
6951 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6955 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
6957 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6959 goto free_vfi_bmask;
6962 for (i = 0; i < count; i++)
6963 phba->sli4_hba.vfi_ids[i] = base + i;
6966 * Mark all resources ready. An HBA reset doesn't need
6967 * to reset the initialization.
6969 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6975 kfree(phba->sli4_hba.vfi_bmask);
6976 phba->sli4_hba.vfi_bmask = NULL;
6978 kfree(phba->sli4_hba.xri_ids);
6979 phba->sli4_hba.xri_ids = NULL;
6981 kfree(phba->sli4_hba.xri_bmask);
6982 phba->sli4_hba.xri_bmask = NULL;
6984 kfree(phba->vpi_ids);
6985 phba->vpi_ids = NULL;
6987 kfree(phba->vpi_bmask);
6988 phba->vpi_bmask = NULL;
6990 kfree(phba->sli4_hba.rpi_ids);
6991 phba->sli4_hba.rpi_ids = NULL;
6993 kfree(phba->sli4_hba.rpi_bmask);
6994 phba->sli4_hba.rpi_bmask = NULL;
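/*
 * Illustrative sketch (not part of the driver): the non-extent id
 * provisioning pattern above reduced to a single resource. Each id is
 * simply base + index, with a parallel bitmask sized in longs. The
 * helper name and out-parameters are hypothetical.
 */
static int example_provision_ids(uint16_t base, uint16_t count,
				 unsigned long **bmask_out,
				 uint16_t **ids_out)
{
	unsigned long longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
	unsigned long *bmask;
	uint16_t *ids;
	int i;

	bmask = kcalloc(longs, sizeof(unsigned long), GFP_KERNEL);
	if (!bmask)
		return -ENOMEM;
	ids = kcalloc(count, sizeof(uint16_t), GFP_KERNEL);
	if (!ids) {
		kfree(bmask);
		return -ENOMEM;
	}
	for (i = 0; i < count; i++)
		ids[i] = base + i;
	*bmask_out = bmask;
	*ids_out = ids;
	return 0;
}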
7000 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
7001 * @phba: Pointer to HBA context object.
7003 * This function deallocates all previously allocated SLI4 resource identifiers.
7007 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
7009 if (phba->sli4_hba.extents_in_use) {
7010 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7011 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7012 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7013 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7015 kfree(phba->vpi_bmask);
7016 phba->sli4_hba.max_cfg_param.vpi_used = 0;
7017 kfree(phba->vpi_ids);
7018 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7019 kfree(phba->sli4_hba.xri_bmask);
7020 kfree(phba->sli4_hba.xri_ids);
7021 kfree(phba->sli4_hba.vfi_bmask);
7022 kfree(phba->sli4_hba.vfi_ids);
7023 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7024 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7031 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
7032 * @phba: Pointer to HBA context object.
7033 * @type: The resource extent type.
7034 * @extnt_cnt: buffer to hold port extent count response.
7035 * @extnt_size: buffer to hold port extent size response.
7037 * This function calls the port to read the host allocated extents
7038 * for a particular type.
7041 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
7042 uint16_t *extnt_cnt, uint16_t *extnt_size)
7046 uint16_t curr_blks = 0;
7047 uint32_t req_len, emb_len;
7048 uint32_t alloc_len, mbox_tmo;
7049 struct list_head *blk_list_head;
7050 struct lpfc_rsrc_blks *rsrc_blk;
7052 void *virtaddr = NULL;
7053 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
7054 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
7055 union lpfc_sli4_cfg_shdr *shdr;
7058 case LPFC_RSC_TYPE_FCOE_VPI:
7059 blk_list_head = &phba->lpfc_vpi_blk_list;
7061 case LPFC_RSC_TYPE_FCOE_XRI:
7062 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
7064 case LPFC_RSC_TYPE_FCOE_VFI:
7065 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
7067 case LPFC_RSC_TYPE_FCOE_RPI:
7068 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
7074 /* Count the number of extents currently allocated for this type. */
7075 list_for_each_entry(rsrc_blk, blk_list_head, list) {
7076 if (curr_blks == 0) {
7078 * The GET_ALLOCATED mailbox does not return the size,
7079 * just the count. The size is the same for every block
7080 * of an extent type, so take it from the first allocated
7081 * block and set the return value now.
7084 *extnt_size = rsrc_blk->rsrc_size;
7090 * Calculate the size of an embedded mailbox. The uint32_t
7091 * accounts for the extents-specific word.
7093 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
7097 * Presume the allocation and response will fit into an embedded
7098 * mailbox. If not true, reconfigure to a non-embedded mailbox.
7100 emb = LPFC_SLI4_MBX_EMBED;
7102 if (req_len > emb_len) {
7103 req_len = curr_blks * sizeof(uint16_t) +
7104 sizeof(union lpfc_sli4_cfg_shdr) +
7106 emb = LPFC_SLI4_MBX_NEMBED;
7109 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7112 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
7114 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7115 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
7117 if (alloc_len < req_len) {
7118 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7119 "2983 Allocated DMA memory size (x%x) is "
7120 "less than the requested DMA memory "
7121 "size (x%x)\n", alloc_len, req_len);
7125 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
7131 if (!phba->sli4_hba.intr_enable)
7132 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
7134 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
7135 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
7144 * Figure out where the response is located. Then get local pointers
7145 * to the response data. The port does not guarantee a response for
7146 * the full extent count requested, so update the local variable with
7147 * the allocated count from the port.
7149 if (emb == LPFC_SLI4_MBX_EMBED) {
7150 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7151 shdr = &rsrc_ext->header.cfg_shdr;
7152 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7154 virtaddr = mbox->sge_array->addr[0];
7155 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7156 shdr = &n_rsrc->cfg_shdr;
7157 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7160 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
7161 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7162 "2984 Failed to read allocated resources "
7163 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
7165 bf_get(lpfc_mbox_hdr_status, &shdr->response),
7166 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7171 lpfc_sli4_mbox_cmd_free(phba, mbox);
7176 * lpfc_sli4_repost_sgl_list - Repost the buffer sgl pages as a block
7177 * @phba: pointer to lpfc hba data structure.
7178 * @sgl_list: linked list of sgl buffers to post
7179 * @cnt: number of linked list buffers
7181 * This routine walks the list of buffers that have been allocated and
7182 * reposts them to the port using SGL block post. This is needed after a
7183 * pci_function_reset/warm_start or start. It attempts to construct blocks
7184 * of buffer sgls that contain contiguous xris and uses the non-embedded
7185 * SGL block post mailbox commands to post them to the port. For a single
7186 * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL
7187 * post mailbox command for posting.
7189 * Returns: 0 = success, non-zero failure.
7192 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7193 struct list_head *sgl_list, int cnt)
7195 struct lpfc_sglq *sglq_entry = NULL;
7196 struct lpfc_sglq *sglq_entry_next = NULL;
7197 struct lpfc_sglq *sglq_entry_first = NULL;
7198 int status, total_cnt;
7199 int post_cnt = 0, num_posted = 0, block_cnt = 0;
7200 int last_xritag = NO_XRI;
7201 LIST_HEAD(prep_sgl_list);
7202 LIST_HEAD(blck_sgl_list);
7203 LIST_HEAD(allc_sgl_list);
7204 LIST_HEAD(post_sgl_list);
7205 LIST_HEAD(free_sgl_list);
7207 spin_lock_irq(&phba->hbalock);
7208 spin_lock(&phba->sli4_hba.sgl_list_lock);
7209 list_splice_init(sgl_list, &allc_sgl_list);
7210 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7211 spin_unlock_irq(&phba->hbalock);
7214 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7215 &allc_sgl_list, list) {
7216 list_del_init(&sglq_entry->list);
7218 if ((last_xritag != NO_XRI) &&
7219 (sglq_entry->sli4_xritag != last_xritag + 1)) {
7220 /* a hole in xri block, form a sgl posting block */
7221 list_splice_init(&prep_sgl_list, &blck_sgl_list);
7222 post_cnt = block_cnt - 1;
7223 /* prepare list for next posting block */
7224 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7227 /* prepare list for next posting block */
7228 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7229 /* enough sgls for non-embed sgl mbox command */
7230 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7231 list_splice_init(&prep_sgl_list,
7233 post_cnt = block_cnt;
7239 /* keep track of last sgl's xritag */
7240 last_xritag = sglq_entry->sli4_xritag;
7242 /* end of repost sgl list condition for buffers */
7243 if (num_posted == total_cnt) {
7244 if (post_cnt == 0) {
7245 list_splice_init(&prep_sgl_list,
7247 post_cnt = block_cnt;
7248 } else if (block_cnt == 1) {
7249 status = lpfc_sli4_post_sgl(phba,
7250 sglq_entry->phys, 0,
7251 sglq_entry->sli4_xritag);
7253 /* successful, put sgl to posted list */
7254 list_add_tail(&sglq_entry->list,
7257 /* Failure, put sgl to free list */
7258 lpfc_printf_log(phba, KERN_WARNING,
7260 "3159 Failed to post "
7261 "sgl, xritag:x%x\n",
7262 sglq_entry->sli4_xritag);
7263 list_add_tail(&sglq_entry->list,
7270 /* continue until a nembed page worth of sgls */
7274 /* post the buffer list sgls as a block */
7275 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7279 /* success, put sgl list to posted sgl list */
7280 list_splice_init(&blck_sgl_list, &post_sgl_list);
7282 /* Failure, put sgl list to free sgl list */
7283 sglq_entry_first = list_first_entry(&blck_sgl_list,
7286 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7287 "3160 Failed to post sgl-list, "
7289 sglq_entry_first->sli4_xritag,
7290 (sglq_entry_first->sli4_xritag +
7292 list_splice_init(&blck_sgl_list, &free_sgl_list);
7293 total_cnt -= post_cnt;
7296 /* don't reset xritag due to hole in xri block */
7298 last_xritag = NO_XRI;
7300 /* reset sgl post count for next round of posting */
7304 /* free the sgls failed to post */
7305 lpfc_free_sgl_list(phba, &free_sgl_list);
7307 /* push sgls posted to the available list */
7308 if (!list_empty(&post_sgl_list)) {
7309 spin_lock_irq(&phba->hbalock);
7310 spin_lock(&phba->sli4_hba.sgl_list_lock);
7311 list_splice_init(&post_sgl_list, sgl_list);
7312 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7313 spin_unlock_irq(&phba->hbalock);
7315 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7316 "3161 Failure to post sgl to port.\n");
7320 /* return the number of XRIs actually posted */
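/*
 * Illustrative sketch (not part of the driver): the contiguity test
 * that closes a posting block in the loop above. A "hole" is any
 * xritag that does not directly follow the previous one; NO_XRI marks
 * the start of a fresh block. The helper name is hypothetical.
 */
static bool example_xri_hole(int last_xritag, uint16_t next_xritag)
{
	return last_xritag != NO_XRI && next_xritag != last_xritag + 1;
}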
7325 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7326 * @phba: pointer to lpfc hba data structure.
7328 * This routine walks the list of nvme buffers that have been allocated and
7329 * reposts them to the port using SGL block post. This is needed after a
7330 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7331 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7332 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7334 * Returns: 0 = success, non-zero failure.
7337 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7339 LIST_HEAD(post_nblist);
7340 int num_posted, rc = 0;
7342 /* gather all NVME buffers that need reposting onto a local list */
7343 lpfc_io_buf_flush(phba, &post_nblist);
7345 /* post the list of nvme buffer sgls to port if available */
7346 if (!list_empty(&post_nblist)) {
7347 num_posted = lpfc_sli4_post_io_sgl_list(
7348 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7349 /* failed to post any nvme buffer, return error */
7350 if (num_posted == 0)
7357 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7361 len = sizeof(struct lpfc_mbx_set_host_data) -
7362 sizeof(struct lpfc_sli4_cfg_mhdr);
7363 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7364 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7365 LPFC_SLI4_MBX_EMBED);
7367 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7368 mbox->u.mqe.un.set_host_data.param_len =
7369 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7370 snprintf(mbox->u.mqe.un.set_host_data.data,
7371 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7372 "Linux %s v"LPFC_DRIVER_VERSION,
7373 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7377 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7378 struct lpfc_queue *drq, int count, int idx)
7381 struct lpfc_rqe hrqe;
7382 struct lpfc_rqe drqe;
7383 struct lpfc_rqb *rqbp;
7384 unsigned long flags;
7385 struct rqb_dmabuf *rqb_buffer;
7386 LIST_HEAD(rqb_buf_list);
7389 for (i = 0; i < count; i++) {
7390 spin_lock_irqsave(&phba->hbalock, flags);
7391 /* If the RQ is already full, don't bother */
7392 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
7393 spin_unlock_irqrestore(&phba->hbalock, flags);
7396 spin_unlock_irqrestore(&phba->hbalock, flags);
7398 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7401 rqb_buffer->hrq = hrq;
7402 rqb_buffer->drq = drq;
7403 rqb_buffer->idx = idx;
7404 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7407 spin_lock_irqsave(&phba->hbalock, flags);
7408 while (!list_empty(&rqb_buf_list)) {
7409 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7412 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7413 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7414 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7415 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7416 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7418 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7419 "6421 Cannot post to HRQ %d: %x %x %x "
7427 rqbp->rqb_free_buffer(phba, rqb_buffer);
7429 list_add_tail(&rqb_buffer->hbuf.list,
7430 &rqbp->rqb_buffer_list);
7431 rqbp->buffer_count++;
7434 spin_unlock_irqrestore(&phba->hbalock, flags);
7439 * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
7440 * @phba: pointer to lpfc hba data structure.
7442 * This routine initializes the per-cq idle_stat to dynamically dictate
7443 * polling decisions.
7448 static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
7451 struct lpfc_sli4_hdw_queue *hdwq;
7452 struct lpfc_queue *cq;
7453 struct lpfc_idle_stat *idle_stat;
7456 for_each_present_cpu(i) {
7457 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
7460 /* Skip if we've already handled this cq's primary CPU */
7464 idle_stat = &phba->sli4_hba.idle_stat[i];
7466 idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
7467 idle_stat->prev_wall = wall;
7469 if (phba->nvmet_support)
7470 cq->poll_mode = LPFC_QUEUE_WORK;
7472 cq->poll_mode = LPFC_IRQ_POLL;
7475 if (!phba->nvmet_support)
7476 schedule_delayed_work(&phba->idle_stat_delay_work,
7477 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
7480 static void lpfc_sli4_dip(struct lpfc_hba *phba)
7484 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7485 if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
7486 if_type == LPFC_SLI_INTF_IF_TYPE_6) {
7487 struct lpfc_register reg_data;
7489 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7493 if (bf_get(lpfc_sliport_status_dip, ®_data))
7494 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7495 "2904 Firmware Dump Image Present"
7501 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
7502 * @phba: Pointer to HBA context object.
7504 * This function is the main SLI4 device initialization PCI function. This
7505 * function is called by the HBA initialization code, HBA reset code and
7506 * HBA error attention handler code. Caller is not required to hold any
7510 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7512 int rc, i, cnt, len, dd;
7513 LPFC_MBOXQ_t *mboxq;
7514 struct lpfc_mqe *mqe;
7517 uint32_t ftr_rsp = 0;
7518 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7519 struct lpfc_vport *vport = phba->pport;
7520 struct lpfc_dmabuf *mp;
7521 struct lpfc_rqb *rqbp;
7523 /* Perform a PCI function reset to start from clean */
7524 rc = lpfc_pci_function_reset(phba);
7528 /* Check the HBA Host Status Register for readiness */
7529 rc = lpfc_sli4_post_status_check(phba);
7533 spin_lock_irq(&phba->hbalock);
7534 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
7535 spin_unlock_irq(&phba->hbalock);
7538 lpfc_sli4_dip(phba);
7541 * Allocate a single mailbox container for initializing the port.
7544 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7548 /* Issue READ_REV to collect vpd and FW information. */
7549 vpd_size = SLI4_PAGE_SIZE;
7550 vpd = kzalloc(vpd_size, GFP_KERNEL);
7556 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
7562 mqe = &mboxq->u.mqe;
7563 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
7564 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
7565 phba->hba_flag |= HBA_FCOE_MODE;
7566 phba->fcp_embed_io = 0; /* SLI4 FC support only */
7568 phba->hba_flag &= ~HBA_FCOE_MODE;
7571 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
7573 phba->hba_flag |= HBA_FIP_SUPPORT;
7575 phba->hba_flag &= ~HBA_FIP_SUPPORT;
7577 phba->hba_flag &= ~HBA_IOQ_FLUSH;
7579 if (phba->sli_rev != LPFC_SLI_REV4) {
7580 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7581 "0376 READ_REV Error. SLI Level %d "
7582 "FCoE enabled %d\n",
7583 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
7590 * Continue initialization with default values even if driver failed
7591 * to read FCoE param config regions; only read parameters if the board is FCoE.
7594 if (phba->hba_flag & HBA_FCOE_MODE &&
7595 lpfc_sli4_read_fcoe_params(phba))
7596 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
7597 "2570 Failed to read FCoE parameters\n");
7600 * Retrieve the sli4 device physical port name; failure to do so
7601 * is considered non-fatal.
7603 rc = lpfc_sli4_retrieve_pport_name(phba);
7605 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7606 "3080 Successful retrieving SLI4 device "
7607 "physical port name: %s.\n", phba->Port);
7609 rc = lpfc_sli4_get_ctl_attr(phba);
7611 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7612 "8351 Successful retrieving SLI4 device "
7616 * Evaluate the read rev and vpd data. Populate the driver
7617 * state with the results. If this routine fails, the failure
7618 * is not fatal as the driver will use generic values.
7620 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
7621 if (unlikely(!rc)) {
7622 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7623 "0377 Error %d parsing vpd. "
7624 "Using defaults.\n", rc);
7629 /* Save information as VPD data */
7630 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
7631 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
7634 * This is because the first G7 ASIC doesn't support the standard
7635 * 0x5a NVME cmd descriptor type/subtype
7637 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7638 LPFC_SLI_INTF_IF_TYPE_6) &&
7639 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
7640 (phba->vpd.rev.smRev == 0) &&
7641 (phba->cfg_nvme_embed_cmd == 1))
7642 phba->cfg_nvme_embed_cmd = 0;
7644 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
7645 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
7647 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
7649 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
7651 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
7653 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
7654 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
7655 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
7656 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
7657 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
7658 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
7659 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7660 "(%d):0380 READ_REV Status x%x "
7661 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
7662 mboxq->vport ? mboxq->vport->vpi : 0,
7663 bf_get(lpfc_mqe_status, mqe),
7664 phba->vpd.rev.opFwName,
7665 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
7666 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
7668 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7669 LPFC_SLI_INTF_IF_TYPE_0) {
7670 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
7671 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7672 if (rc == MBX_SUCCESS) {
7673 phba->hba_flag |= HBA_RECOVERABLE_UE;
7674 /* Set 1Sec interval to detect UE */
7675 phba->eratt_poll_interval = 1;
7676 phba->sli4_hba.ue_to_sr = bf_get(
7677 lpfc_mbx_set_feature_UESR,
7678 &mboxq->u.mqe.un.set_feature);
7679 phba->sli4_hba.ue_to_rp = bf_get(
7680 lpfc_mbx_set_feature_UERP,
7681 &mboxq->u.mqe.un.set_feature);
7685 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
7686 /* Enable MDS Diagnostics only if the SLI Port supports it */
7687 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
7688 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7689 if (rc != MBX_SUCCESS)
7690 phba->mds_diags_support = 0;
7694 * Discover the port's supported feature set and match it against the host's requests.
7697 lpfc_request_features(phba, mboxq);
7698 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7705 * The port must support FCP initiator mode as this is the
7706 * only mode running in the host.
7708 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7709 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7710 "0378 No support for fcpi mode.\n");
7714 /* Performance Hints are ONLY for FCoE */
7715 if (phba->hba_flag & HBA_FCOE_MODE) {
7716 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7717 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7719 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
7723 * If the port cannot support the host's requested features
7724 * then turn off the global config parameters to disable the
7725 * feature in the driver. This is not a fatal error.
7727 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7728 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7729 phba->cfg_enable_bg = 0;
7730 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
7735 if (phba->max_vpi && phba->cfg_enable_npiv &&
7736 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7740 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7741 "0379 Feature Mismatch Data: x%08x %08x "
7742 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7743 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7744 phba->cfg_enable_npiv, phba->max_vpi);
7745 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7746 phba->cfg_enable_bg = 0;
7747 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7748 phba->cfg_enable_npiv = 0;
7751 /* These SLI3 features are assumed in SLI4 */
7752 spin_lock_irq(&phba->hbalock);
7753 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7754 spin_unlock_irq(&phba->hbalock);
7756 /* Always try to enable dual dump feature if we can */
7757 lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
7758 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7759 dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
7760 if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
7761 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7762 "6448 Dual Dump is enabled\n");
7764 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
7765 "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
7767 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7768 lpfc_sli_config_mbox_subsys_get(
7770 lpfc_sli_config_mbox_opcode_get(
7774 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
7775 * calls depend on these resources to complete port setup.
7777 rc = lpfc_sli4_alloc_resource_identifiers(phba);
7779 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7780 "2920 Failed to alloc Resource IDs "
7785 lpfc_set_host_data(phba, mboxq);
7787 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7789 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7790 "2134 Failed to set host os driver version %x",
7794 /* Read the port's service parameters. */
7795 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7797 phba->link_state = LPFC_HBA_ERROR;
7802 mboxq->vport = vport;
7803 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7804 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
7805 if (rc == MBX_SUCCESS) {
7806 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7811 * This memory was allocated by the lpfc_read_sparam routine. Release
7812 * it to the mbuf pool.
7814 lpfc_mbuf_free(phba, mp->virt, mp->phys);
7816 mboxq->ctx_buf = NULL;
7818 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7819 "0382 READ_SPARAM command failed "
7820 "status %d, mbxStatus x%x\n",
7821 rc, bf_get(lpfc_mqe_status, mqe));
7822 phba->link_state = LPFC_HBA_ERROR;
7827 lpfc_update_vport_wwn(vport);
7829 /* Update the fc_host data structures with new wwn. */
7830 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7831 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7833 /* Create all the SLI4 queues */
7834 rc = lpfc_sli4_queue_create(phba);
7836 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7837 "3089 Failed to allocate queues\n");
7841 /* Set up all the queues to the device */
7842 rc = lpfc_sli4_queue_setup(phba);
7844 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7845 "0381 Error %d during queue setup.\n ", rc);
7846 goto out_stop_timers;
7848 /* Initialize the driver internal SLI layer lists. */
7849 lpfc_sli4_setup(phba);
7850 lpfc_sli4_queue_init(phba);
7852 /* update host els xri-sgl sizes and mappings */
7853 rc = lpfc_sli4_els_sgl_update(phba);
7855 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7856 "1400 Failed to update xri-sgl size and "
7857 "mapping: %d\n", rc);
7858 goto out_destroy_queue;
7861 /* register the els sgl pool to the port */
7862 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7863 phba->sli4_hba.els_xri_cnt);
7864 if (unlikely(rc < 0)) {
7865 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7866 "0582 Error %d during els sgl post "
7869 goto out_destroy_queue;
7871 phba->sli4_hba.els_xri_cnt = rc;
7873 if (phba->nvmet_support) {
7874 /* update host nvmet xri-sgl sizes and mappings */
7875 rc = lpfc_sli4_nvmet_sgl_update(phba);
7877 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7878 "6308 Failed to update nvmet-sgl size "
7879 "and mapping: %d\n", rc);
7880 goto out_destroy_queue;
7883 /* register the nvmet sgl pool to the port */
7884 rc = lpfc_sli4_repost_sgl_list(
7886 &phba->sli4_hba.lpfc_nvmet_sgl_list,
7887 phba->sli4_hba.nvmet_xri_cnt);
7888 if (unlikely(rc < 0)) {
7889 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7890 "3117 Error %d during nvmet "
7893 goto out_destroy_queue;
7895 phba->sli4_hba.nvmet_xri_cnt = rc;
7897 /* We allocate an iocbq for every receive context SGL.
7898 * The additional allocation is for abort and ls handling.
7900 cnt = phba->sli4_hba.nvmet_xri_cnt +
7901 phba->sli4_hba.max_cfg_param.max_xri;
7903 /* update host common xri-sgl sizes and mappings */
7904 rc = lpfc_sli4_io_sgl_update(phba);
7906 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7907 "6082 Failed to update nvme-sgl size "
7908 "and mapping: %d\n", rc);
7909 goto out_destroy_queue;
7912 /* register the allocated common sgl pool to the port */
7913 rc = lpfc_sli4_repost_io_sgl_list(phba);
7915 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7916 "6116 Error %d during nvme sgl post "
7918 /* Some NVME buffers were moved to abort nvme list */
7919 /* A pci function reset will repost them */
7921 goto out_destroy_queue;
7923 /* Each lpfc_io_buf job structure has an iocbq element.
7924 * This cnt provides for abort, els, ct and ls requests.
7926 cnt = phba->sli4_hba.max_cfg_param.max_xri;
7929 if (!phba->sli.iocbq_lookup) {
7930 /* Initialize and populate the iocb list per host */
7931 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7932 "2821 initialize iocb list with %d entries\n",
7934 rc = lpfc_init_iocb_list(phba, cnt);
7936 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7937 "1413 Failed to init iocb list.\n");
7938 goto out_destroy_queue;
7942 if (phba->nvmet_support)
7943 lpfc_nvmet_create_targetport(phba);
7945 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7946 /* Post initial buffers to all RQs created */
7947 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7948 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7949 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7950 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7951 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
7952 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7953 rqbp->buffer_count = 0;
7955 lpfc_post_rq_buffer(
7956 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7957 phba->sli4_hba.nvmet_mrq_data[i],
7958 phba->cfg_nvmet_mrq_post, i);
7962 /* Post the rpi header region to the device. */
7963 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7965 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7966 "0393 Error %d during rpi post operation\n",
7969 goto out_destroy_queue;
7971 lpfc_sli4_node_prep(phba);
7973 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7974 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7976 * The FC Port needs to register FCFI (index 0)
7978 lpfc_reg_fcfi(phba, mboxq);
7979 mboxq->vport = phba->pport;
7980 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7981 if (rc != MBX_SUCCESS)
7982 goto out_unset_queue;
7984 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7985 &mboxq->u.mqe.un.reg_fcfi);
7987 /* We are a NVME Target mode with MRQ > 1 */
7989 /* First register the FCFI */
7990 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7991 mboxq->vport = phba->pport;
7992 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7993 if (rc != MBX_SUCCESS)
7994 goto out_unset_queue;
7996 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7997 &mboxq->u.mqe.un.reg_fcfi_mrq);
7999 /* Next register the MRQs */
8000 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
8001 mboxq->vport = phba->pport;
8002 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8003 if (rc != MBX_SUCCESS)
8004 goto out_unset_queue;
8007 /* Check if the port is configured to be disabled */
8008 lpfc_sli_read_link_ste(phba);
8011 /* Don't post more new bufs if repost already recovered buffers */
8014 if (phba->nvmet_support == 0) {
8015 if (phba->sli4_hba.io_xri_cnt == 0) {
8016 len = lpfc_new_io_buf(
8017 phba, phba->sli4_hba.io_xri_max);
8020 goto out_unset_queue;
8023 if (phba->cfg_xri_rebalancing)
8024 lpfc_create_multixri_pools(phba);
8027 phba->cfg_xri_rebalancing = 0;
8030 /* Allow asynchronous mailbox command to go through */
8031 spin_lock_irq(&phba->hbalock);
8032 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8033 spin_unlock_irq(&phba->hbalock);
8035 /* Post receive buffers to the device */
8036 lpfc_sli4_rb_setup(phba);
8038 /* Reset HBA FCF states after HBA reset */
8039 phba->fcf.fcf_flag = 0;
8040 phba->fcf.current_rec.flag = 0;
8042 /* Start the ELS watchdog timer */
8043 mod_timer(&vport->els_tmofunc,
8044 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
8046 /* Start heart beat timer */
8047 mod_timer(&phba->hb_tmofunc,
8048 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
8049 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
8050 phba->last_completion_time = jiffies;
8052 /* start eq_delay heartbeat */
8053 if (phba->cfg_auto_imax)
8054 queue_delayed_work(phba->wq, &phba->eq_delay_work,
8055 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
8057 /* start per phba idle_stat_delay heartbeat */
8058 lpfc_init_idle_stat_hb(phba);
8060 /* Start error attention (ERATT) polling timer */
8061 mod_timer(&phba->eratt_poll,
8062 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
8064 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
8065 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
8066 rc = pci_enable_pcie_error_reporting(phba->pcidev);
8068 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8069 "2829 This device supports "
8070 "Advanced Error Reporting (AER)\n");
8071 spin_lock_irq(&phba->hbalock);
8072 phba->hba_flag |= HBA_AER_ENABLED;
8073 spin_unlock_irq(&phba->hbalock);
8075 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8076 "2830 This device does not support "
8077 "Advanced Error Reporting (AER)\n");
8078 phba->cfg_aer_support = 0;
8084 * The port is ready, set the host's link state to LINK_DOWN
8085 * in preparation for link interrupts.
8087 spin_lock_irq(&phba->hbalock);
8088 phba->link_state = LPFC_LINK_DOWN;
8090 /* Check if physical ports are trunked */
8091 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
8092 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
8093 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
8094 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
8095 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
8096 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
8097 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
8098 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
8099 spin_unlock_irq(&phba->hbalock);
8101 /* Arm the CQs and then EQs on device */
8102 lpfc_sli4_arm_cqeq_intr(phba);
8104 /* Indicate device interrupt mode */
8105 phba->sli4_hba.intr_enable = 1;
8107 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
8108 (phba->hba_flag & LINK_DISABLED)) {
8109 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8110 "3103 Adapter Link is disabled.\n");
8111 lpfc_down_link(phba, mboxq);
8112 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8113 if (rc != MBX_SUCCESS) {
8114 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8115 "3104 Adapter failed to issue "
8116 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
8117 goto out_io_buff_free;
8119 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
8120 /* don't perform init_link on SLI4 FC port loopback test */
8121 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
8122 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
8124 goto out_io_buff_free;
8127 mempool_free(mboxq, phba->mbox_mem_pool);
8130 /* Free allocated IO Buffers */
8133 /* Unset all the queues set up in this routine when error out */
8134 lpfc_sli4_queue_unset(phba);
8136 lpfc_free_iocb_list(phba);
8137 lpfc_sli4_queue_destroy(phba);
8139 lpfc_stop_hba_timers(phba);
8141 mempool_free(mboxq, phba->mbox_mem_pool);
8146 * lpfc_mbox_timeout - Timeout call back function for mbox timer
8147 * @t: Context to fetch pointer to hba structure from.
8149 * This is the callback function for mailbox timer. The mailbox
8150 * timer is armed when a new mailbox command is issued and the timer
8151 * is deleted when the mailbox completes. The function is called by
8152 * the kernel timer code when a mailbox does not complete within the
8153 * expected time. This function wakes up the worker thread to
8154 * process the mailbox timeout and returns. All the processing is
8155 * done by the worker thread function lpfc_mbox_timeout_handler.
8158 lpfc_mbox_timeout(struct timer_list *t)
8160 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
8161 unsigned long iflag;
8162 uint32_t tmo_posted;
8164 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
8165 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
8166 if (!tmo_posted)
8167 phba->pport->work_port_events |= WORKER_MBOX_TMO;
8168 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
8170 if (!tmo_posted)
8171 lpfc_worker_wake_up(phba);
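/*
 * Editor's sketch: lpfc_mbox_timeout() above depends on the kernel's
 * timer_setup()/from_timer() pairing -- the timer_list is embedded in the
 * owning structure, and from_timer() (a container_of() wrapper) recovers
 * that structure at expiry. The example_dev type is hypothetical.
 */
struct example_dev {
	struct timer_list tmo;
};

static void example_tmo_fn(struct timer_list *t)
{
	struct example_dev *dev = from_timer(dev, t, tmo);

	/* dev now points at the structure that embeds 'tmo' */
	(void)dev;
}

static void example_arm(struct example_dev *dev)
{
	timer_setup(&dev->tmo, example_tmo_fn, 0);
	mod_timer(&dev->tmo, jiffies + msecs_to_jiffies(1000));
}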
8176 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
8177 * are pending
8178 * @phba: Pointer to HBA context object.
8180 * This function checks if any mailbox completions are present on the mailbox
8181 * completion queue.
8184 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
8185 {
8187 uint32_t idx;
8188 struct lpfc_queue *mcq;
8189 struct lpfc_mcqe *mcqe;
8190 bool pending_completions = false;
8191 uint8_t qe_valid;
8193 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8194 return false;
8196 /* Check for completions on mailbox completion queue */
8198 mcq = phba->sli4_hba.mbx_cq;
8199 idx = mcq->hba_index;
8200 qe_valid = mcq->qe_valid;
8201 while (bf_get_le32(lpfc_cqe_valid,
8202 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
8203 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
8204 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
8205 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
8206 pending_completions = true;
8207 break;
8208 }
8209 idx = (idx + 1) % mcq->entry_count;
8210 if (mcq->hba_index == idx)
8211 break;
8213 /* if the index wrapped around, toggle the valid bit */
8214 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
8215 qe_valid = (qe_valid) ? 0 : 1;
8217 return pending_completions;
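/*
 * Editor's sketch of the scan above in generic form: a circular completion
 * queue whose producer toggles a phase ("valid") bit on every wrap, letting
 * the consumer spot new entries without a separate count register. The
 * types and names here are hypothetical.
 */
struct example_cqe {
	u32 phase;
	u32 is_completion;
};

static bool example_cq_pending(const struct example_cqe *ring, u32 count,
			       u32 host_index, u32 phase)
{
	u32 idx = host_index;

	while (ring[idx].phase == phase) {
		if (ring[idx].is_completion)
			return true;
		idx = (idx + 1) % count;
		if (idx == host_index)
			break;
		if (!idx)
			phase = !phase;	/* wrapped: expected phase flips */
	}
	return false;
}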
8222 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
8224 * @phba: Pointer to HBA context object.
8226 * For sli4, it is possible to miss an interrupt. As such mbox completions
8227 * may be missed, causing erroneous mailbox timeouts to occur. This function
8228 * checks to see if mbox completions are on the mailbox completion queue
8229 * and will process all the completions associated with the eq for the
8230 * mailbox completion queue.
8233 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
8235 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
8236 uint32_t eqidx;
8237 struct lpfc_queue *fpeq = NULL;
8238 struct lpfc_queue *eq;
8239 bool mbox_pending;
8241 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8242 return false;
8244 /* Find the EQ associated with the mbox CQ */
8245 if (sli4_hba->hdwq) {
8246 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
8247 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
8248 if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
8249 fpeq = eq;
8250 break;
8251 }
8252 }
8253 }
8254 if (!fpeq)
8255 return false;
8257 /* Turn off interrupts from this EQ */
8259 sli4_hba->sli4_eq_clr_intr(fpeq);
8261 /* Check to see if a mbox completion is pending */
8263 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
8266 * If a mbox completion is pending, process all the events on EQ
8267 * associated with the mbox completion queue (this could include
8268 * mailbox commands, async events, els commands, receive queue data
8269 * and fcp commands)
8270 */
8272 if (mbox_pending)
8273 /* process and rearm the EQ */
8274 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
8275 else
8276 /* Always clear and re-arm the EQ */
8277 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
8279 return mbox_pending;
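/*
 * Editor's note: the routine above is the driver's "recover a lost
 * interrupt" sequence in condensed form:
 *
 *	sli4_hba->sli4_eq_clr_intr(fpeq);		quiesce the EQ
 *	if (lpfc_sli4_mbox_completions_pending(phba))	anything missed?
 *		lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
 *	else
 *		sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
 *
 * Either path leaves the EQ re-armed, so a completion cannot be stranded
 * between the check and the rearm.
 */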
8284 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
8285 * @phba: Pointer to HBA context object.
8287 * This function is called from worker thread when a mailbox command times out.
8288 * The caller is not required to hold any locks. This function will reset the
8289 * HBA and recover all the pending commands.
8292 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
8294 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
8295 MAILBOX_t *mb = NULL;
8297 struct lpfc_sli *psli = &phba->sli;
8299 /* If the mailbox completed, process the completion */
8300 lpfc_sli4_process_missed_mbox_completions(phba);
8302 if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
8303 return;
8305 if (pmbox != NULL)
8306 mb = &pmbox->u.mb;
8307 /* Check the pmbox pointer first. There is a race condition
8308 * between the mbox timeout handler getting executed in the
8309 * worklist and the mailbox actually completing. When this
8310 * race condition occurs, the mbox_active will be NULL.
8312 spin_lock_irq(&phba->hbalock);
8313 if (pmbox == NULL) {
8314 lpfc_printf_log(phba, KERN_WARNING,
8316 "0353 Active Mailbox cleared - mailbox timeout "
8318 spin_unlock_irq(&phba->hbalock);
8319 return;
8320 }
8322 /* Mbox cmd <mbxCommand> timeout */
8323 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8324 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
8326 phba->pport->port_state,
8328 phba->sli.mbox_active);
8329 spin_unlock_irq(&phba->hbalock);
8331 /* Setting state unknown so lpfc_sli_abort_iocb_ring
8332 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
8333 * it to fail all outstanding SCSI IO.
8335 spin_lock_irq(&phba->pport->work_port_lock);
8336 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8337 spin_unlock_irq(&phba->pport->work_port_lock);
8338 spin_lock_irq(&phba->hbalock);
8339 phba->link_state = LPFC_LINK_UNKNOWN;
8340 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8341 spin_unlock_irq(&phba->hbalock);
8343 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8344 "0345 Resetting board due to mailbox timeout\n");
8346 /* Reset the HBA device */
8347 lpfc_reset_hba(phba);
8351 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
8352 * @phba: Pointer to HBA context object.
8353 * @pmbox: Pointer to mailbox object.
8354 * @flag: Flag indicating how the mailbox needs to be processed.
8356 * This function is called by discovery code and HBA management code
8357 * to submit a mailbox command to firmware with SLI-3 interface spec. This
8358 * function gets the hbalock to protect the data structures.
8359 * The mailbox command can be submitted in polling mode, in which case
8360 * this function will wait in a polling loop for the completion of the
8361 * mailbox.
8362 * If the mailbox is submitted in no_wait mode (not polling) the
8363 * function will submit the command and return immediately without waiting
8364 * for the mailbox completion. The no_wait is supported only when HBA
8365 * is in SLI2/SLI3 mode - interrupts are enabled.
8366 * The SLI interface allows only one mailbox pending at a time. If the
8367 * mailbox is issued in polling mode and there is already a mailbox
8368 * pending, then the function will return an error. If the mailbox is issued
8369 * in NO_WAIT mode and there is a mailbox pending already, the function
8370 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
8371 * The sli layer owns the mailbox object until the completion of mailbox
8372 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
8373 * return codes the caller owns the mailbox command after the return of
8374 * this function.
8375 */
8376 static int
8377 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
8381 struct lpfc_sli *psli = &phba->sli;
8382 uint32_t status, evtctr;
8383 uint32_t ha_copy, hc_copy;
8385 unsigned long timeout;
8386 unsigned long drvr_flag = 0;
8387 uint32_t word0, ldata;
8388 void __iomem *to_slim;
8389 int processing_queue = 0;
8391 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8392 if (!pmbox) {
8393 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8394 /* processing mbox queue from intr_handler */
8395 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8396 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8397 return MBX_SUCCESS;
8398 }
8399 processing_queue = 1;
8400 pmbox = lpfc_mbox_get(phba);
8401 if (!pmbox) {
8402 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8403 return MBX_SUCCESS;
8404 }
8405 }
8407 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
8408 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
8409 if (!pmbox->vport) {
8410 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8411 lpfc_printf_log(phba, KERN_ERR,
8412 LOG_MBOX | LOG_VPORT,
8413 "1806 Mbox x%x failed. No vport\n",
8414 pmbox->u.mb.mbxCommand);
8415 dump_stack();
8416 goto out_not_finished;
8420 /* If the PCI channel is in offline state, do not post mbox. */
8421 if (unlikely(pci_channel_offline(phba->pcidev))) {
8422 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8423 goto out_not_finished;
8426 /* If HBA has a deferred error attention, fail the mailbox command. */
8427 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8428 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8429 goto out_not_finished;
8435 status = MBX_SUCCESS;
8437 if (phba->link_state == LPFC_HBA_ERROR) {
8438 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8440 /* Mbox command <mbxCommand> cannot issue */
8441 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8442 "(%d):0311 Mailbox command x%x cannot "
8443 "issue Data: x%x x%x\n",
8444 pmbox->vport ? pmbox->vport->vpi : 0,
8445 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8446 goto out_not_finished;
8449 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
8450 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
8451 !(hc_copy & HC_MBINT_ENA)) {
8452 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8453 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8454 "(%d):2528 Mailbox command x%x cannot "
8455 "issue Data: x%x x%x\n",
8456 pmbox->vport ? pmbox->vport->vpi : 0,
8457 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8458 goto out_not_finished;
8462 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8463 /* Polling for a mbox command when another one is already active
8464 * is not allowed in SLI. Also, the driver must have established
8465 * SLI2 mode to queue and process multiple mbox commands.
8468 if (flag & MBX_POLL) {
8469 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8471 /* Mbox command <mbxCommand> cannot issue */
8472 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8473 "(%d):2529 Mailbox command x%x "
8474 "cannot issue Data: x%x x%x\n",
8475 pmbox->vport ? pmbox->vport->vpi : 0,
8476 pmbox->u.mb.mbxCommand,
8477 psli->sli_flag, flag);
8478 goto out_not_finished;
8481 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
8482 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8483 /* Mbox command <mbxCommand> cannot issue */
8484 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8485 "(%d):2530 Mailbox command x%x "
8486 "cannot issue Data: x%x x%x\n",
8487 pmbox->vport ? pmbox->vport->vpi : 0,
8488 pmbox->u.mb.mbxCommand,
8489 psli->sli_flag, flag);
8490 goto out_not_finished;
8493 /* Another mailbox command is still being processed, queue this
8494 * command to be processed later.
8496 lpfc_mbox_put(phba, pmbox);
8498 /* Mbox cmd issue - BUSY */
8499 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8500 "(%d):0308 Mbox cmd issue - BUSY Data: "
8501 "x%x x%x x%x x%x\n",
8502 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
8504 phba->pport ? phba->pport->port_state : 0xff,
8505 psli->sli_flag, flag);
8507 psli->slistat.mbox_busy++;
8508 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8510 if (pmbox->vport) {
8511 lpfc_debugfs_disc_trc(pmbox->vport,
8512 LPFC_DISC_TRC_MBOX_VPORT,
8513 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
8514 (uint32_t)mbx->mbxCommand,
8515 mbx->un.varWords[0], mbx->un.varWords[1]);
8516 }
8517 else {
8518 lpfc_debugfs_disc_trc(phba->pport,
8520 "MBOX Bsy: cmd:x%x mb:x%x x%x",
8521 (uint32_t)mbx->mbxCommand,
8522 mbx->un.varWords[0], mbx->un.varWords[1]);
8523 }
8525 return MBX_BUSY;
8526 }
8528 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8530 /* If we are not polling, we MUST be in SLI2 mode */
8531 if (flag != MBX_POLL) {
8532 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
8533 (mbx->mbxCommand != MBX_KILL_BOARD)) {
8534 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8535 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8536 /* Mbox command <mbxCommand> cannot issue */
8537 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8538 "(%d):2531 Mailbox command x%x "
8539 "cannot issue Data: x%x x%x\n",
8540 pmbox->vport ? pmbox->vport->vpi : 0,
8541 pmbox->u.mb.mbxCommand,
8542 psli->sli_flag, flag);
8543 goto out_not_finished;
8545 /* timeout active mbox command */
8546 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8548 mod_timer(&psli->mbox_tmo, jiffies + timeout);
8551 /* Mailbox cmd <cmd> issue */
8552 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8553 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
8555 pmbox->vport ? pmbox->vport->vpi : 0,
8557 phba->pport ? phba->pport->port_state : 0xff,
8558 psli->sli_flag, flag);
8560 if (mbx->mbxCommand != MBX_HEARTBEAT) {
8561 if (pmbox->vport) {
8562 lpfc_debugfs_disc_trc(pmbox->vport,
8563 LPFC_DISC_TRC_MBOX_VPORT,
8564 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8565 (uint32_t)mbx->mbxCommand,
8566 mbx->un.varWords[0], mbx->un.varWords[1]);
8567 }
8568 else {
8569 lpfc_debugfs_disc_trc(phba->pport,
8571 "MBOX Send: cmd:x%x mb:x%x x%x",
8572 (uint32_t)mbx->mbxCommand,
8573 mbx->un.varWords[0], mbx->un.varWords[1]);
8577 psli->slistat.mbox_cmd++;
8578 evtctr = psli->slistat.mbox_event;
8580 /* next set own bit for the adapter and copy over command word */
8581 mbx->mbxOwner = OWN_CHIP;
8583 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8584 /* Populate mbox extension offset word. */
8585 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
8586 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8587 = (uint8_t *)phba->mbox_ext
8588 - (uint8_t *)phba->mbox;
8591 /* Copy the mailbox extension data */
8592 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
8593 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
8594 (uint8_t *)phba->mbox_ext,
8595 pmbox->in_ext_byte_len);
8597 /* Copy command data to host SLIM area */
8598 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
8599 } else {
8600 /* Populate mbox extension offset word. */
8601 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
8602 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8603 = MAILBOX_HBA_EXT_OFFSET;
8605 /* Copy the mailbox extension data */
8606 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
8607 lpfc_memcpy_to_slim(phba->MBslimaddr +
8608 MAILBOX_HBA_EXT_OFFSET,
8609 pmbox->ctx_buf, pmbox->in_ext_byte_len);
8611 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8612 /* copy command data into host mbox for cmpl */
8613 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
8616 /* First copy mbox command data to HBA SLIM, skip past first
8618 to_slim = phba->MBslimaddr + sizeof (uint32_t);
8619 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
8620 MAILBOX_CMD_SIZE - sizeof (uint32_t));
8622 /* Next copy over first word, with mbxOwner set */
8623 ldata = *((uint32_t *)mbx);
8624 to_slim = phba->MBslimaddr;
8625 writel(ldata, to_slim);
8626 readl(to_slim); /* flush */
8628 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8629 /* switch over to host mailbox */
8630 psli->sli_flag |= LPFC_SLI_ACTIVE;
8631 }
8633 wmb();
8635 switch (flag) {
8636 case MBX_NOWAIT:
8637 /* Set up reference to mailbox command */
8638 psli->mbox_active = pmbox;
8639 /* Interrupt board to do it */
8640 writel(CA_MBATT, phba->CAregaddr);
8641 readl(phba->CAregaddr); /* flush */
8642 /* Don't wait for it to finish, just return */
8643 break;
8645 case MBX_POLL:
8646 /* Set up null reference to mailbox command */
8647 psli->mbox_active = NULL;
8648 /* Interrupt board to do it */
8649 writel(CA_MBATT, phba->CAregaddr);
8650 readl(phba->CAregaddr); /* flush */
8652 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8653 /* First read mbox status word */
8654 word0 = *((uint32_t *)phba->mbox);
8655 word0 = le32_to_cpu(word0);
8656 } else {
8657 /* First read mbox status word */
8658 if (lpfc_readl(phba->MBslimaddr, &word0)) {
8659 spin_unlock_irqrestore(&phba->hbalock,
8661 goto out_not_finished;
8665 /* Read the HBA Host Attention Register */
8666 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8667 spin_unlock_irqrestore(&phba->hbalock,
8669 goto out_not_finished;
8671 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8672 1000) + jiffies;
8674 /* Wait for command to complete */
8675 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
8676 (!(ha_copy & HA_MBATT) &&
8677 (phba->link_state > LPFC_WARM_START))) {
8678 if (time_after(jiffies, timeout)) {
8679 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8680 spin_unlock_irqrestore(&phba->hbalock,
8682 goto out_not_finished;
8685 /* Check if we took a mbox interrupt while we were
8687 if (((word0 & OWN_CHIP) != OWN_CHIP)
8688 && (evtctr != psli->slistat.mbox_event))
8689 break;
8692 spin_unlock_irqrestore(&phba->hbalock,
8695 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8698 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8699 /* First copy command data */
8700 word0 = *((uint32_t *)phba->mbox);
8701 word0 = le32_to_cpu(word0);
8702 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
8703 MAILBOX_t *slimmb;
8704 uint32_t slimword0;
8705 /* Check real SLIM for any errors */
8706 slimword0 = readl(phba->MBslimaddr);
8707 slimmb = (MAILBOX_t *) & slimword0;
8708 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
8709 && slimmb->mbxStatus) {
8710 psli->sli_flag &=
8711 ~LPFC_SLI_ACTIVE;
8712 word0 = slimword0;
8713 }
8714 }
8715 } else {
8716 /* First copy command data */
8717 word0 = readl(phba->MBslimaddr);
8719 /* Read the HBA Host Attention Register */
8720 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8721 spin_unlock_irqrestore(&phba->hbalock,
8723 goto out_not_finished;
8727 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8728 /* copy results back to user */
8729 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
8731 /* Copy the mailbox extension data */
8732 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8733 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
8735 pmbox->out_ext_byte_len);
8736 }
8737 } else {
8738 /* First copy command data */
8739 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
8741 /* Copy the mailbox extension data */
8742 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8743 lpfc_memcpy_from_slim(
8746 MAILBOX_HBA_EXT_OFFSET,
8747 pmbox->out_ext_byte_len);
8751 writel(HA_MBATT, phba->HAregaddr);
8752 readl(phba->HAregaddr); /* flush */
8754 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8755 status = mbx->mbxStatus;
8758 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8759 return status;
8761 out_not_finished:
8762 if (processing_queue) {
8763 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
8764 lpfc_mbox_cmpl_put(phba, pmbox);
8766 return MBX_NOT_FINISHED;
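/*
 * Editor's sketch (hypothetical helper, not driver code) of a caller
 * honoring the ownership rules documented above; lpfc_heart_beat() and
 * lpfc_sli_def_mbox_cmpl() are existing helpers in this driver:
 */
static int example_issue_heartbeat(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	lpfc_heart_beat(phba, mbox);		/* build the command */
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mbox, flag);
	/* MBX_BUSY/MBX_SUCCESS: the SLI layer owns the mailbox until it
	 * completes; any other return code hands ownership back to us.
	 */
	if (rc != MBX_BUSY && rc != MBX_SUCCESS)
		mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}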
8770 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
8771 * @phba: Pointer to HBA context object.
8773 * The function blocks the posting of SLI4 asynchronous mailbox commands from
8774 * the driver internal pending mailbox queue. It will then try to wait out the
8775 * possible outstanding mailbox command before return.
8778 * 0 - the outstanding mailbox command completed; otherwise, the wait for
8779 * the outstanding mailbox command timed out.
8782 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
8784 struct lpfc_sli *psli = &phba->sli;
8785 int rc = 0;
8786 unsigned long timeout = 0;
8788 /* Mark the asynchronous mailbox command posting as blocked */
8789 spin_lock_irq(&phba->hbalock);
8790 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8791 /* Determine how long we might wait for the active mailbox
8792 * command to be gracefully completed by firmware.
8794 if (phba->sli.mbox_active)
8795 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8796 phba->sli.mbox_active) *
8797 1000) + jiffies;
8798 spin_unlock_irq(&phba->hbalock);
8800 /* Make sure the mailbox is really active */
8801 if (timeout)
8802 lpfc_sli4_process_missed_mbox_completions(phba);
8804 /* Wait for the outstanding mailbox command to complete */
8805 while (phba->sli.mbox_active) {
8806 /* Check active mailbox complete status every 2ms */
8807 msleep(2);
8808 if (time_after(jiffies, timeout)) {
8809 /* Timeout: mark the outstanding cmd as not complete */
8810 rc = 1;
8811 break;
8812 }
8813 }
8815 /* Cannot cleanly block the async mailbox command, so fail it */
8816 if (rc) {
8817 spin_lock_irq(&phba->hbalock);
8818 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8819 spin_unlock_irq(&phba->hbalock);
8820 }
8821 return rc;
8822 }
8825 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
8826 * @phba: Pointer to HBA context object.
8828 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
8829 * commands from the driver internal pending mailbox queue. It makes sure
8830 * that there is no outstanding mailbox command before resuming posting
8831 * asynchronous mailbox commands. If, for any reason, there is an outstanding
8832 * mailbox command, it will try to wait it out before resuming asynchronous
8833 * mailbox command posting.
8836 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8838 struct lpfc_sli *psli = &phba->sli;
8840 spin_lock_irq(&phba->hbalock);
8841 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8842 /* Asynchronous mailbox posting is not blocked, do nothing */
8843 spin_unlock_irq(&phba->hbalock);
8844 return;
8845 }
8847 /* The outstanding synchronous mailbox command is guaranteed to be done,
8848 * successfully or by timeout; after timing out, the outstanding mailbox
8849 * command is always removed, so just unblock the posting of async
8850 * mailbox commands and resume
8852 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8853 spin_unlock_irq(&phba->hbalock);
8855 /* wake up worker thread to post asynchronous mailbox command */
8856 lpfc_worker_wake_up(phba);
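/*
 * Editor's note: block/unblock are meant to bracket a synchronous command
 * issued while the async path is live, exactly as lpfc_sli_issue_mbox_s4()
 * does below:
 *
 *	rc = lpfc_sli4_async_mbox_block(phba);
 *	if (!rc)
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *	lpfc_sli4_async_mbox_unblock(phba);
 */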
8860 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8861 * @phba: Pointer to HBA context object.
8862 * @mboxq: Pointer to mailbox object.
8864 * The function waits for the bootstrap mailbox register ready bit from
8865 * the port for twice the regular mailbox command timeout value.
8867 * 0 - no timeout on waiting for bootstrap mailbox register ready.
8868 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
8871 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8872 {
8873 uint32_t db_ready;
8874 unsigned long timeout;
8875 struct lpfc_register bmbx_reg;
8877 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8878 * 1000) + jiffies;
8880 do {
8881 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8882 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8883 if (!db_ready)
8884 mdelay(2);
8886 if (time_after(jiffies, timeout))
8887 return MBXERR_ERROR;
8888 } while (!db_ready);
8890 return 0;
8891 }
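/*
 * Editor's sketch: the loop above is the standard bounded register poll. A
 * generic, self-contained version (the "ready" bit 0 is hypothetical):
 */
static int example_poll_ready(void __iomem *reg, unsigned int tmo_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(tmo_ms);

	while (!(readl(reg) & 0x1)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		mdelay(2);	/* same pacing the driver uses above */
	}
	return 0;
}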
8894 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
8895 * @phba: Pointer to HBA context object.
8896 * @mboxq: Pointer to mailbox object.
8898 * The function posts a mailbox to the port. The mailbox is expected
8899 * to be completely filled in and ready for the port to operate on it.
8900 * This routine executes a synchronous completion operation on the
8901 * mailbox by polling for its completion.
8903 * The caller must not be holding any locks when calling this routine.
8906 * MBX_SUCCESS - mailbox posted successfully
8907 * Any of the MBX error values.
8910 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8912 int rc = MBX_SUCCESS;
8913 unsigned long iflag;
8914 uint32_t mcqe_status;
8915 uint32_t mbx_cmnd;
8916 struct lpfc_sli *psli = &phba->sli;
8917 struct lpfc_mqe *mb = &mboxq->u.mqe;
8918 struct lpfc_bmbx_create *mbox_rgn;
8919 struct dma_address *dma_address;
8922 * Only one mailbox can be active to the bootstrap mailbox region
8923 * at a time and there is no queueing provided.
8925 spin_lock_irqsave(&phba->hbalock, iflag);
8926 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8927 spin_unlock_irqrestore(&phba->hbalock, iflag);
8928 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8929 "(%d):2532 Mailbox command x%x (x%x/x%x) "
8930 "cannot issue Data: x%x x%x\n",
8931 mboxq->vport ? mboxq->vport->vpi : 0,
8932 mboxq->u.mb.mbxCommand,
8933 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8934 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8935 psli->sli_flag, MBX_POLL);
8936 return MBXERR_ERROR;
8938 /* The server grabs the token and owns it until released */
8939 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8940 phba->sli.mbox_active = mboxq;
8941 spin_unlock_irqrestore(&phba->hbalock, iflag);
8943 /* wait for bootstrap mbox register for readiness */
8944 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8945 if (rc)
8946 goto exit;
8948 * Initialize the bootstrap memory region to avoid stale data areas
8949 * in the mailbox post. Then copy the caller's mailbox contents to
8950 * the bmbx mailbox region.
8952 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8953 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
8954 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8955 sizeof(struct lpfc_mqe));
8957 /* Post the high mailbox dma address to the port and wait for ready. */
8958 dma_address = &phba->sli4_hba.bmbx.dma_address;
8959 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8961 /* wait for bootstrap mbox register for hi-address write done */
8962 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8963 if (rc)
8964 goto exit;
8966 /* Post the low mailbox dma address to the port. */
8967 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
8969 /* wait for bootstrap mbox register for low address write done */
8970 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8971 if (rc)
8972 goto exit;
8974 /*
8975 * Read the CQ to ensure the mailbox has completed.
8976 * If so, update the mailbox status so that the upper layers
8977 * can complete the request normally.
8979 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8980 sizeof(struct lpfc_mqe));
8981 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8982 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8983 sizeof(struct lpfc_mcqe));
8984 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8986 * When the CQE status indicates a failure and the mailbox status
8987 * indicates success then copy the CQE status into the mailbox status
8988 * (and prefix it with x4000).
8990 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
8991 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
8992 bf_set(lpfc_mqe_status, mb,
8993 (LPFC_MBX_ERROR_RANGE | mcqe_status));
8994 rc = MBXERR_ERROR;
8995 } else
8996 lpfc_sli4_swap_str(phba, mboxq);
8998 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8999 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
9000 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
9001 " x%x x%x CQ: x%x x%x x%x x%x\n",
9002 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9003 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9004 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9005 bf_get(lpfc_mqe_status, mb),
9006 mb->un.mb_words[0], mb->un.mb_words[1],
9007 mb->un.mb_words[2], mb->un.mb_words[3],
9008 mb->un.mb_words[4], mb->un.mb_words[5],
9009 mb->un.mb_words[6], mb->un.mb_words[7],
9010 mb->un.mb_words[8], mb->un.mb_words[9],
9011 mb->un.mb_words[10], mb->un.mb_words[11],
9012 mb->un.mb_words[12], mboxq->mcqe.word0,
9013 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
9014 mboxq->mcqe.trailer);
9015 exit:
9016 /* We are holding the token; no lock is needed on release */
9017 spin_lock_irqsave(&phba->hbalock, iflag);
9018 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9019 phba->sli.mbox_active = NULL;
9020 spin_unlock_irqrestore(&phba->hbalock, iflag);
9021 return rc;
9022 }
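/*
 * Editor's note: the two doorbell writes above hand the port the 64-bit
 * bootstrap mailbox DMA address in two 32-bit halves, high half first, with
 * a readiness wait after each write. In terms of the standard helpers:
 *
 *	writel(upper_32_bits(dma), phba->sli4_hba.BMBXregaddr);
 *	(wait for lpfc_bmbx_rdy)
 *	writel(lower_32_bits(dma), phba->sli4_hba.BMBXregaddr);
 *	(wait for lpfc_bmbx_rdy)
 *
 * The low-half write completes the handoff, after which the port processes
 * the mailbox region.
 */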
9025 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
9026 * @phba: Pointer to HBA context object.
9027 * @mboxq: Pointer to mailbox object.
9028 * @flag: Flag indicating how the mailbox needs to be processed.
9030 * This function is called by discovery code and HBA management code to submit
9031 * a mailbox command to firmware with SLI-4 interface spec.
9033 * Return codes: the caller owns the mailbox command after the return of
9034 * the function.
9035 */
9036 static int
9037 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
9038 uint32_t flag)
9039 {
9040 struct lpfc_sli *psli = &phba->sli;
9041 unsigned long iflags;
9042 int rc;
9044 /* dump from issue mailbox command if setup */
9045 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
9047 rc = lpfc_mbox_dev_check(phba);
9048 if (unlikely(rc)) {
9049 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9050 "(%d):2544 Mailbox command x%x (x%x/x%x) "
9051 "cannot issue Data: x%x x%x\n",
9052 mboxq->vport ? mboxq->vport->vpi : 0,
9053 mboxq->u.mb.mbxCommand,
9054 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9055 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9056 psli->sli_flag, flag);
9057 goto out_not_finished;
9060 /* Detect polling mode and jump to a handler */
9061 if (!phba->sli4_hba.intr_enable) {
9062 if (flag == MBX_POLL)
9063 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9064 else
9065 rc = -EIO;
9066 if (rc != MBX_SUCCESS)
9067 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
9068 "(%d):2541 Mailbox command x%x "
9069 "(x%x/x%x) failure: "
9070 "mqe_sta: x%x mcqe_sta: x%x/x%x "
9072 mboxq->vport ? mboxq->vport->vpi : 0,
9073 mboxq->u.mb.mbxCommand,
9074 lpfc_sli_config_mbox_subsys_get(phba,
9076 lpfc_sli_config_mbox_opcode_get(phba,
9078 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9079 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9080 bf_get(lpfc_mcqe_ext_status,
9082 psli->sli_flag, flag);
9083 return rc;
9084 } else if (flag == MBX_POLL) {
9085 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
9086 "(%d):2542 Try to issue mailbox command "
9087 "x%x (x%x/x%x) synchronously ahead of async "
9088 "mailbox command queue: x%x x%x\n",
9089 mboxq->vport ? mboxq->vport->vpi : 0,
9090 mboxq->u.mb.mbxCommand,
9091 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9092 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9093 psli->sli_flag, flag);
9094 /* Try to block the asynchronous mailbox posting */
9095 rc = lpfc_sli4_async_mbox_block(phba);
9096 if (!rc) {
9097 /* Successfully blocked, now issue sync mbox cmd */
9098 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9099 if (rc != MBX_SUCCESS)
9100 lpfc_printf_log(phba, KERN_WARNING,
9102 "(%d):2597 Sync Mailbox command "
9103 "x%x (x%x/x%x) failure: "
9104 "mqe_sta: x%x mcqe_sta: x%x/x%x "
9106 mboxq->vport ? mboxq->vport->vpi : 0,
9107 mboxq->u.mb.mbxCommand,
9108 lpfc_sli_config_mbox_subsys_get(phba,
9110 lpfc_sli_config_mbox_opcode_get(phba,
9112 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9113 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9114 bf_get(lpfc_mcqe_ext_status,
9116 psli->sli_flag, flag);
9117 /* Unblock the async mailbox posting afterward */
9118 lpfc_sli4_async_mbox_unblock(phba);
9119 }
9121 return rc;
9122 }
9123 /* Now, interrupt mode asynchronous mailbox command */
9124 rc = lpfc_mbox_cmd_check(phba, mboxq);
9125 if (rc) {
9126 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9127 "(%d):2543 Mailbox command x%x (x%x/x%x) "
9128 "cannot issue Data: x%x x%x\n",
9129 mboxq->vport ? mboxq->vport->vpi : 0,
9130 mboxq->u.mb.mbxCommand,
9131 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9132 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9133 psli->sli_flag, flag);
9134 goto out_not_finished;
9137 /* Put the mailbox command to the driver internal FIFO */
9138 psli->slistat.mbox_busy++;
9139 spin_lock_irqsave(&phba->hbalock, iflags);
9140 lpfc_mbox_put(phba, mboxq);
9141 spin_unlock_irqrestore(&phba->hbalock, iflags);
9142 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9143 "(%d):0354 Mbox cmd issue - Enqueue Data: "
9144 "x%x (x%x/x%x) x%x x%x x%x\n",
9145 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
9146 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
9147 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9148 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9149 phba->pport->port_state,
9150 psli->sli_flag, MBX_NOWAIT);
9151 /* Wake up worker thread to transport mailbox command from head */
9152 lpfc_worker_wake_up(phba);
9154 return MBX_BUSY;
9156 out_not_finished:
9157 return MBX_NOT_FINISHED;
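/*
 * Editor's sketch (hypothetical completion handler): a caller of the
 * interrupt-driven path above queues the command and lets the completion
 * handler do the cleanup:
 */
static void example_mbox_done(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	/* inspect pmb->u.mb.mbxStatus here, then release the mailbox */
	mempool_free(pmb, phba->mbox_mem_pool);
}

/*
 *	mboxq->mbox_cmpl = example_mbox_done;
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 *	MBX_BUSY and MBX_SUCCESS both mean the SLI layer now owns mboxq
 *	until example_mbox_done() runs.
 */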
9161 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
9162 * @phba: Pointer to HBA context object.
9164 * This function is called by worker thread to send a mailbox command to
9165 * SLI4 HBA firmware.
9169 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
9171 struct lpfc_sli *psli = &phba->sli;
9172 LPFC_MBOXQ_t *mboxq;
9173 int rc = MBX_SUCCESS;
9174 unsigned long iflags;
9175 struct lpfc_mqe *mqe;
9176 uint32_t mbx_cmnd;
9178 /* Check interrupt mode before post async mailbox command */
9179 if (unlikely(!phba->sli4_hba.intr_enable))
9180 return MBX_NOT_FINISHED;
9182 /* Check for mailbox command service token */
9183 spin_lock_irqsave(&phba->hbalock, iflags);
9184 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9185 spin_unlock_irqrestore(&phba->hbalock, iflags);
9186 return MBX_NOT_FINISHED;
9188 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9189 spin_unlock_irqrestore(&phba->hbalock, iflags);
9190 return MBX_NOT_FINISHED;
9192 if (unlikely(phba->sli.mbox_active)) {
9193 spin_unlock_irqrestore(&phba->hbalock, iflags);
9194 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9195 "0384 There is pending active mailbox cmd\n");
9196 return MBX_NOT_FINISHED;
9198 /* Take the mailbox command service token */
9199 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9201 /* Get the next mailbox command from head of queue */
9202 mboxq = lpfc_mbox_get(phba);
9204 /* If no more mailbox commands are waiting to be posted, we're done */
9205 if (!mboxq) {
9206 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9207 spin_unlock_irqrestore(&phba->hbalock, iflags);
9208 return MBX_SUCCESS;
9209 }
9210 phba->sli.mbox_active = mboxq;
9211 spin_unlock_irqrestore(&phba->hbalock, iflags);
9213 /* Check device readiness for posting mailbox command */
9214 rc = lpfc_mbox_dev_check(phba);
9215 if (unlikely(rc)) {
9216 /* Driver clean routine will clean up pending mailbox */
9217 goto out_not_finished;
9219 /* Prepare the mbox command to be posted */
9220 mqe = &mboxq->u.mqe;
9221 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
9223 /* Start timer for the mbox_tmo and log some mailbox post messages */
9224 mod_timer(&psli->mbox_tmo, (jiffies +
9225 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
9227 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9228 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
9230 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9231 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9232 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9233 phba->pport->port_state, psli->sli_flag);
9235 if (mbx_cmnd != MBX_HEARTBEAT) {
9236 if (mboxq->vport) {
9237 lpfc_debugfs_disc_trc(mboxq->vport,
9238 LPFC_DISC_TRC_MBOX_VPORT,
9239 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9240 mbx_cmnd, mqe->un.mb_words[0],
9241 mqe->un.mb_words[1]);
9242 } else {
9243 lpfc_debugfs_disc_trc(phba->pport,
9245 "MBOX Send: cmd:x%x mb:x%x x%x",
9246 mbx_cmnd, mqe->un.mb_words[0],
9247 mqe->un.mb_words[1]);
9250 psli->slistat.mbox_cmd++;
9252 /* Post the mailbox command to the port */
9253 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
9254 if (rc != MBX_SUCCESS) {
9255 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9256 "(%d):2533 Mailbox command x%x (x%x/x%x) "
9257 "cannot issue Data: x%x x%x\n",
9258 mboxq->vport ? mboxq->vport->vpi : 0,
9259 mboxq->u.mb.mbxCommand,
9260 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9261 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9262 psli->sli_flag, MBX_NOWAIT);
9263 goto out_not_finished;
9264 }
9266 return rc;
9268 out_not_finished:
9269 spin_lock_irqsave(&phba->hbalock, iflags);
9270 if (phba->sli.mbox_active) {
9271 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
9272 __lpfc_mbox_cmpl_put(phba, mboxq);
9273 /* Release the token */
9274 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9275 phba->sli.mbox_active = NULL;
9277 spin_unlock_irqrestore(&phba->hbalock, iflags);
9279 return MBX_NOT_FINISHED;
9283 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
9284 * @phba: Pointer to HBA context object.
9285 * @pmbox: Pointer to mailbox object.
9286 * @flag: Flag indicating how the mailbox needs to be processed.
9288 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
9289 * the API jump table function pointer from the lpfc_hba struct.
9291 * Return codes: the caller owns the mailbox command after the return of
9292 * the function.
9293 */
9294 int
9295 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
9297 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
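/*
 * Editor's note: this one-line wrapper is the SLI-3/SLI-4 dispatch point.
 * The pointer is installed once by lpfc_mbox_api_table_setup() below, so hot
 * paths avoid per-call revision checks; a sketch of the open-coded
 * equivalent:
 *
 *	if (phba->sli_rev < LPFC_SLI_REV4)
 *		return lpfc_sli_issue_mbox_s3(phba, pmbox, flag);
 *	return lpfc_sli_issue_mbox_s4(phba, pmbox, flag);
 */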
9301 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
9302 * @phba: The hba struct for which this call is being executed.
9303 * @dev_grp: The HBA PCI-Device group number.
9305 * This routine sets up the mbox interface API function jump table in the
9306 * @phba struct.
9307 * Returns: 0 - success, -ENODEV - failure.
9310 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9311 {
9313 switch (dev_grp) {
9314 case LPFC_PCI_DEV_LP:
9315 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
9316 phba->lpfc_sli_handle_slow_ring_event =
9317 lpfc_sli_handle_slow_ring_event_s3;
9318 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
9319 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
9320 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
9321 break;
9322 case LPFC_PCI_DEV_OC:
9323 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
9324 phba->lpfc_sli_handle_slow_ring_event =
9325 lpfc_sli_handle_slow_ring_event_s4;
9326 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
9327 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
9328 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
9329 break;
9330 default:
9331 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9332 "1420 Invalid HBA PCI-device group: 0x%x\n",
9340 * __lpfc_sli_ringtx_put - Add an iocb to the txq
9341 * @phba: Pointer to HBA context object.
9342 * @pring: Pointer to driver SLI ring object.
9343 * @piocb: Pointer to address of newly added command iocb.
9345 * This function is called with hbalock held for SLI3 ports or
9346 * the ring lock held for SLI4 ports to add a command
9347 * iocb to the txq when SLI layer cannot submit the command iocb
9348 * to the ring.
9349 */
9350 static void
9351 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9352 struct lpfc_iocbq *piocb)
9354 if (phba->sli_rev == LPFC_SLI_REV4)
9355 lockdep_assert_held(&pring->ring_lock);
9356 else
9357 lockdep_assert_held(&phba->hbalock);
9358 /* Insert the caller's iocb in the txq tail for later processing. */
9359 list_add_tail(&piocb->list, &pring->txq);
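/*
 * Editor's sketch: the lockdep_assert_held() calls above turn the "caller
 * must hold the lock" comment into a runtime check on lockdep-enabled
 * kernels. A generic form with a hypothetical queue:
 */
static void example_queue_tail(struct list_head *q, struct list_head *item,
			       spinlock_t *owner_lock)
{
	lockdep_assert_held(owner_lock);	/* compiles away w/o lockdep */
	list_add_tail(item, q);
}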
9363 * lpfc_sli_next_iocb - Get the next iocb in the txq
9364 * @phba: Pointer to HBA context object.
9365 * @pring: Pointer to driver SLI ring object.
9366 * @piocb: Pointer to address of newly added command iocb.
9368 * This function is called with hbalock held before a new
9369 * iocb is submitted to the firmware. This function checks the
9370 * txq and flushes any iocbs waiting in the txq to the firmware before
9371 * submitting new iocbs to the firmware.
9372 * If there are iocbs in the txq which need to be submitted
9373 * to firmware, lpfc_sli_next_iocb returns the first element
9374 * of the txq after dequeuing it from txq.
9375 * If there is no iocb in the txq then the function will return
9376 * *piocb and *piocb is set to NULL. Caller needs to check
9377 * *piocb to find if there are more commands in the txq.
9379 static struct lpfc_iocbq *
9380 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9381 struct lpfc_iocbq **piocb)
9383 struct lpfc_iocbq * nextiocb;
9385 lockdep_assert_held(&phba->hbalock);
9387 nextiocb = lpfc_sli_ringtx_get(phba, pring);
9389 if (!nextiocb) {
9390 nextiocb = *piocb;
9391 *piocb = NULL;
9392 }
9394 return nextiocb;
9395 }
9396 /**
9397 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
9398 * @phba: Pointer to HBA context object.
9399 * @ring_number: SLI ring number to issue iocb on.
9400 * @piocb: Pointer to command iocb.
9401 * @flag: Flag indicating if this command can be put into txq.
9403 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
9404 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
9405 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
9406 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
9407 * this function allows only iocbs for posting buffers. This function finds
9408 * next available slot in the command ring and posts the command to the
9409 * available slot and writes the port attention register to request HBA start
9410 * processing new iocb. If there is no slot available in the ring and
9411 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
9412 * the function returns IOCB_BUSY.
9414 * This function is called with hbalock held. The function will return success
9415 * after it successfully submits the iocb to firmware or after adding it to
9416 * the txq.
9417 */
9418 static int
9419 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
9420 struct lpfc_iocbq *piocb, uint32_t flag)
9422 struct lpfc_iocbq *nextiocb;
9423 IOCB_t *iocb;
9424 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
9426 lockdep_assert_held(&phba->hbalock);
9428 if (piocb->iocb_cmpl && (!piocb->vport) &&
9429 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
9430 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
9431 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9432 "1807 IOCB x%x failed. No vport\n",
9433 piocb->iocb.ulpCommand);
9434 dump_stack();
9435 return IOCB_ERROR;
9436 }
9439 /* If the PCI channel is in offline state, do not post iocbs. */
9440 if (unlikely(pci_channel_offline(phba->pcidev)))
9441 return IOCB_ERROR;
9443 /* If HBA has a deferred error attention, fail the iocb. */
9444 if (unlikely(phba->hba_flag & DEFER_ERATT))
9445 return IOCB_ERROR;
9447 /*
9448 * We should never get an IOCB if we are in a < LINK_DOWN state
9450 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
9451 return IOCB_ERROR;
9453 /*
9454 * Check to see if we are blocking IOCB processing because of an
9455 * outstanding event.
9457 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
9458 goto iocb_busy;
9460 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
9462 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
9463 * can be issued if the link is not up.
9465 switch (piocb->iocb.ulpCommand) {
9466 case CMD_GEN_REQUEST64_CR:
9467 case CMD_GEN_REQUEST64_CX:
9468 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9469 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
9470 FC_RCTL_DD_UNSOL_CMD) ||
9471 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9472 MENLO_TRANSPORT_TYPE))
9473 goto iocb_busy;
9474 break;
9476 case CMD_QUE_RING_BUF_CN:
9477 case CMD_QUE_RING_BUF64_CN:
9479 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
9480 * completion, iocb_cmpl MUST be 0.
9482 if (piocb->iocb_cmpl)
9483 piocb->iocb_cmpl = NULL;
9484 fallthrough;
9485 case CMD_CREATE_XRI_CR:
9486 case CMD_CLOSE_XRI_CN:
9487 case CMD_CLOSE_XRI_CX:
9488 break;
9489 default:
9490 goto iocb_busy;
9491 }
9493 /*
9494 * For FCP commands, we must be in a state where we can process link
9497 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
9498 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
9499 goto iocb_busy;
9500 }
9502 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9503 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9504 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9506 if (iocb)
9507 lpfc_sli_update_ring(phba, pring);
9508 else
9509 lpfc_sli_update_full_ring(phba, pring);
9511 if (!piocb)
9512 return IOCB_SUCCESS;
9514 goto out_busy;
9516 iocb_busy:
9517 pring->stats.iocb_cmd_delay++;
9519 out_busy:
9521 if (!(flag & SLI_IOCB_RET_IOCB)) {
9522 __lpfc_sli_ringtx_put(phba, pring, piocb);
9523 return IOCB_SUCCESS;
9524 }
9526 return IOCB_BUSY;
9527 }
9529 /**
9530 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
9531 * @phba: Pointer to HBA context object.
9532 * @piocbq: Pointer to command iocb.
9533 * @sglq: Pointer to the scatter gather queue object.
9535 * This routine converts the bpl or bde that is in the IOCB
9536 * to a sgl list for the sli4 hardware. The physical address
9537 * of the bpl/bde is converted back to a virtual address.
9538 * If the IOCB contains a BPL then the list of BDE's is
9539 * converted to sli4_sge's. If the IOCB contains a single
9540 * BDE then it is converted to a single sli_sge.
9541 * The IOCB is still in cpu endianness so the contents of
9542 * the bpl can be used without byte swapping.
9544 * Returns valid XRI = Success, NO_XRI = Failure.
9547 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
9548 struct lpfc_sglq *sglq)
9550 uint16_t xritag = NO_XRI;
9551 struct ulp_bde64 *bpl = NULL;
9552 struct ulp_bde64 bde;
9553 struct sli4_sge *sgl = NULL;
9554 struct lpfc_dmabuf *dmabuf;
9555 IOCB_t *icmd;
9556 int numBdes = 0;
9557 int i = 0;
9558 uint32_t offset = 0; /* accumulated offset in the sg request list */
9559 int inbound = 0; /* number of sg reply entries inbound from firmware */
9561 if (!piocbq || !sglq)
9562 return xritag;
9564 sgl = (struct sli4_sge *)sglq->sgl;
9565 icmd = &piocbq->iocb;
9566 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
9567 return sglq->sli4_xritag;
9568 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9569 numBdes = icmd->un.genreq64.bdl.bdeSize /
9570 sizeof(struct ulp_bde64);
9571 /* The addrHigh and addrLow fields within the IOCB
9572 * have not been byteswapped yet so there is no
9573 * need to swap them back.
9575 if (piocbq->context3)
9576 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
9577 else
9578 return xritag;
9580 bpl = (struct ulp_bde64 *)dmabuf->virt;
9581 if (!bpl)
9582 return xritag;
9584 for (i = 0; i < numBdes; i++) {
9585 /* Should already be byte swapped. */
9586 sgl->addr_hi = bpl->addrHigh;
9587 sgl->addr_lo = bpl->addrLow;
9589 sgl->word2 = le32_to_cpu(sgl->word2);
9590 if ((i+1) == numBdes)
9591 bf_set(lpfc_sli4_sge_last, sgl, 1);
9593 bf_set(lpfc_sli4_sge_last, sgl, 0);
9594 /* swap the size field back to the cpu so we
9595 * can assign it to the sgl.
9597 bde.tus.w = le32_to_cpu(bpl->tus.w);
9598 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
9599 /* The offsets in the sgl need to be accumulated
9600 * separately for the request and reply lists.
9601 * The request is always first, the reply follows.
9603 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
9604 /* add up the reply sg entries */
9605 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
9606 inbound++;
9607 /* first inbound? reset the offset */
9608 if (inbound == 1)
9609 offset = 0;
9610 bf_set(lpfc_sli4_sge_offset, sgl, offset);
9611 bf_set(lpfc_sli4_sge_type, sgl,
9612 LPFC_SGE_TYPE_DATA);
9613 offset += bde.tus.f.bdeSize;
9614 }
9615 sgl->word2 = cpu_to_le32(sgl->word2);
9616 bpl++;
9617 sgl++;
9618 }
9619 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
9620 /* The addrHigh and addrLow fields of the BDE have not
9621 * been byteswapped yet so they need to be swapped
9622 * before putting them in the sgl.
9625 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
9627 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
9628 sgl->word2 = le32_to_cpu(sgl->word2);
9629 bf_set(lpfc_sli4_sge_last, sgl, 1);
9630 sgl->word2 = cpu_to_le32(sgl->word2);
9631 sgl->sge_len =
9632 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
9633 }
9634 return sglq->sli4_xritag;
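/*
 * Editor's note: the word2 handling above is the general recipe for editing
 * bit-fields of a little-endian hardware descriptor from the CPU:
 *
 *	sgl->word2 = le32_to_cpu(sgl->word2);	to CPU order
 *	bf_set(lpfc_sli4_sge_last, sgl, 1);	edit in CPU order
 *	sgl->word2 = cpu_to_le32(sgl->word2);	back to wire order
 *
 * On big-endian hosts the swaps are real; on little-endian hosts they
 * compile to nothing, so the pattern is free where it is a no-op.
 */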
9638 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
9639 * @phba: Pointer to HBA context object.
9640 * @iocbq: Pointer to command iocb.
9641 * @wqe: Pointer to the work queue entry.
9643 * This routine converts the iocb command to its Work Queue Entry
9644 * equivalent. The wqe pointer should not have any fields set when
9645 * this routine is called because it will memcpy over them.
9646 * This routine does not set the CQ_ID or the WQEC bits in the
9647 * wqe.
9649 * Returns: 0 = Success, IOCB_ERROR = Failure.
9652 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9653 union lpfc_wqe128 *wqe)
9655 uint32_t xmit_len = 0, total_len = 0;
9656 uint8_t ct = 0;
9657 uint32_t fip;
9658 uint32_t abort_tag;
9659 uint8_t command_type = ELS_COMMAND_NON_FIP;
9660 uint8_t cmnd;
9661 uint16_t xritag;
9662 uint16_t abrt_iotag;
9663 struct lpfc_iocbq *abrtiocbq;
9664 struct ulp_bde64 *bpl = NULL;
9665 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
9666 int numBdes, i;
9667 struct ulp_bde64 bde;
9668 struct lpfc_nodelist *ndlp;
9669 uint32_t *pcmd;
9670 uint32_t if_type;
9672 fip = phba->hba_flag & HBA_FIP_SUPPORT;
9673 /* The fcp commands will set command type */
9674 if (iocbq->iocb_flag & LPFC_IO_FCP)
9675 command_type = FCP_COMMAND;
9676 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
9677 command_type = ELS_COMMAND_FIP;
9679 command_type = ELS_COMMAND_NON_FIP;
9681 if (phba->fcp_embed_io)
9682 memset(wqe, 0, sizeof(union lpfc_wqe128));
9683 /* Some of the fields are in the right position already */
9684 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
9685 /* The ct field has moved so reset */
9686 wqe->generic.wqe_com.word7 = 0;
9687 wqe->generic.wqe_com.word10 = 0;
9689 abort_tag = (uint32_t) iocbq->iotag;
9690 xritag = iocbq->sli4_xritag;
9691 /* words0-2 bpl convert bde */
9692 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9693 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9694 sizeof(struct ulp_bde64);
9695 bpl = (struct ulp_bde64 *)
9696 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9697 if (!bpl)
9698 return IOCB_ERROR;
9700 /* Should already be byte swapped. */
9701 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
9702 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
9703 /* swap the size field back to the cpu so we
9704 * can assign it to the sgl.
9706 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
9707 xmit_len = wqe->generic.bde.tus.f.bdeSize;
9709 for (i = 0; i < numBdes; i++) {
9710 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9711 total_len += bde.tus.f.bdeSize;
9712 }
9713 } else
9714 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
9716 iocbq->iocb.ulpIoTag = iocbq->iotag;
9717 cmnd = iocbq->iocb.ulpCommand;
9719 switch (iocbq->iocb.ulpCommand) {
9720 case CMD_ELS_REQUEST64_CR:
9721 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9722 ndlp = iocbq->context_un.ndlp;
9723 else
9724 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9725 if (!iocbq->iocb.ulpLe) {
9726 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9727 "2007 Only Limited Edition cmd Format"
9728 " supported 0x%x\n",
9729 iocbq->iocb.ulpCommand);
9730 return IOCB_ERROR;
9731 }
9733 wqe->els_req.payload_len = xmit_len;
9734 /* Els_request64 has a TMO */
9735 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9736 iocbq->iocb.ulpTimeout);
9737 /* Need a VF for word 4 set the vf bit*/
9738 bf_set(els_req64_vf, &wqe->els_req, 0);
9739 /* And a VFID for word 12 */
9740 bf_set(els_req64_vfid, &wqe->els_req, 0);
9741 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9742 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9743 iocbq->iocb.ulpContext);
9744 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9745 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
9746 /* CCP CCPE PV PRI in word10 were set in the memcpy */
9747 if (command_type == ELS_COMMAND_FIP)
9748 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9749 >> LPFC_FIP_ELS_ID_SHIFT);
9750 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9751 iocbq->context2)->virt);
9752 if_type = bf_get(lpfc_sli_intf_if_type,
9753 &phba->sli4_hba.sli_intf);
9754 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9755 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
9756 *pcmd == ELS_CMD_SCR ||
9757 *pcmd == ELS_CMD_RDF ||
9758 *pcmd == ELS_CMD_RSCN_XMT ||
9759 *pcmd == ELS_CMD_FDISC ||
9760 *pcmd == ELS_CMD_LOGO ||
9761 *pcmd == ELS_CMD_PLOGI)) {
9762 bf_set(els_req64_sp, &wqe->els_req, 1);
9763 bf_set(els_req64_sid, &wqe->els_req,
9764 iocbq->vport->fc_myDID);
9765 if ((*pcmd == ELS_CMD_FLOGI) &&
9766 !(phba->fc_topology ==
9767 LPFC_TOPOLOGY_LOOP))
9768 bf_set(els_req64_sid, &wqe->els_req, 0);
9769 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9770 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9771 phba->vpi_ids[iocbq->vport->vpi]);
9772 } else if (pcmd && iocbq->context1) {
9773 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9774 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9775 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9776 }
9777 }
9778 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9779 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9780 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9781 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9782 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9783 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9784 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9785 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
9786 wqe->els_req.max_response_payload_len = total_len - xmit_len;
9787 break;
9788 case CMD_XMIT_SEQUENCE64_CX:
9789 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9790 iocbq->iocb.un.ulpWord[3]);
9791 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
9792 iocbq->iocb.unsli3.rcvsli3.ox_id);
9793 /* The entire sequence is transmitted for this IOCB */
9794 xmit_len = total_len;
9795 cmnd = CMD_XMIT_SEQUENCE64_CR;
9796 if (phba->link_flag & LS_LOOPBACK_MODE)
9797 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
9798 fallthrough;
9799 case CMD_XMIT_SEQUENCE64_CR:
9800 /* word3 iocb=io_tag32 wqe=reserved */
9801 wqe->xmit_sequence.rsvd3 = 0;
9802 /* word4 relative_offset memcpy */
9803 /* word5 r_ctl/df_ctl memcpy */
9804 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9805 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9806 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9807 LPFC_WQE_IOD_WRITE);
9808 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9809 LPFC_WQE_LENLOC_WORD12);
9810 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
9811 wqe->xmit_sequence.xmit_len = xmit_len;
9812 command_type = OTHER_COMMAND;
9813 break;
9814 case CMD_XMIT_BCAST64_CN:
9815 /* word3 iocb=iotag32 wqe=seq_payload_len */
9816 wqe->xmit_bcast64.seq_payload_len = xmit_len;
9817 /* word4 iocb=rsvd wqe=rsvd */
9818 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
9819 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
9820 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
9821 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9822 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9823 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9824 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9825 LPFC_WQE_LENLOC_WORD3);
9826 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
9827 break;
9828 case CMD_FCP_IWRITE64_CR:
9829 command_type = FCP_COMMAND_DATA_OUT;
9830 /* word3 iocb=iotag wqe=payload_offset_len */
9831 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9832 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9833 xmit_len + sizeof(struct fcp_rsp));
9834 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9836 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9837 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9838 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9839 iocbq->iocb.ulpFCP2Rcvy);
9840 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9841 /* Always open the exchange */
9842 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9843 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9844 LPFC_WQE_LENLOC_WORD4);
9845 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
9846 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
9847 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9848 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
9849 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9850 if (iocbq->priority) {
9851 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9852 (iocbq->priority << 1));
9853 } else {
9854 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9855 (phba->cfg_XLanePriority << 1));
9856 }
9857 }
9858 /* Note, word 10 is already initialized to 0 */
9860 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9861 if (phba->cfg_enable_pbde)
9862 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9863 else
9864 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9866 if (phba->fcp_embed_io) {
9867 struct lpfc_io_buf *lpfc_cmd;
9868 struct sli4_sge *sgl;
9869 struct fcp_cmnd *fcp_cmnd;
9870 uint32_t *ptr;
9872 /* 128 byte wqe support here */
9874 lpfc_cmd = iocbq->context1;
9875 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9876 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9878 /* Word 0-2 - FCP_CMND */
9879 wqe->generic.bde.tus.f.bdeFlags =
9880 BUFF_TYPE_BDE_IMMED;
9881 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9882 wqe->generic.bde.addrHigh = 0;
9883 wqe->generic.bde.addrLow = 88; /* Word 22 */
9885 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9886 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
9888 /* Word 22-29 FCP CMND Payload */
9889 ptr = &wqe->words[22];
9890 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9891 }
9892 break;
9893 case CMD_FCP_IREAD64_CR:
9894 /* word3 iocb=iotag wqe=payload_offset_len */
9895 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9896 bf_set(payload_offset_len, &wqe->fcp_iread,
9897 xmit_len + sizeof(struct fcp_rsp));
9898 bf_set(cmd_buff_len, &wqe->fcp_iread,
9899 0);
9900 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9901 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9902 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9903 iocbq->iocb.ulpFCP2Rcvy);
9904 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
9905 /* Always open the exchange */
9906 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9907 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9908 LPFC_WQE_LENLOC_WORD4);
9909 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
9910 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
9911 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9912 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
9913 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9914 if (iocbq->priority) {
9915 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9916 (iocbq->priority << 1));
9917 } else {
9918 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9919 (phba->cfg_XLanePriority << 1));
9920 }
9921 }
9922 /* Note, word 10 is already initialized to 0 */
9924 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9925 if (phba->cfg_enable_pbde)
9926 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9927 else
9928 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9930 if (phba->fcp_embed_io) {
9931 struct lpfc_io_buf *lpfc_cmd;
9932 struct sli4_sge *sgl;
9933 struct fcp_cmnd *fcp_cmnd;
9934 uint32_t *ptr;
9936 /* 128 byte wqe support here */
9938 lpfc_cmd = iocbq->context1;
9939 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9940 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9942 /* Word 0-2 - FCP_CMND */
9943 wqe->generic.bde.tus.f.bdeFlags =
9944 BUFF_TYPE_BDE_IMMED;
9945 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9946 wqe->generic.bde.addrHigh = 0;
9947 wqe->generic.bde.addrLow = 88; /* Word 22 */
9949 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9950 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
9952 /* Word 22-29 FCP CMND Payload */
9953 ptr = &wqe->words[22];
9954 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9955 }
9956 break;
9957 case CMD_FCP_ICMND64_CR:
9958 /* word3 iocb=iotag wqe=payload_offset_len */
9959 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9960 bf_set(payload_offset_len, &wqe->fcp_icmd,
9961 xmit_len + sizeof(struct fcp_rsp));
9962 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9963 0);
9964 /* word3 iocb=IO_TAG wqe=reserved */
9965 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
9966 /* Always open the exchange */
9967 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9968 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9969 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9970 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9971 LPFC_WQE_LENLOC_NONE);
9972 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9973 iocbq->iocb.ulpFCP2Rcvy);
9974 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9975 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
9976 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9977 if (iocbq->priority) {
9978 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9979 (iocbq->priority << 1));
9981 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9982 (phba->cfg_XLanePriority << 1));
9985 /* Note, word 10 is already initialized to 0 */
9987 if (phba->fcp_embed_io) {
9988 struct lpfc_io_buf *lpfc_cmd;
9989 struct sli4_sge *sgl;
9990 struct fcp_cmnd *fcp_cmnd;
9993 /* 128 byte wqe support here */
9995 lpfc_cmd = iocbq->context1;
9996 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9997 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9999 /* Word 0-2 - FCP_CMND */
10000 wqe->generic.bde.tus.f.bdeFlags =
10001 BUFF_TYPE_BDE_IMMED;
10002 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10003 wqe->generic.bde.addrHigh = 0;
10004 wqe->generic.bde.addrLow = 88; /* Word 22 */
10006 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
10007 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
10009 /* Word 22-29 FCP CMND Payload */
10010 ptr = &wqe->words[22];
10011 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10014 case CMD_GEN_REQUEST64_CR:
10015 /* For this command calculate the xmit length of the
10019 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
10020 sizeof(struct ulp_bde64);
10021 for (i = 0; i < numBdes; i++) {
10022 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
10023 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
10025 xmit_len += bde.tus.f.bdeSize;
10027 /* word3 iocb=IO_TAG wqe=request_payload_len */
10028 wqe->gen_req.request_payload_len = xmit_len;
10029 /* word4 iocb=parameter wqe=relative_offset memcpy */
10030 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
10031 /* word6 context tag copied in memcpy */
10032 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
10033 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
10034 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10035 "2015 Invalid CT %x command 0x%x\n",
10036 ct, iocbq->iocb.ulpCommand);
10039 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
10040 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
10041 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
10042 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
10043 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
10044 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
10045 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
10046 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
10047 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
10048 command_type = OTHER_COMMAND;
10050 case CMD_XMIT_ELS_RSP64_CX:
10051 ndlp = (struct lpfc_nodelist *)iocbq->context1;
10052 /* words0-2 BDE memcpy */
10053 /* word3 iocb=iotag32 wqe=response_payload_len */
10054 wqe->xmit_els_rsp.response_payload_len = xmit_len;
10056 wqe->xmit_els_rsp.word4 = 0;
10057 /* word5 iocb=rsvd wge=did */
10058 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
10059 iocbq->iocb.un.xseq64.xmit_els_remoteID);
10061 if_type = bf_get(lpfc_sli_intf_if_type,
10062 &phba->sli4_hba.sli_intf);
10063 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10064 if (iocbq->vport->fc_flag & FC_PT2PT) {
10065 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
10066 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
10067 iocbq->vport->fc_myDID);
10068 if (iocbq->vport->fc_myDID == Fabric_DID) {
10069 bf_set(wqe_els_did,
10070 &wqe->xmit_els_rsp.wqe_dest, 0);
10074 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
10075 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
10076 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
10077 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
10078 iocbq->iocb.unsli3.rcvsli3.ox_id);
10079 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
10080 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
10081 phba->vpi_ids[iocbq->vport->vpi]);
10082 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
10083 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
10084 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
10085 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
10086 LPFC_WQE_LENLOC_WORD3);
10087 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
10088 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
10089 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
10090 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
10091 iocbq->context2)->virt);
10092 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
10093 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
10094 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
10095 iocbq->vport->fc_myDID);
10096 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
10097 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
10098 phba->vpi_ids[phba->pport->vpi]);
10100 command_type = OTHER_COMMAND;
10102 case CMD_CLOSE_XRI_CN:
10103 case CMD_ABORT_XRI_CN:
10104 case CMD_ABORT_XRI_CX:
10105		/* words 0-2 memcpy should be 0 (reserved) */
10106 /* port will send abts */
10107 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
10108 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
10109 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
10110 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
10114 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
10116			 * The link is down, or the command was ELS_FIP,
10117			 * so the fw does not need to send the abts.
10120 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
10122 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
10123 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
10124 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
10125 wqe->abort_cmd.rsrvd5 = 0;
10126 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
10127 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
10128 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
10130 * The abort handler will send us CMD_ABORT_XRI_CN or
10131 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
10133 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
10134 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
10135 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
10136 LPFC_WQE_LENLOC_NONE);
10137 cmnd = CMD_ABORT_XRI_CX;
10138 command_type = OTHER_COMMAND;
10141 case CMD_XMIT_BLS_RSP64_CX:
10142 ndlp = (struct lpfc_nodelist *)iocbq->context1;
10143 /* As BLS ABTS RSP WQE is very different from other WQEs,
10144 * we re-construct this WQE here based on information in
10145 * iocbq from scratch.
10147 memset(wqe, 0, sizeof(*wqe));
10148		/* OX_ID is invariant regardless of who sent the ABTS on the CT exchange */
10149 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
10150 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
10151 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
10152 LPFC_ABTS_UNSOL_INT) {
10153 /* ABTS sent by initiator to CT exchange, the
10154 * RX_ID field will be filled with the newly
10155 * allocated responder XRI.
10157 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10158 iocbq->sli4_xritag);
10160 /* ABTS sent by responder to CT exchange, the
10161 * RX_ID field will be filled with the responder
10164 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10165 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
10167 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
10168 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
10171 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
10173 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
10174 iocbq->iocb.ulpContext);
10175 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
10176 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
10177 phba->vpi_ids[phba->pport->vpi]);
10178 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
10179 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
10180 LPFC_WQE_LENLOC_NONE);
10181		/* Overwrite the pre-set command type with OTHER_COMMAND */
10182 command_type = OTHER_COMMAND;
10183 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
10184 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
10185 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
10186 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
10187 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
10188 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
10189 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
10193 case CMD_SEND_FRAME:
10194 bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
10195 bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
10196 bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
10197 bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
10198 bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
10199 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10200 bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
10201 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
10202 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10203 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10204 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10206 case CMD_XRI_ABORTED_CX:
10207 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
10208 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
10209 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
10210 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
10211 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
10213 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10214 "2014 Invalid command 0x%x\n",
10215 iocbq->iocb.ulpCommand);
10219 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
10220 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
10221 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
10222 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
10223 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
10224 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
10225 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
10226 LPFC_IO_DIF_INSERT);
10227 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10228 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10229 wqe->generic.wqe_com.abort_tag = abort_tag;
10230 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
10231 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
10232 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
10233 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10238 * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
10239 * @phba: Pointer to HBA context object.
10240 * @ring_number: SLI ring number to issue iocb on.
10241 * @piocb: Pointer to command iocb.
10242 * @flag: Flag indicating if this command can be put into txq.
10244 * __lpfc_sli_issue_fcp_io_s3 is a wrapper function that invokes the lockless
10245 * function to send an iocb command to an HBA with the SLI-3 interface spec.
10247 * This function takes the hbalock before invoking the lockless version.
10248 * The function returns success after it successfully submits the iocb to
10249 * firmware or after adding it to the txq.
10252 __lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
10253 struct lpfc_iocbq *piocb, uint32_t flag)
10255 unsigned long iflags;
10258 spin_lock_irqsave(&phba->hbalock, iflags);
10259 rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
10260 spin_unlock_irqrestore(&phba->hbalock, iflags);
10266 * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
10267 * @phba: Pointer to HBA context object.
10268 * @ring_number: SLI ring number to issue wqe on.
10269 * @piocb: Pointer to command iocb.
10270 * @flag: Flag indicating if this command can be put into txq.
10272 * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue
10273 * a wqe command to an HBA with the SLI-4 interface spec.
10275 * This function is a lockless version. The function returns success
10276 * after it successfully submits the wqe to firmware or after adding it to the txq.
10280 __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
10281 struct lpfc_iocbq *piocb, uint32_t flag)
10284 struct lpfc_io_buf *lpfc_cmd =
10285 (struct lpfc_io_buf *)piocb->context1;
10286 union lpfc_wqe128 *wqe = &piocb->wqe;
10287 struct sli4_sge *sgl;
10289 /* 128 byte wqe support here */
10290 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10292 if (phba->fcp_embed_io) {
10293 struct fcp_cmnd *fcp_cmnd;
10296 fcp_cmnd = lpfc_cmd->fcp_cmnd;
10298 /* Word 0-2 - FCP_CMND */
10299 wqe->generic.bde.tus.f.bdeFlags =
10300 BUFF_TYPE_BDE_IMMED;
10301 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10302 wqe->generic.bde.addrHigh = 0;
10303 wqe->generic.bde.addrLow = 88; /* Word 22 */
10305 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10306 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
10308 /* Word 22-29 FCP CMND Payload */
10309 ptr = &wqe->words[22];
10310 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10312 /* Word 0-2 - Inline BDE */
10313 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
10314 wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd);
10315 wqe->generic.bde.addrHigh = sgl->addr_hi;
10316 wqe->generic.bde.addrLow = sgl->addr_lo;
10319 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10320 bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
10323 rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
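/*
 * Illustrative note (a sketch, not driver code): with fcp_embed_io the
 * FCP_CMND payload lives in WQE words 22-29, so the immediate BDE's
 * addrLow above is simply the byte offset of word 22 in the 128-byte WQE:
 *
 *	BUILD_BUG_ON(88 != 22 * sizeof(uint32_t));
 *
 * Without fcp_embed_io, words 0-2 instead carry a 64-bit BDE pointing at
 * the external fcp_cmnd buffer described by the first SGE.
 */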
10328 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
10329 * @phba: Pointer to HBA context object.
10330 * @ring_number: SLI ring number to issue iocb on.
10331 * @piocb: Pointer to command iocb.
10332 * @flag: Flag indicating if this command can be put into txq.
10334 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
10335 * an iocb command to an HBA with SLI-4 interface spec.
10337 * This function is called with the ring_lock held. The function returns success
10338 * after it successfully submits the iocb to firmware or after adding it to the txq.
10342 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
10343 struct lpfc_iocbq *piocb, uint32_t flag)
10345 struct lpfc_sglq *sglq;
10346 union lpfc_wqe128 wqe;
10347 struct lpfc_queue *wq;
10348 struct lpfc_sli_ring *pring;
10351 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
10352 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10353 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
10355 wq = phba->sli4_hba.els_wq;
10358 /* Get corresponding ring */
10362 * The WQE can be either 64 or 128 bytes,
10365 lockdep_assert_held(&pring->ring_lock);
10367 if (piocb->sli4_xritag == NO_XRI) {
10368 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
10369 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
10372 if (!list_empty(&pring->txq)) {
10373 if (!(flag & SLI_IOCB_RET_IOCB)) {
10374 __lpfc_sli_ringtx_put(phba,
10376 return IOCB_SUCCESS;
10381 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
10383 if (!(flag & SLI_IOCB_RET_IOCB)) {
10384 __lpfc_sli_ringtx_put(phba,
10387 return IOCB_SUCCESS;
10393 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
10394		/* These IOs already have an XRI and a mapped sgl. */
10399		 * This is a continuation of a command (CX), so this
10400		 * sglq is on the active list.
10402 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
10408 piocb->sli4_lxritag = sglq->sli4_lxritag;
10409 piocb->sli4_xritag = sglq->sli4_xritag;
10410 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
10414 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
10417 if (lpfc_sli4_wq_put(wq, &wqe))
10419 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
10425 * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
10427 * This routine wraps the actual fcp i/o function for issuing a WQE (SLI-4)
10428 * or an IOCB (SLI-3) through the function
10429 * pointer from the lpfc_hba struct.
10432 * IOCB_ERROR - Error
10433 * IOCB_SUCCESS - Success
10437 lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
10438 struct lpfc_iocbq *piocb, uint32_t flag)
10440 return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
10444 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
10446 * This routine wraps the actual lockless IOCB-issuing function
10447 * pointer from the lpfc_hba struct.
10450 * IOCB_ERROR - Error
10451 * IOCB_SUCCESS - Success
10455 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10456 struct lpfc_iocbq *piocb, uint32_t flag)
10458 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10462 * lpfc_sli_api_table_setup - Set up sli api function jump table
10463 * @phba: The hba struct for which this call is being executed.
10464 * @dev_grp: The HBA PCI-Device group number.
10466 * This routine sets up the SLI interface API function jump table in @phba
10468 * Returns: 0 - success, -ENODEV - failure.
10471 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10475 case LPFC_PCI_DEV_LP:
10476 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10477 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10478 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
10480 case LPFC_PCI_DEV_OC:
10481 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10482 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10483 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
10486 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10487 "1419 Invalid HBA PCI-device group: 0x%x\n",
10491 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
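/*
 * Illustrative sketch (not driver code): a probe-time caller selects the
 * jump table once, before any I/O is issued; afterwards the generic
 * wrappers dispatch to the _s3 or _s4 variants. The LPFC_PCI_DEV_OC value
 * here stands in for whatever group the PCI probe derived.
 *
 *	if (lpfc_sli_api_table_setup(phba, LPFC_PCI_DEV_OC))
 *		return -ENODEV;
 *	rc = lpfc_sli_issue_fcp_io(phba, ring_number, piocb, 0);
 */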
10496 * lpfc_sli4_calc_ring - Calculates which ring to use
10497 * @phba: Pointer to HBA context object.
10498 * @piocb: Pointer to command iocb.
10500 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
10501 * hba_wqidx, thus we need to calculate the corresponding ring.
10502 * Since ABORTS must go on the same WQ as the command they are
10503 * aborting, we use the command's hba_wqidx.
10505 struct lpfc_sli_ring *
10506 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10508 struct lpfc_io_buf *lpfc_cmd;
10510 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10511 if (unlikely(!phba->sli4_hba.hdwq))
10514		 * for an abort iocb, hba_wqidx should already
10515		 * be set up based on what work queue we used.
10517 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10518 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10519 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10521 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
10523 if (unlikely(!phba->sli4_hba.els_wq))
10525 piocb->hba_wqidx = 0;
10526 return phba->sli4_hba.els_wq->pring;
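/*
 * Illustrative sketch (mirrors the abort path in lpfc_sli_issue_abort_iotag
 * below): an abort must ride the same WQ as the command it aborts, so the
 * caller copies hba_wqidx and marks the iocb before asking for a ring.
 *
 *	abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
 *	abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
 *	pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
 */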
10531 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
10532 * @phba: Pointer to HBA context object.
10533 * @ring_number: Ring number
10534 * @piocb: Pointer to command iocb.
10535 * @flag: Flag indicating if this command can be put into txq.
10537 * lpfc_sli_issue_iocb is a wrapper around the __lpfc_sli_issue_iocb
10538 * function. This function takes the appropriate lock (the ring_lock for
10539 * SLI-4, the hbalock for SLI-2/3), calls __lpfc_sli_issue_iocb, and
10540 * returns the error returned by __lpfc_sli_issue_iocb. This wrapper is
10541 * used by functions which do not hold the lock themselves.
10544 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10545 struct lpfc_iocbq *piocb, uint32_t flag)
10547 struct lpfc_sli_ring *pring;
10548 struct lpfc_queue *eq;
10549 unsigned long iflags;
10552 if (phba->sli_rev == LPFC_SLI_REV4) {
10553 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
10555 pring = lpfc_sli4_calc_ring(phba, piocb);
10556 if (unlikely(pring == NULL))
10559 spin_lock_irqsave(&pring->ring_lock, iflags);
10560 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10561 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10563 lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
10565 /* For now, SLI2/3 will still use hbalock */
10566 spin_lock_irqsave(&phba->hbalock, iflags);
10567 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10568 spin_unlock_irqrestore(&phba->hbalock, iflags);
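/*
 * Illustrative sketch (not driver code): a caller that must not park work
 * on a full queue passes SLI_IOCB_RET_IOCB so the iocb is handed back
 * rather than queued to the txq, then releases it on error.
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb,
 *				 SLI_IOCB_RET_IOCB);
 *	if (rc == IOCB_ERROR)
 *		lpfc_sli_release_iocbq(phba, piocb);
 */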
10574 * lpfc_extra_ring_setup - Extra ring setup function
10575 * @phba: Pointer to HBA context object.
10577 * This function is called while the driver attaches to the
10578 * HBA to set up the extra ring. The extra ring is used
10579 * only when the driver needs to support target mode
10580 * or IP over FC functionality.
10582 * This function is called with no lock held. SLI3 only.
10585 lpfc_extra_ring_setup( struct lpfc_hba *phba)
10587 struct lpfc_sli *psli;
10588 struct lpfc_sli_ring *pring;
10592 /* Adjust cmd/rsp ring iocb entries more evenly */
10594 /* Take some away from the FCP ring */
10595 pring = &psli->sli3_ring[LPFC_FCP_RING];
10596 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10597 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10598 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10599 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10601 /* and give them to the extra ring */
10602 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
10604 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10605 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10606 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10607 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10609 /* Setup default profile for this ring */
10610 pring->iotag_max = 4096;
10611 pring->num_mask = 1;
10612 pring->prt[0].profile = 0; /* Mask 0 */
10613 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10614 pring->prt[0].type = phba->cfg_multi_ring_type;
10615 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
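/*
 * Worked accounting (illustrative): lpfc_sli_setup below first gives the
 * FCP ring its base entries plus both XTRA allotments; this routine then
 * moves the XTRA allotments over, so on the command side the net result is
 *
 *	FCP ring:   R0 + R1XTRA + R3XTRA - R1XTRA - R3XTRA = R0 entries
 *	extra ring: R1 + R1XTRA + R3XTRA entries
 *
 * and likewise for the response side.
 */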
10620 lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
10621 struct lpfc_nodelist *ndlp)
10623 unsigned long iflags;
10624 struct lpfc_work_evt *evtp = &ndlp->recovery_evt;
10626 spin_lock_irqsave(&phba->hbalock, iflags);
10627 if (!list_empty(&evtp->evt_listp)) {
10628 spin_unlock_irqrestore(&phba->hbalock, iflags);
10632 /* Incrementing the reference count until the queued work is done. */
10633 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
10634 if (!evtp->evt_arg1) {
10635 spin_unlock_irqrestore(&phba->hbalock, iflags);
10638 evtp->evt = LPFC_EVT_RECOVER_PORT;
10639 list_add_tail(&evtp->evt_listp, &phba->work_list);
10640 spin_unlock_irqrestore(&phba->hbalock, iflags);
10642 lpfc_worker_wake_up(phba);
10645 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
10646 * @phba: Pointer to HBA context object.
10647 * @iocbq: Pointer to iocb object.
10649 * The async_event handler calls this routine when it receives
10650 * an ASYNC_STATUS_CN event from the port. The port generates
10651 * this event when an Abort Sequence request to an rport fails
10652 * twice in succession. The abort could be originated by the
10653 * driver or by the port. The ABTS could have been for an ELS
10654 * or FCP IO. The port only generates this event when an ABTS
10655 * fails to complete after one retry.
10658 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10659 struct lpfc_iocbq *iocbq)
10661 struct lpfc_nodelist *ndlp = NULL;
10662 uint16_t rpi = 0, vpi = 0;
10663 struct lpfc_vport *vport = NULL;
10665 /* The rpi in the ulpContext is vport-sensitive. */
10666 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10667 rpi = iocbq->iocb.ulpContext;
10669 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10670 "3092 Port generated ABTS async event "
10671 "on vpi %d rpi %d status 0x%x\n",
10672 vpi, rpi, iocbq->iocb.ulpStatus);
10674 vport = lpfc_find_vport_by_vpid(phba, vpi);
10677 ndlp = lpfc_findnode_rpi(vport, rpi);
10681 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10682 lpfc_sli_abts_recover_port(vport, ndlp);
10686 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10687 "3095 Event Context not found, no "
10688 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
10689 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
10693 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
10694 * @phba: pointer to HBA context object.
10695 * @ndlp: nodelist pointer for the impacted rport.
10696 * @axri: pointer to the wcqe containing the failed exchange.
10698 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
10699 * port. The port generates this event when an abort exchange request to an
10700 * rport fails twice in succession with no reply. The abort could be originated
10701 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
10704 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10705 struct lpfc_nodelist *ndlp,
10706 struct sli4_wcqe_xri_aborted *axri)
10708 uint32_t ext_status = 0;
10711 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10712 "3115 Node Context not found, driver "
10713 "ignoring abts err event\n");
10717 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10718 "3116 Port generated FCP XRI ABORT event on "
10719 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
10720 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
10721 bf_get(lpfc_wcqe_xa_xri, axri),
10722 bf_get(lpfc_wcqe_xa_status, axri),
10726 * Catch the ABTS protocol failure case. Older OCe FW releases returned
10727 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
10728 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
10730 ext_status = axri->parameter & IOERR_PARAM_MASK;
10731 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10732 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
10733 lpfc_sli_post_recovery_event(phba, ndlp);
10737 * lpfc_sli_async_event_handler - ASYNC iocb handler function
10738 * @phba: Pointer to HBA context object.
10739 * @pring: Pointer to driver SLI ring object.
10740 * @iocbq: Pointer to iocb object.
10742 * This function is called by the slow ring event handler
10743 * function when there is an ASYNC event iocb in the ring.
10744 * This function is called with no lock held.
10745 * Currently this function handles only temperature related
10746 * ASYNC events. The function decodes the temperature sensor
10747 * event message and posts events for the management applications.
10750 lpfc_sli_async_event_handler(struct lpfc_hba * phba,
10751 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
10755 struct temp_event temp_event_data;
10756 struct Scsi_Host *shost;
10759 icmd = &iocbq->iocb;
10760 evt_code = icmd->un.asyncstat.evt_code;
10762 switch (evt_code) {
10763 case ASYNC_TEMP_WARN:
10764 case ASYNC_TEMP_SAFE:
10765 temp_event_data.data = (uint32_t) icmd->ulpContext;
10766 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
10767 if (evt_code == ASYNC_TEMP_WARN) {
10768 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
10769 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10770 "0347 Adapter is very hot, please take "
10771 "corrective action. temperature : %d Celsius\n",
10772 (uint32_t) icmd->ulpContext);
10774 temp_event_data.event_code = LPFC_NORMAL_TEMP;
10775 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10776 "0340 Adapter temperature is OK now. "
10777 "temperature : %d Celsius\n",
10778 (uint32_t) icmd->ulpContext);
10781 /* Send temperature change event to applications */
10782 shost = lpfc_shost_from_vport(phba->pport);
10783 fc_host_post_vendor_event(shost, fc_get_event_number(),
10784 sizeof(temp_event_data), (char *) &temp_event_data,
10785 LPFC_NL_VENDOR_ID);
10787 case ASYNC_STATUS_CN:
10788 lpfc_sli_abts_err_handler(phba, iocbq);
10791 iocb_w = (uint32_t *) icmd;
10792 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10793 "0346 Ring %d handler: unexpected ASYNC_STATUS"
10795 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
10796 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
10797 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
10798 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
10799 pring->ringno, icmd->un.asyncstat.evt_code,
10800 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
10801 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
10802 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
10803 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
10811 * lpfc_sli4_setup - SLI ring setup function
10812 * @phba: Pointer to HBA context object.
10814 * lpfc_sli4_setup sets up the rings of the SLI interface with
10815 * the number of iocbs per ring and the iotags. This function is
10816 * called while the driver attaches to the HBA and before the
10817 * interrupts are enabled, so there is no need for locking.
10819 * This function always returns 0.
10822 lpfc_sli4_setup(struct lpfc_hba *phba)
10824 struct lpfc_sli_ring *pring;
10826 pring = phba->sli4_hba.els_wq->pring;
10827 pring->num_mask = LPFC_MAX_RING_MASK;
10828 pring->prt[0].profile = 0; /* Mask 0 */
10829 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10830 pring->prt[0].type = FC_TYPE_ELS;
10831 pring->prt[0].lpfc_sli_rcv_unsol_event =
10832 lpfc_els_unsol_event;
10833 pring->prt[1].profile = 0; /* Mask 1 */
10834 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10835 pring->prt[1].type = FC_TYPE_ELS;
10836 pring->prt[1].lpfc_sli_rcv_unsol_event =
10837 lpfc_els_unsol_event;
10838 pring->prt[2].profile = 0; /* Mask 2 */
10839 /* NameServer Inquiry */
10840 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10842 pring->prt[2].type = FC_TYPE_CT;
10843 pring->prt[2].lpfc_sli_rcv_unsol_event =
10844 lpfc_ct_unsol_event;
10845 pring->prt[3].profile = 0; /* Mask 3 */
10846 /* NameServer response */
10847 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10849 pring->prt[3].type = FC_TYPE_CT;
10850 pring->prt[3].lpfc_sli_rcv_unsol_event =
10851 lpfc_ct_unsol_event;
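/*
 * Illustrative sketch (a simplification, not the actual dispatch code):
 * the prt[] masks set up above are matched against the R_CTL/TYPE of each
 * unsolicited frame, roughly:
 *
 *	for (i = 0; i < pring->num_mask; i++)
 *		if (pring->prt[i].rctl == fc_hdr->fh_r_ctl &&
 *		    pring->prt[i].type == fc_hdr->fh_type)
 *			pring->prt[i].lpfc_sli_rcv_unsol_event(phba, pring,
 *							       saveq);
 */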
10856 * lpfc_sli_setup - SLI ring setup function
10857 * @phba: Pointer to HBA context object.
10859 * lpfc_sli_setup sets up the rings of the SLI interface with
10860 * the number of iocbs per ring and the iotags. This function is
10861 * called while the driver attaches to the HBA and before the
10862 * interrupts are enabled, so there is no need for locking.
10864 * This function always returns 0. SLI3 only.
10867 lpfc_sli_setup(struct lpfc_hba *phba)
10869 int i, totiocbsize = 0;
10870 struct lpfc_sli *psli = &phba->sli;
10871 struct lpfc_sli_ring *pring;
10873 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
10874 psli->sli_flag = 0;
10876 psli->iocbq_lookup = NULL;
10877 psli->iocbq_lookup_len = 0;
10878 psli->last_iotag = 0;
10880 for (i = 0; i < psli->num_rings; i++) {
10881 pring = &psli->sli3_ring[i];
10883 case LPFC_FCP_RING: /* ring 0 - FCP */
10884 /* numCiocb and numRiocb are used in config_port */
10885 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10886 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10887 pring->sli.sli3.numCiocb +=
10888 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10889 pring->sli.sli3.numRiocb +=
10890 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10891 pring->sli.sli3.numCiocb +=
10892 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10893 pring->sli.sli3.numRiocb +=
10894 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10895 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10896 SLI3_IOCB_CMD_SIZE :
10897 SLI2_IOCB_CMD_SIZE;
10898 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10899 SLI3_IOCB_RSP_SIZE :
10900 SLI2_IOCB_RSP_SIZE;
10901 pring->iotag_ctr = 0;
10903 (phba->cfg_hba_queue_depth * 2);
10904 pring->fast_iotag = pring->iotag_max;
10905 pring->num_mask = 0;
10907 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
10908 /* numCiocb and numRiocb are used in config_port */
10909 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10910 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10911 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10912 SLI3_IOCB_CMD_SIZE :
10913 SLI2_IOCB_CMD_SIZE;
10914 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10915 SLI3_IOCB_RSP_SIZE :
10916 SLI2_IOCB_RSP_SIZE;
10917 pring->iotag_max = phba->cfg_hba_queue_depth;
10918 pring->num_mask = 0;
10920 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
10921 /* numCiocb and numRiocb are used in config_port */
10922 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10923 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10924 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10925 SLI3_IOCB_CMD_SIZE :
10926 SLI2_IOCB_CMD_SIZE;
10927 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10928 SLI3_IOCB_RSP_SIZE :
10929 SLI2_IOCB_RSP_SIZE;
10930 pring->fast_iotag = 0;
10931 pring->iotag_ctr = 0;
10932 pring->iotag_max = 4096;
10933 pring->lpfc_sli_rcv_async_status =
10934 lpfc_sli_async_event_handler;
10935 pring->num_mask = LPFC_MAX_RING_MASK;
10936 pring->prt[0].profile = 0; /* Mask 0 */
10937 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10938 pring->prt[0].type = FC_TYPE_ELS;
10939 pring->prt[0].lpfc_sli_rcv_unsol_event =
10940 lpfc_els_unsol_event;
10941 pring->prt[1].profile = 0; /* Mask 1 */
10942 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10943 pring->prt[1].type = FC_TYPE_ELS;
10944 pring->prt[1].lpfc_sli_rcv_unsol_event =
10945 lpfc_els_unsol_event;
10946 pring->prt[2].profile = 0; /* Mask 2 */
10947 /* NameServer Inquiry */
10948 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10950 pring->prt[2].type = FC_TYPE_CT;
10951 pring->prt[2].lpfc_sli_rcv_unsol_event =
10952 lpfc_ct_unsol_event;
10953 pring->prt[3].profile = 0; /* Mask 3 */
10954 /* NameServer response */
10955 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10957 pring->prt[3].type = FC_TYPE_CT;
10958 pring->prt[3].lpfc_sli_rcv_unsol_event =
10959 lpfc_ct_unsol_event;
10962 totiocbsize += (pring->sli.sli3.numCiocb *
10963 pring->sli.sli3.sizeCiocb) +
10964 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
10966 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
10967 /* Too many cmd / rsp ring entries in SLI2 SLIM */
10968 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10969 "SLI2 SLIM Data: x%x x%lx\n",
10970 phba->brd_no, totiocbsize,
10971 (unsigned long) MAX_SLIM_IOCB_SIZE);
10973 if (phba->cfg_multi_ring_support == 2)
10974 lpfc_extra_ring_setup(phba);
10980 * lpfc_sli4_queue_init - Queue initialization function
10981 * @phba: Pointer to HBA context object.
10983 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
10984 * ring. This function also initializes ring indices of each ring.
10985 * This function is called during the initialization of the SLI
10986 * interface of an HBA.
10987 * This function is called with no lock held.
10991 lpfc_sli4_queue_init(struct lpfc_hba *phba)
10993 struct lpfc_sli *psli;
10994 struct lpfc_sli_ring *pring;
10998 spin_lock_irq(&phba->hbalock);
10999 INIT_LIST_HEAD(&psli->mboxq);
11000 INIT_LIST_HEAD(&psli->mboxq_cmpl);
11001	/* Initialize list headers for txq and txcmplq as doubly linked lists */
11002 for (i = 0; i < phba->cfg_hdw_queue; i++) {
11003 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
11005 pring->ringno = LPFC_FCP_RING;
11006 pring->txcmplq_cnt = 0;
11007 INIT_LIST_HEAD(&pring->txq);
11008 INIT_LIST_HEAD(&pring->txcmplq);
11009 INIT_LIST_HEAD(&pring->iocb_continueq);
11010 spin_lock_init(&pring->ring_lock);
11012 pring = phba->sli4_hba.els_wq->pring;
11014 pring->ringno = LPFC_ELS_RING;
11015 pring->txcmplq_cnt = 0;
11016 INIT_LIST_HEAD(&pring->txq);
11017 INIT_LIST_HEAD(&pring->txcmplq);
11018 INIT_LIST_HEAD(&pring->iocb_continueq);
11019 spin_lock_init(&pring->ring_lock);
11021 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11022 pring = phba->sli4_hba.nvmels_wq->pring;
11024 pring->ringno = LPFC_ELS_RING;
11025 pring->txcmplq_cnt = 0;
11026 INIT_LIST_HEAD(&pring->txq);
11027 INIT_LIST_HEAD(&pring->txcmplq);
11028 INIT_LIST_HEAD(&pring->iocb_continueq);
11029 spin_lock_init(&pring->ring_lock);
11032 spin_unlock_irq(&phba->hbalock);
11036 * lpfc_sli_queue_init - Queue initialization function
11037 * @phba: Pointer to HBA context object.
11039 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
11040 * ring. This function also initializes ring indices of each ring.
11041 * This function is called during the initialization of the SLI
11042 * interface of an HBA.
11043 * This function is called with no lock held.
11047 lpfc_sli_queue_init(struct lpfc_hba *phba)
11049 struct lpfc_sli *psli;
11050 struct lpfc_sli_ring *pring;
11054 spin_lock_irq(&phba->hbalock);
11055 INIT_LIST_HEAD(&psli->mboxq);
11056 INIT_LIST_HEAD(&psli->mboxq_cmpl);
11057	/* Initialize list headers for txq and txcmplq as doubly linked lists */
11058 for (i = 0; i < psli->num_rings; i++) {
11059 pring = &psli->sli3_ring[i];
11061 pring->sli.sli3.next_cmdidx = 0;
11062 pring->sli.sli3.local_getidx = 0;
11063 pring->sli.sli3.cmdidx = 0;
11064 INIT_LIST_HEAD(&pring->iocb_continueq);
11065 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
11066 INIT_LIST_HEAD(&pring->postbufq);
11068 INIT_LIST_HEAD(&pring->txq);
11069 INIT_LIST_HEAD(&pring->txcmplq);
11070 spin_lock_init(&pring->ring_lock);
11072 spin_unlock_irq(&phba->hbalock);
11076 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
11077 * @phba: Pointer to HBA context object.
11079 * This routine flushes the mailbox command subsystem. It will unconditionally
11080 * flush all the mailbox commands in the three possible stages in the mailbox
11081 * command sub-system: pending mailbox command queue; the outstanding mailbox
11082 * command; and the completed mailbox command queue. It is the caller's
11083 * responsibility to make sure that the driver is in the proper state to flush
11084 * the mailbox command sub-system. Namely, the posting of mailbox commands into
11085 * the pending mailbox command queue from the various clients must be stopped;
11086 * either the HBA is in a state in which it will never work on the outstanding
11087 * mailbox command (such as in EEH or ERATT conditions), or the outstanding
11088 * mailbox command has been completed.
11091 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
11093 LIST_HEAD(completions);
11094 struct lpfc_sli *psli = &phba->sli;
11096 unsigned long iflag;
11098 /* Disable softirqs, including timers from obtaining phba->hbalock */
11099 local_bh_disable();
11101 /* Flush all the mailbox commands in the mbox system */
11102 spin_lock_irqsave(&phba->hbalock, iflag);
11104 /* The pending mailbox command queue */
11105 list_splice_init(&phba->sli.mboxq, &completions);
11106 /* The outstanding active mailbox command */
11107 if (psli->mbox_active) {
11108 list_add_tail(&psli->mbox_active->list, &completions);
11109 psli->mbox_active = NULL;
11110 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11112 /* The completed mailbox command queue */
11113 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
11114 spin_unlock_irqrestore(&phba->hbalock, iflag);
11116 /* Enable softirqs again, done with phba->hbalock */
11119 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
11120 while (!list_empty(&completions)) {
11121 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
11122 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
11123 if (pmb->mbox_cmpl)
11124 pmb->mbox_cmpl(phba, pmb);
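/*
 * Illustrative sketch (not driver code): a mailbox completion handler can
 * tell a flushed command from an executed one by its status; my_mbox_cmpl
 * is a hypothetical handler name.
 *
 *	static void my_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 *	{
 *		if (pmb->u.mb.mbxStatus == MBX_NOT_FINISHED)
 *			return;	// flushed, the command never reached the HBA
 *		// normal completion processing goes here
 *	}
 */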
11129 * lpfc_sli_host_down - Vport cleanup function
11130 * @vport: Pointer to virtual port object.
11132 * lpfc_sli_host_down is called to clean up the resources
11133 * associated with a vport before destroying virtual
11134 * port data structures.
11135 * This function does the following operations:
11136 * - Free discovery resources associated with this virtual port
11138 * - Free iocbs associated with this virtual port in the txq
11140 * - Send abort for all iocb commands associated with this
11141 *   vport in txcmplq.
11143 * This function is called with no lock held and always returns 1.
11146 lpfc_sli_host_down(struct lpfc_vport *vport)
11148 LIST_HEAD(completions);
11149 struct lpfc_hba *phba = vport->phba;
11150 struct lpfc_sli *psli = &phba->sli;
11151 struct lpfc_queue *qp = NULL;
11152 struct lpfc_sli_ring *pring;
11153 struct lpfc_iocbq *iocb, *next_iocb;
11155 unsigned long flags = 0;
11156 uint16_t prev_pring_flag;
11158 lpfc_cleanup_discovery_resources(vport);
11160 spin_lock_irqsave(&phba->hbalock, flags);
11163 * Error everything on the txq since these iocbs
11164 * have not been given to the FW yet.
11165 * Also issue ABTS for everything on the txcmplq
11167 if (phba->sli_rev != LPFC_SLI_REV4) {
11168 for (i = 0; i < psli->num_rings; i++) {
11169 pring = &psli->sli3_ring[i];
11170 prev_pring_flag = pring->flag;
11171 /* Only slow rings */
11172 if (pring->ringno == LPFC_ELS_RING) {
11173 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11174 /* Set the lpfc data pending flag */
11175 set_bit(LPFC_DATA_READY, &phba->data_flags);
11177 list_for_each_entry_safe(iocb, next_iocb,
11178 &pring->txq, list) {
11179 if (iocb->vport != vport)
11181 list_move_tail(&iocb->list, &completions);
11183 list_for_each_entry_safe(iocb, next_iocb,
11184 &pring->txcmplq, list) {
11185 if (iocb->vport != vport)
11187 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11190 pring->flag = prev_pring_flag;
11193 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11197 if (pring == phba->sli4_hba.els_wq->pring) {
11198 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11199 /* Set the lpfc data pending flag */
11200 set_bit(LPFC_DATA_READY, &phba->data_flags);
11202 prev_pring_flag = pring->flag;
11203 spin_lock(&pring->ring_lock);
11204 list_for_each_entry_safe(iocb, next_iocb,
11205 &pring->txq, list) {
11206 if (iocb->vport != vport)
11208 list_move_tail(&iocb->list, &completions);
11210 spin_unlock(&pring->ring_lock);
11211 list_for_each_entry_safe(iocb, next_iocb,
11212 &pring->txcmplq, list) {
11213 if (iocb->vport != vport)
11215 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11218 pring->flag = prev_pring_flag;
11221 spin_unlock_irqrestore(&phba->hbalock, flags);
11223 /* Make sure HBA is alive */
11224 lpfc_issue_hb_tmo(phba);
11226 /* Cancel all the IOCBs from the completions list */
11227 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11233 * lpfc_sli_hba_down - Resource cleanup function for the HBA
11234 * @phba: Pointer to HBA context object.
11236 * This function cleans up all iocbs, buffers, and mailbox commands
11237 * while shutting down the HBA. This function is called with no
11238 * lock held and always returns 1.
11239 * This function does the following to cleanup driver resources:
11240 * - Free discovery resources for each virtual port
11241 * - Cleanup any pending fabric iocbs
11242 * - Iterate through the iocb txq and free each entry
11244 * - Free up any buffer posted to the HBA
11245 * - Free mailbox commands in the mailbox queue.
11248 lpfc_sli_hba_down(struct lpfc_hba *phba)
11250 LIST_HEAD(completions);
11251 struct lpfc_sli *psli = &phba->sli;
11252 struct lpfc_queue *qp = NULL;
11253 struct lpfc_sli_ring *pring;
11254 struct lpfc_dmabuf *buf_ptr;
11255 unsigned long flags = 0;
11258 /* Shutdown the mailbox command sub-system */
11259 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
11261 lpfc_hba_down_prep(phba);
11263 /* Disable softirqs, including timers from obtaining phba->hbalock */
11264 local_bh_disable();
11266 lpfc_fabric_abort_hba(phba);
11268 spin_lock_irqsave(&phba->hbalock, flags);
11271 * Error everything on the txq since these iocbs
11272 * have not been given to the FW yet.
11274 if (phba->sli_rev != LPFC_SLI_REV4) {
11275 for (i = 0; i < psli->num_rings; i++) {
11276 pring = &psli->sli3_ring[i];
11277 /* Only slow rings */
11278 if (pring->ringno == LPFC_ELS_RING) {
11279 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11280 /* Set the lpfc data pending flag */
11281 set_bit(LPFC_DATA_READY, &phba->data_flags);
11283 list_splice_init(&pring->txq, &completions);
11286 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11290 spin_lock(&pring->ring_lock);
11291 list_splice_init(&pring->txq, &completions);
11292 spin_unlock(&pring->ring_lock);
11293 if (pring == phba->sli4_hba.els_wq->pring) {
11294 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11295 /* Set the lpfc data pending flag */
11296 set_bit(LPFC_DATA_READY, &phba->data_flags);
11300 spin_unlock_irqrestore(&phba->hbalock, flags);
11302 /* Cancel all the IOCBs from the completions list */
11303 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11306 spin_lock_irqsave(&phba->hbalock, flags);
11307 list_splice_init(&phba->elsbuf, &completions);
11308 phba->elsbuf_cnt = 0;
11309 phba->elsbuf_prev_cnt = 0;
11310 spin_unlock_irqrestore(&phba->hbalock, flags);
11312 while (!list_empty(&completions)) {
11313 list_remove_head(&completions, buf_ptr,
11314 struct lpfc_dmabuf, list);
11315 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
11319 /* Enable softirqs again, done with phba->hbalock */
11322 /* Return any active mbox cmds */
11323 del_timer_sync(&psli->mbox_tmo);
11325 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
11326 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
11327 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
11333 * lpfc_sli_pcimem_bcopy - SLI memory copy function
11334 * @srcp: Source memory pointer.
11335 * @destp: Destination memory pointer.
11336 * @cnt: Number of bytes to copy (processed one 32-bit word at a time).
11338 * This function is used for copying data between driver memory
11339 * and the SLI memory. This function also changes the endianness
11340 * of each word if native endianness is different from SLI
11341 * endianness. This function can be called with or without a lock held.
11345 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
11347 uint32_t *src = srcp;
11348 uint32_t *dest = destp;
11352 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
11354 ldata = le32_to_cpu(ldata);
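/*
 * Illustrative usage (not driver code): cnt is a byte count, consumed one
 * 32-bit word at a time, with each word converted from SLI (little-endian)
 * to native byte order:
 *
 *	uint32_t slim_image[4];		// little-endian SLI memory image
 *	uint32_t host_copy[4];		// native-endian result
 *	lpfc_sli_pcimem_bcopy(slim_image, host_copy, sizeof(slim_image));
 */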
11363 * lpfc_sli_bemem_bcopy - SLI memory copy function
11364 * @srcp: Source memory pointer.
11365 * @destp: Destination memory pointer.
11366 * @cnt: Number of bytes to copy (processed one 32-bit word at a time).
11368 * This function is used for copying data between a data structure
11369 * with big endian representation to local endianness.
11370 * This function can be called with or without lock.
11373 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
11375 uint32_t *src = srcp;
11376 uint32_t *dest = destp;
11380 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
11382 ldata = be32_to_cpu(ldata);
11390 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
11391 * @phba: Pointer to HBA context object.
11392 * @pring: Pointer to driver SLI ring object.
11393 * @mp: Pointer to driver buffer object.
11395 * This function is called with no lock held.
11396 * It always returns zero after adding the buffer to the postbufq.
11400 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11401 struct lpfc_dmabuf *mp)
11403 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
11405 spin_lock_irq(&phba->hbalock);
11406 list_add_tail(&mp->list, &pring->postbufq);
11407 pring->postbufq_cnt++;
11408 spin_unlock_irq(&phba->hbalock);
11413 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
11414 * @phba: Pointer to HBA context object.
11416 * When HBQ is enabled, buffers are searched based on tags. This function
11417 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
11418 * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
11419 * does not conflict with tags of buffers posted for unsolicited events.
11420 * The function returns the allocated tag. The function is called with no lock held.
11424 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
11426 spin_lock_irq(&phba->hbalock);
11427 phba->buffer_tag_count++;
11429	 * Always set the QUE_BUFTAG_BIT to distinguish this tag
11430	 * from a tag assigned by HBQ.
11432 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
11433 spin_unlock_irq(&phba->hbalock);
11434 return phba->buffer_tag_count;
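/*
 * Illustrative round trip (not driver code): tag a buffer, post it, then
 * recover it when the response iocb arrives. mp is a hypothetical,
 * already-allocated lpfc_dmabuf; tag_from_iocb stands for the tag carried
 * by the CMD_IOCB_RET_XRI64_CX response.
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	// later, in the response path:
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag_from_iocb);
 */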
11438 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
11439 * @phba: Pointer to HBA context object.
11440 * @pring: Pointer to driver SLI ring object.
11441 * @tag: Buffer tag.
11443 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
11444 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
11445 * iocb is posted to the response ring with the tag of the buffer.
11446 * This function searches the pring->postbufq list using the tag
11447 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
11448 * iocb. If the buffer is found, the lpfc_dmabuf object of the
11449 * buffer is returned to the caller; otherwise NULL is returned.
11450 * This function is called with no lock held.
11452 struct lpfc_dmabuf *
11453 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11456 struct lpfc_dmabuf *mp, *next_mp;
11457 struct list_head *slp = &pring->postbufq;
11459 /* Search postbufq, from the beginning, looking for a match on tag */
11460 spin_lock_irq(&phba->hbalock);
11461 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11462 if (mp->buffer_tag == tag) {
11463 list_del_init(&mp->list);
11464 pring->postbufq_cnt--;
11465 spin_unlock_irq(&phba->hbalock);
11470 spin_unlock_irq(&phba->hbalock);
11471 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11472 "0402 Cannot find virtual addr for buffer tag on "
11473 "ring %d Data x%lx x%px x%px x%x\n",
11474 pring->ringno, (unsigned long) tag,
11475 slp->next, slp->prev, pring->postbufq_cnt);
11481 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
11482 * @phba: Pointer to HBA context object.
11483 * @pring: Pointer to driver SLI ring object.
11484 * @phys: DMA address of the buffer.
11486 * This function searches the buffer list using the dma_address
11487 * of unsolicited event to find the driver's lpfc_dmabuf object
11488 * corresponding to the dma_address. The function returns the
11489 * lpfc_dmabuf object if a buffer is found; otherwise it returns NULL.
11490 * This function is called by the ct and els unsolicited event
11491 * handlers to get the buffer associated with the unsolicited
11494 * This function is called with no lock held.
11496 struct lpfc_dmabuf *
11497 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11500 struct lpfc_dmabuf *mp, *next_mp;
11501 struct list_head *slp = &pring->postbufq;
11503 /* Search postbufq, from the beginning, looking for a match on phys */
11504 spin_lock_irq(&phba->hbalock);
11505 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11506 if (mp->phys == phys) {
11507 list_del_init(&mp->list);
11508 pring->postbufq_cnt--;
11509 spin_unlock_irq(&phba->hbalock);
11514 spin_unlock_irq(&phba->hbalock);
11515 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11516 "0410 Cannot find virtual addr for mapped buf on "
11517 "ring %d Data x%llx x%px x%px x%x\n",
11518 pring->ringno, (unsigned long long)phys,
11519 slp->next, slp->prev, pring->postbufq_cnt);
11524 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
11525 * @phba: Pointer to HBA context object.
11526 * @cmdiocb: Pointer to driver command iocb object.
11527 * @rspiocb: Pointer to driver response iocb object.
11529 * This function is the completion handler for the abort iocbs for
11530 * ELS commands. This function is called from the ELS ring event
11531 * handler with no lock held. This function frees memory resources
11532 * associated with the abort iocb.
11535 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11536 struct lpfc_iocbq *rspiocb)
11538 IOCB_t *irsp = &rspiocb->iocb;
11539 uint16_t abort_iotag, abort_context;
11540 struct lpfc_iocbq *abort_iocb = NULL;
11542 if (irsp->ulpStatus) {
11545	 * Assume that the port already completed and returned, or
11546	 * will return the iocb. Just log the message.
11548 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
11549 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
11551 spin_lock_irq(&phba->hbalock);
11552 if (phba->sli_rev < LPFC_SLI_REV4) {
11553 if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
11554 irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11555 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
11556 spin_unlock_irq(&phba->hbalock);
11559 if (abort_iotag != 0 &&
11560 abort_iotag <= phba->sli.last_iotag)
11562 phba->sli.iocbq_lookup[abort_iotag];
11564 /* For sli4 the abort_tag is the XRI,
11565 * so the abort routine puts the iotag of the iocb
11566 * being aborted in the context field of the abort
11569 abort_iocb = phba->sli.iocbq_lookup[abort_context];
11571 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
11572 "0327 Cannot abort els iocb x%px "
11573 "with tag %x context %x, abort status %x, "
11575 abort_iocb, abort_iotag, abort_context,
11576 irsp->ulpStatus, irsp->un.ulpWord[4]);
11578 spin_unlock_irq(&phba->hbalock);
11581 lpfc_sli_release_iocbq(phba, cmdiocb);
11586 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
11587 * @phba: Pointer to HBA context object.
11588 * @cmdiocb: Pointer to driver command iocb object.
11589 * @rspiocb: Pointer to driver response iocb object.
11591 * The function is called from SLI ring event handler with no
11592 * lock held. This function is the completion handler for ELS commands
11593 * which are aborted. The function frees memory resources used for
11594 * the aborted ELS commands.
11597 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11598 struct lpfc_iocbq *rspiocb)
11600 IOCB_t *irsp = &rspiocb->iocb;
11602 /* ELS cmd tag <ulpIoTag> completes */
11603 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11604 "0139 Ignoring ELS cmd tag x%x completion Data: "
11606 irsp->ulpIoTag, irsp->ulpStatus,
11607 irsp->un.ulpWord[4], irsp->ulpTimeout);
11608 lpfc_nlp_put((struct lpfc_nodelist *)cmdiocb->context1);
11609 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11610 lpfc_ct_free_iocb(phba, cmdiocb);
11612 lpfc_els_free_iocb(phba, cmdiocb);
11616 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
11617 * @phba: Pointer to HBA context object.
11618 * @pring: Pointer to driver SLI ring object.
11619 * @cmdiocb: Pointer to driver command iocb object.
11620 * @cmpl: completion function.
11622 * This function issues an abort iocb for the provided command iocb. In case
11623 * of unloading, the abort iocb will not be issued to commands on the ELS
11624 * ring. Instead, the completion callback of those commands is changed
11625 * so that nothing happens when they finish. This function is called with
11626 * hbalock held and no ring_lock held (SLI4). The function returns IOCB_ABORTING
11627 * when the command iocb is itself an abort request or is already being aborted.
11631 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11632 struct lpfc_iocbq *cmdiocb, void *cmpl)
11634 struct lpfc_vport *vport = cmdiocb->vport;
11635 struct lpfc_iocbq *abtsiocbp;
11636 IOCB_t *icmd = NULL;
11637 IOCB_t *iabt = NULL;
11638 int retval = IOCB_ERROR;
11639 unsigned long iflags;
11640 struct lpfc_nodelist *ndlp;
11643	 * There are certain command types we don't want to abort. And we
11644	 * don't want to abort commands that are already in the process of being aborted.
11647 icmd = &cmdiocb->iocb;
11648 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11649 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11650 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11651 return IOCB_ABORTING;
11654 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11655 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11657 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11662 * If we're unloading, don't abort iocb on the ELS ring, but change
11663 * the callback so that nothing happens when it finishes.
11665 if ((vport->load_flag & FC_UNLOADING) &&
11666 pring->ringno == LPFC_ELS_RING) {
11667 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11668 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11670 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11674 /* issue ABTS for this IOCB based on iotag */
11675 abtsiocbp = __lpfc_sli_get_iocbq(phba);
11676 if (abtsiocbp == NULL)
11677 return IOCB_NORESOURCE;
11679 /* This signals the response to set the correct status
11680 * before calling the completion handler
11682 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11684 iabt = &abtsiocbp->iocb;
11685 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11686 iabt->un.acxri.abortContextTag = icmd->ulpContext;
11687 if (phba->sli_rev == LPFC_SLI_REV4) {
11688 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
11689 if (pring->ringno == LPFC_ELS_RING)
11690 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
11692 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
11693 if (pring->ringno == LPFC_ELS_RING) {
11694 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11695 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11699 iabt->ulpClass = icmd->ulpClass;
11701 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11702 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
11703 if (cmdiocb->iocb_flag & LPFC_IO_FCP) {
11704 abtsiocbp->iocb_flag |= LPFC_IO_FCP;
11705 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
11707 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11708 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
11710 if (phba->link_state >= LPFC_LINK_UP)
11711 iabt->ulpCommand = CMD_ABORT_XRI_CN;
11713 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
11716 abtsiocbp->iocb_cmpl = cmpl;
11718 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
11719 abtsiocbp->vport = vport;
11721 if (phba->sli_rev == LPFC_SLI_REV4) {
11722 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11723 if (unlikely(pring == NULL))
11724 goto abort_iotag_exit;
11725		/* Note: both hbalock and ring_lock need to be held here */
11726 spin_lock_irqsave(&pring->ring_lock, iflags);
11727 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11729 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11731 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11737 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11738 "0339 Abort xri x%x, original iotag x%x, "
11739 "abort cmd iotag x%x retval x%x\n",
11740 iabt->un.acxri.abortIoTag,
11741 iabt->un.acxri.abortContextTag,
11742 abtsiocbp->iotag, retval);
11745 cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
11746 __lpfc_sli_release_iocbq(phba, abtsiocbp);
11750 * Callers of this routine should check for IOCB_ERROR
11751 * and handle it properly. This routine no longer removes
11752 * the iocb from the txcmplq or calls the completion handler on IOCB_ERROR.
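/*
 * Illustrative sketch (editor's addition, not driver code): callers wrap
 * lpfc_sli_issue_abort_iotag() in the hbalock and treat any status other
 * than IOCB_SUCCESS as "abort not issued", e.g.:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	ret = lpfc_sli_issue_abort_iotag(phba, pring, iocbq, cmpl);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (ret != IOCB_SUCCESS)
 *		errcnt++;
 *
 * This mirrors the usage in lpfc_sli_abort_iocb() later in this file.
 */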
11758 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
11759 * @phba: pointer to lpfc HBA data structure.
11761 * This routine will abort all pending and outstanding iocbs to an HBA.
11764 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11766 struct lpfc_sli *psli = &phba->sli;
11767 struct lpfc_sli_ring *pring;
11768 struct lpfc_queue *qp = NULL;
11771 if (phba->sli_rev != LPFC_SLI_REV4) {
11772 for (i = 0; i < psli->num_rings; i++) {
11773 pring = &psli->sli3_ring[i];
11774 lpfc_sli_abort_iocb_ring(phba, pring);
11778 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11782 lpfc_sli_abort_iocb_ring(phba, pring);
11787 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
11788 * @iocbq: Pointer to driver iocb object.
11789 * @vport: Pointer to driver virtual port object.
11790 * @tgt_id: SCSI ID of the target.
11791 * @lun_id: LUN ID of the scsi device.
11792 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
11794 * This function acts as an iocb filter for functions which abort or count
11795 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
11796 * 0 if the filtering criteria are met for the given iocb and will return
11797 * 1 if the filtering criteria are not met.
11798 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
11799 * given iocb is for the SCSI device specified by vport, tgt_id and
11800 * lun_id parameter.
11801 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
11802 * given iocb is for the SCSI target specified by vport and tgt_id
11804 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
11805 * given iocb is for the SCSI host associated with the given vport.
11806 * This function is called with no locks held.
11809 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11810 uint16_t tgt_id, uint64_t lun_id,
11811 lpfc_ctx_cmd ctx_cmd)
11813 struct lpfc_io_buf *lpfc_cmd;
11816 if (!iocbq || iocbq->vport != vport)
11819 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11820 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
11823 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11825 if (lpfc_cmd->pCmd == NULL)
11830 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11831 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11832 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
11836 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11837 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
11840 case LPFC_CTX_HOST:
11844 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
11845 __func__, ctx_cmd);
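/*
 * Editor's note with a hedged example: the filter narrows from most to
 * least specific.  For an FCP iocb on the txcmplq that is bound to SCSI
 * target 2, LUN 5 of this vport, the expected results would be:
 *
 *	lpfc_sli_validate_fcp_iocb(iocbq, vport, 2, 5, LPFC_CTX_LUN)  -> 0
 *	lpfc_sli_validate_fcp_iocb(iocbq, vport, 2, 9, LPFC_CTX_LUN)  -> 1
 *	lpfc_sli_validate_fcp_iocb(iocbq, vport, 2, 9, LPFC_CTX_TGT)  -> 0
 *	lpfc_sli_validate_fcp_iocb(iocbq, vport, 7, 5, LPFC_CTX_HOST) -> 0
 *
 * 0 means "matched", 1 means "filtered out", per the kerneldoc above.
 */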
11853 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
11854 * @vport: Pointer to virtual port.
11855 * @tgt_id: SCSI ID of the target.
11856 * @lun_id: LUN ID of the scsi device.
11857 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11859 * This function returns the number of FCP commands pending for the vport.
11860 * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
11861 * commands pending on the vport associated with the SCSI device specified
11862 * by the tgt_id and lun_id parameters.
11863 * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
11864 * commands pending on the vport associated with the SCSI target specified
11865 * by the tgt_id parameter.
11866 * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
11867 * commands pending on the vport.
11868 * This function returns the number of iocbs which satisfy the filter.
11869 * This function is called without any lock held.
11872 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11873 lpfc_ctx_cmd ctx_cmd)
11875 struct lpfc_hba *phba = vport->phba;
11876 struct lpfc_iocbq *iocbq;
11879 spin_lock_irq(&phba->hbalock);
11880 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11881 iocbq = phba->sli.iocbq_lookup[i];
11883 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11887 spin_unlock_irq(&phba->hbalock);
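/*
 * Usage sketch (editor's addition; the 20ms interval is hypothetical):
 * counting outstanding FCP I/Os for one LUN, for example while polling
 * for a flush to finish:
 *
 *	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
 *	while (cnt) {
 *		msleep(20);
 *		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
 *	}
 */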
11893 * lpfc_sli4_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
11894 * @phba: Pointer to HBA context object
11895 * @cmdiocb: Pointer to command iocb object.
11896 * @wcqe: pointer to the complete wcqe
11898 * This function is called when an aborted FCP iocb completes. This
11899 * function is called by the ring event handler with no lock held.
11900 * This function frees the iocb. It is called for SLI-4 adapters.
11903 lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11904 struct lpfc_wcqe_complete *wcqe)
11906 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11907 "3017 ABORT_XRI_CN completing on rpi x%x "
11908 "original iotag x%x, abort cmd iotag x%x "
11909 "status 0x%x, reason 0x%x\n",
11910 cmdiocb->iocb.un.acxri.abortContextTag,
11911 cmdiocb->iocb.un.acxri.abortIoTag,
11913 (bf_get(lpfc_wcqe_c_status, wcqe)
11914 & LPFC_IOCB_STATUS_MASK),
11916 lpfc_sli_release_iocbq(phba, cmdiocb);
11920 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
11921 * @phba: Pointer to HBA context object
11922 * @cmdiocb: Pointer to command iocb object.
11923 * @rspiocb: Pointer to response iocb object.
11925 * This function is called when an aborted FCP iocb completes. This
11926 * function is called by the ring event handler with no lock held.
11927 * This function frees the iocb.
11930 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11931 struct lpfc_iocbq *rspiocb)
11933 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11934 "3096 ABORT_XRI_CN completing on rpi x%x "
11935 "original iotag x%x, abort cmd iotag x%x "
11936 "status 0x%x, reason 0x%x\n",
11937 cmdiocb->iocb.un.acxri.abortContextTag,
11938 cmdiocb->iocb.un.acxri.abortIoTag,
11939 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11940 rspiocb->iocb.un.ulpWord[4]);
11941 lpfc_sli_release_iocbq(phba, cmdiocb);
11946 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
11947 * @vport: Pointer to virtual port.
11948 * @pring: Pointer to driver SLI ring object.
11949 * @tgt_id: SCSI ID of the target.
11950 * @lun_id: LUN ID of the scsi device.
11951 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11953 * This function sends an abort command for every SCSI command
11954 * associated with the given virtual port pending on the ring
11955 * filtered by lpfc_sli_validate_fcp_iocb function.
11956 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
11957 * FCP iocbs associated with lun specified by tgt_id and lun_id
11959 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
11960 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11961 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
11962 * FCP iocbs associated with virtual port.
11963 * This function returns the number of iocbs it failed to abort.
11964 * This function is called with no locks held.
11967 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11968 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
11970 struct lpfc_hba *phba = vport->phba;
11971 struct lpfc_iocbq *iocbq;
11972 int errcnt = 0, ret_val = 0;
11973 unsigned long iflags;
11976 /* all I/Os are in process of being flushed */
11977 if (phba->hba_flag & HBA_IOQ_FLUSH)
11980 for (i = 1; i <= phba->sli.last_iotag; i++) {
11981 iocbq = phba->sli.iocbq_lookup[i];
11983 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11987 spin_lock_irqsave(&phba->hbalock, iflags);
11988 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
11989 lpfc_sli_abort_fcp_cmpl);
11990 spin_unlock_irqrestore(&phba->hbalock, iflags);
11991 if (ret_val != IOCB_SUCCESS)
11999 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
12000 * @vport: Pointer to virtual port.
12001 * @pring: Pointer to driver SLI ring object.
12002 * @tgt_id: SCSI ID of the target.
12003 * @lun_id: LUN ID of the scsi device.
12004 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12006 * This function sends an abort command for every SCSI command
12007 * associated with the given virtual port pending on the ring
12008 * filtered by lpfc_sli_validate_fcp_iocb function.
12009 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
12010 * FCP iocbs associated with lun specified by tgt_id and lun_id
12012 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
12013 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
12014 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
12015 * FCP iocbs associated with virtual port.
12016 * This function returns the number of iocbs it aborted.
12017 * This function is called with no locks held right after a taskmgmt command is sent.
12021 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
12022 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
12024 struct lpfc_hba *phba = vport->phba;
12025 struct lpfc_io_buf *lpfc_cmd;
12026 struct lpfc_iocbq *abtsiocbq;
12027 struct lpfc_nodelist *ndlp;
12028 struct lpfc_iocbq *iocbq;
12030 int sum, i, ret_val;
12031 unsigned long iflags;
12032 struct lpfc_sli_ring *pring_s4 = NULL;
12034 spin_lock_irqsave(&phba->hbalock, iflags);
12036 /* all I/Os are in process of being flushed */
12037 if (phba->hba_flag & HBA_IOQ_FLUSH) {
12038 spin_unlock_irqrestore(&phba->hbalock, iflags);
12043 for (i = 1; i <= phba->sli.last_iotag; i++) {
12044 iocbq = phba->sli.iocbq_lookup[i];
12046 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12050 /* Guard against IO completion being called at same time */
12051 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12052 spin_lock(&lpfc_cmd->buf_lock);
12054 if (!lpfc_cmd->pCmd) {
12055 spin_unlock(&lpfc_cmd->buf_lock);
12059 if (phba->sli_rev == LPFC_SLI_REV4) {
12061 phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
12063 spin_unlock(&lpfc_cmd->buf_lock);
12066 /* Note: both hbalock and ring_lock must be held here */
12067 spin_lock(&pring_s4->ring_lock);
12071 * If the iocbq is already being aborted, don't take a second
12072 * action, but do count it.
12074 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
12075 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
12076 if (phba->sli_rev == LPFC_SLI_REV4)
12077 spin_unlock(&pring_s4->ring_lock);
12078 spin_unlock(&lpfc_cmd->buf_lock);
12082 /* issue ABTS for this IOCB based on iotag */
12083 abtsiocbq = __lpfc_sli_get_iocbq(phba);
12085 if (phba->sli_rev == LPFC_SLI_REV4)
12086 spin_unlock(&pring_s4->ring_lock);
12087 spin_unlock(&lpfc_cmd->buf_lock);
12091 icmd = &iocbq->iocb;
12092 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
12093 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
12094 if (phba->sli_rev == LPFC_SLI_REV4)
12095 abtsiocbq->iocb.un.acxri.abortIoTag =
12096 iocbq->sli4_xritag;
12098 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
12099 abtsiocbq->iocb.ulpLe = 1;
12100 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
12101 abtsiocbq->vport = vport;
12103 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
12104 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
12105 if (iocbq->iocb_flag & LPFC_IO_FCP)
12106 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
12107 if (iocbq->iocb_flag & LPFC_IO_FOF)
12108 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
12110 ndlp = lpfc_cmd->rdata->pnode;
12112 if (lpfc_is_link_up(phba) &&
12113 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
12114 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
12116 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
12118 /* Setup callback routine and issue the command. */
12119 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
12122 * Indicate the IO is being aborted by the driver and set
12123 * the caller's flag into the aborted IO.
12125 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
12127 if (phba->sli_rev == LPFC_SLI_REV4) {
12128 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
12130 spin_unlock(&pring_s4->ring_lock);
12132 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
12136 spin_unlock(&lpfc_cmd->buf_lock);
12138 if (ret_val == IOCB_ERROR)
12139 __lpfc_sli_release_iocbq(phba, abtsiocbq);
12143 spin_unlock_irqrestore(&phba->hbalock, iflags);
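/*
 * Editor's note on the lock nesting above (derived from the code itself):
 * lpfc_sli_abort_taskmgmt() takes phba->hbalock first, then
 * lpfc_cmd->buf_lock, and on SLI-4 finally pring_s4->ring_lock, releasing
 * them in the reverse order.  Any hypothetical new code touching the same
 * structures would need the same order to avoid deadlock:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	spin_lock(&lpfc_cmd->buf_lock);
 *	spin_lock(&pring_s4->ring_lock);
 *	...
 *	spin_unlock(&pring_s4->ring_lock);
 *	spin_unlock(&lpfc_cmd->buf_lock);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 */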
12148 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
12149 * @phba: Pointer to HBA context object.
12150 * @cmdiocbq: Pointer to command iocb.
12151 * @rspiocbq: Pointer to response iocb.
12153 * This function is the completion handler for iocbs issued using
12154 * lpfc_sli_issue_iocb_wait function. This function is called by the
12155 * ring event handler function without any lock held. This function
12156 * can be called from both worker thread context and interrupt
12157 * context. This function also can be called from other thread which
12158 * cleans up the SLI layer objects.
12159 * This function copies the contents of the response iocb to the
12160 * response iocb memory object provided by the caller of
12161 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
12162 * sleeps for the iocb completion.
12165 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
12166 struct lpfc_iocbq *cmdiocbq,
12167 struct lpfc_iocbq *rspiocbq)
12169 wait_queue_head_t *pdone_q;
12170 unsigned long iflags;
12171 struct lpfc_io_buf *lpfc_cmd;
12173 spin_lock_irqsave(&phba->hbalock, iflags);
12174 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
12177 * A time out has occurred for the iocb. If a time out
12178 * completion handler has been supplied, call it. Otherwise,
12179 * just free the iocbq.
12182 spin_unlock_irqrestore(&phba->hbalock, iflags);
12183 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
12184 cmdiocbq->wait_iocb_cmpl = NULL;
12185 if (cmdiocbq->iocb_cmpl)
12186 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
12188 lpfc_sli_release_iocbq(phba, cmdiocbq);
12192 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
12193 if (cmdiocbq->context2 && rspiocbq)
12194 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
12195 &rspiocbq->iocb, sizeof(IOCB_t));
12197 /* Set the exchange busy flag for task management commands */
12198 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
12199 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
12200 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
12202 if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
12203 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
12205 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
12208 pdone_q = cmdiocbq->context_un.wait_queue;
12211 spin_unlock_irqrestore(&phba->hbalock, iflags);
12216 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
12217 * @phba: Pointer to HBA context object..
12218 * @piocbq: Pointer to command iocb.
12219 * @flag: Flag to test.
12221 * This routine grabs the hbalock and then test the iocb_flag to
12222 * see if the passed in flag is set.
12224 * 1 if flag is set.
12225 * 0 if flag is not set.
12228 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
12229 struct lpfc_iocbq *piocbq, uint32_t flag)
12231 unsigned long iflags;
12234 spin_lock_irqsave(&phba->hbalock, iflags);
12235 ret = piocbq->iocb_flag & flag;
12236 spin_unlock_irqrestore(&phba->hbalock, iflags);
12242 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
12243 * @phba: Pointer to HBA context object..
12244 * @ring_number: Ring number
12245 * @piocb: Pointer to command iocb.
12246 * @prspiocbq: Pointer to response iocb.
12247 * @timeout: Timeout in number of seconds.
12249 * This function issues the iocb to firmware and waits for the
12250 * iocb to complete. The iocb_cmpl field of the iocb shall be used
12251 * to handle iocbs which time out. If the field is NULL, the
12252 * function shall free the iocbq structure. If more clean up is
12253 * needed, the caller is expected to provide a completion function
12254 * that will provide the needed clean up. If the iocb command is
12255 * not completed within timeout seconds, the function will either
12256 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
12257 * completion function set in the iocb_cmpl field and then return
12258 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
12259 * resources if this function returns IOCB_TIMEDOUT.
12260 * The function waits for the iocb completion using a
12261 * non-interruptible wait.
12262 * This function will sleep while waiting for iocb completion.
12263 * So, this function should not be called from any context which
12264 * does not allow sleeping. Due to the same reason, this function
12265 * cannot be called with interrupt disabled.
12266 * This function assumes that the iocb completions occur while
12267 * this function sleeps. So, this function cannot be called from
12268 * the thread which processes iocb completions for this ring.
12269 * This function clears the iocb_flag of the iocb object before
12270 * issuing the iocb and the iocb completion handler sets this
12271 * flag and wakes this thread when the iocb completes.
12272 * The contents of the response iocb will be copied to prspiocbq
12273 * by the completion handler when the command completes.
12274 * This function returns IOCB_SUCCESS when success.
12275 * This function is called with no lock held.
12278 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
12279 uint32_t ring_number,
12280 struct lpfc_iocbq *piocb,
12281 struct lpfc_iocbq *prspiocbq,
12284 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
12285 long timeleft, timeout_req = 0;
12286 int retval = IOCB_SUCCESS;
12288 struct lpfc_iocbq *iocb;
12290 int txcmplq_cnt = 0;
12291 struct lpfc_sli_ring *pring;
12292 unsigned long iflags;
12293 bool iocb_completed = true;
12295 if (phba->sli_rev >= LPFC_SLI_REV4)
12296 pring = lpfc_sli4_calc_ring(phba, piocb);
12298 pring = &phba->sli.sli3_ring[ring_number];
12300 * If the caller has provided a response iocbq buffer, then context2
12301 * must be NULL; otherwise it is an error.
12304 if (piocb->context2)
12306 piocb->context2 = prspiocbq;
12309 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
12310 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
12311 piocb->context_un.wait_queue = &done_q;
12312 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
12314 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12315 if (lpfc_readl(phba->HCregaddr, &creg_val))
12317 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
12318 writel(creg_val, phba->HCregaddr);
12319 readl(phba->HCregaddr); /* flush */
12322 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
12323 SLI_IOCB_RET_IOCB);
12324 if (retval == IOCB_SUCCESS) {
12325 timeout_req = msecs_to_jiffies(timeout * 1000);
12326 timeleft = wait_event_timeout(done_q,
12327 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
12329 spin_lock_irqsave(&phba->hbalock, iflags);
12330 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
12333 * IOCB timed out. Inform the wake iocb wait
12334 * completion function and set local status
12337 iocb_completed = false;
12338 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
12340 spin_unlock_irqrestore(&phba->hbalock, iflags);
12341 if (iocb_completed) {
12342 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12343 "0331 IOCB wake signaled\n");
12344 /* Note: we are not indicating if the IOCB has a success
12345 * status or not - that's for the caller to check.
12346 * IOCB_SUCCESS means just that the command was sent and
12347 * completed. Not that it completed successfully.
12349 } else if (timeleft == 0) {
12350 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12351 "0338 IOCB wait timeout error - no "
12352 "wake response Data x%x\n", timeout);
12353 retval = IOCB_TIMEDOUT;
12355 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12356 "0330 IOCB wake NOT set, "
12358 timeout, (timeleft / jiffies));
12359 retval = IOCB_TIMEDOUT;
12361 } else if (retval == IOCB_BUSY) {
12362 if (phba->cfg_log_verbose & LOG_SLI) {
12363 list_for_each_entry(iocb, &pring->txq, list) {
12366 list_for_each_entry(iocb, &pring->txcmplq, list) {
12369 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12370 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
12371 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
12375 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12376 "0332 IOCB wait issue failed, Data x%x\n",
12378 retval = IOCB_ERROR;
12381 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12382 if (lpfc_readl(phba->HCregaddr, &creg_val))
12384 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
12385 writel(creg_val, phba->HCregaddr);
12386 readl(phba->HCregaddr); /* flush */
12390 piocb->context2 = NULL;
12392 piocb->context_un.wait_queue = NULL;
12393 piocb->iocb_cmpl = NULL;
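/*
 * Usage sketch (editor's addition, illustrative variable names): the one
 * hard rule from the kerneldoc above is that IOCB_TIMEDOUT transfers
 * cleanup of the iocb to this function or to the caller-supplied
 * completion handler:
 *
 *	status = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
 *					  rspiocbq, timeout_secs);
 *	if (status == IOCB_TIMEDOUT)
 *		return;		(cmdiocbq is freed on late completion)
 *
 * For any other status the caller may inspect rspiocbq->iocb and release
 * cmdiocbq itself.
 */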
12398 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
12399 * @phba: Pointer to HBA context object.
12400 * @pmboxq: Pointer to driver mailbox object.
12401 * @timeout: Timeout in number of seconds.
12403 * This function issues the mailbox to firmware and waits for the
12404 * mailbox command to complete. If the mailbox command is not
12405 * completed within timeout seconds, it returns MBX_TIMEOUT.
12406 * The function waits for the mailbox completion using an
12407 * interruptible wait. If the thread is woken up due to a
12408 * signal, MBX_TIMEOUT error is returned to the caller. Caller
12409 * should not free the mailbox resources, if this function returns
12411 * This function will sleep while waiting for mailbox completion.
12412 * So, this function should not be called from any context which
12413 * does not allow sleeping. Due to the same reason, this function
12414 * cannot be called with interrupt disabled.
12415 * This function assumes that the mailbox completion occurs while
12416 * this function sleep. So, this function cannot be called from
12417 * the worker thread which processes mailbox completion.
12418 * This function is called in the context of HBA management
12420 * This function returns MBX_SUCCESS when successful.
12421 * This function is called with no lock held.
12424 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
12427 struct completion mbox_done;
12429 unsigned long flag;
12431 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
12432 /* setup wake call as IOCB callback */
12433 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
12435 /* setup context3 field to pass wait_queue pointer to wake function */
12436 init_completion(&mbox_done);
12437 pmboxq->context3 = &mbox_done;
12438 /* now issue the command */
12439 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
12440 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
12441 wait_for_completion_timeout(&mbox_done,
12442 msecs_to_jiffies(timeout * 1000));
12444 spin_lock_irqsave(&phba->hbalock, flag);
12445 pmboxq->context3 = NULL;
12447 * if LPFC_MBX_WAKE flag is set the mailbox is completed
12448 * else do not free the resources.
12450 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
12451 retval = MBX_SUCCESS;
12453 retval = MBX_TIMEOUT;
12454 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12456 spin_unlock_irqrestore(&phba->hbalock, flag);
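/*
 * Usage sketch (editor's addition): the MBX_TIMEOUT contract above means
 * the mailbox memory must not be freed on timeout, since firmware may
 * still complete it later:
 *
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(pmboxq, phba->mbox_mem_pool);
 *
 * mempool_free()/mbox_mem_pool reflect how this driver allocates mailbox
 * objects elsewhere; treat the snippet as a sketch, not a fixed recipe.
 */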
12462 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
12463 * @phba: Pointer to HBA context.
12464 * @mbx_action: Mailbox shutdown options.
12466 * This function is called to shutdown the driver's mailbox sub-system.
12467 * It first marks the mailbox sub-system as in a blocked state to prevent
12468 * asynchronous mailbox commands from being issued off the pending mailbox
12469 * command queue. If the mailbox command sub-system shutdown is due to
12470 * HBA error conditions such as EEH or ERATT, this routine shall invoke
12471 * the mailbox sub-system flush routine to forcefully bring down the
12472 * mailbox sub-system. Otherwise, if it is due to a normal condition (such
12473 * as offline or an HBA function reset), this routine will wait for the
12474 * outstanding mailbox command to complete before invoking the mailbox
12475 * sub-system flush routine to gracefully bring down the mailbox sub-system.
12478 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
12480 struct lpfc_sli *psli = &phba->sli;
12481 unsigned long timeout;
12483 if (mbx_action == LPFC_MBX_NO_WAIT) {
12484 /* delay 100ms for port state */
12486 lpfc_sli_mbox_sys_flush(phba);
12489 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
12491 /* Disable softirqs, including timers from obtaining phba->hbalock */
12492 local_bh_disable();
12494 spin_lock_irq(&phba->hbalock);
12495 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
12497 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
12498 /* Determine how long we might wait for the active mailbox
12499 * command to be gracefully completed by firmware.
12501 if (phba->sli.mbox_active)
12502 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
12503 phba->sli.mbox_active) *
12505 spin_unlock_irq(&phba->hbalock);
12507 /* Enable softirqs again, done with phba->hbalock */
12510 while (phba->sli.mbox_active) {
12511 /* Check active mailbox complete status every 2ms */
12513 if (time_after(jiffies, timeout))
12514 /* Timeout, let the mailbox flush routine
12515 * forcefully release the active mailbox command
12520 spin_unlock_irq(&phba->hbalock);
12522 /* Enable softirqs again, done with phba->hbalock */
12526 lpfc_sli_mbox_sys_flush(phba);
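/*
 * Editor's note: callers choose the shutdown mode.  LPFC_MBX_NO_WAIT (used
 * on error paths such as EEH or ERATT) flushes after only a short delay;
 * any other action waits out the active mailbox first.  A graceful offline
 * path would therefore pass the wait-style action, e.g.:
 *
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
 *
 * The LPFC_MBX_WAIT name is an assumption in this excerpt; only
 * LPFC_MBX_NO_WAIT appears above.
 */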
12530 * lpfc_sli_eratt_read - read sli-3 error attention events
12531 * @phba: Pointer to HBA context.
12533 * This function is called to read the SLI3 device error attention registers
12534 * for possible error attention events. The caller must hold the hostlock
12535 * with spin_lock_irq().
12537 * This function returns 1 when there is Error Attention in the Host Attention
12538 * Register and returns 0 otherwise.
12541 lpfc_sli_eratt_read(struct lpfc_hba *phba)
12545 /* Read chip Host Attention (HA) register */
12546 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12549 if (ha_copy & HA_ERATT) {
12550 /* Read host status register to retrieve error event */
12551 if (lpfc_sli_read_hs(phba))
12554 /* Check if a deferred error condition is active */
12555 if ((HS_FFER1 & phba->work_hs) &&
12556 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12557 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
12558 phba->hba_flag |= DEFER_ERATT;
12559 /* Clear all interrupt enable conditions */
12560 writel(0, phba->HCregaddr);
12561 readl(phba->HCregaddr);
12564 /* Set the driver HA work bitmap */
12565 phba->work_ha |= HA_ERATT;
12566 /* Indicate polling handles this ERATT */
12567 phba->hba_flag |= HBA_ERATT_HANDLED;
12573 /* Set the driver HS work bitmap */
12574 phba->work_hs |= UNPLUG_ERR;
12575 /* Set the driver HA work bitmap */
12576 phba->work_ha |= HA_ERATT;
12577 /* Indicate polling handles this ERATT */
12578 phba->hba_flag |= HBA_ERATT_HANDLED;
12583 * lpfc_sli4_eratt_read - read sli-4 error attention events
12584 * @phba: Pointer to HBA context.
12586 * This function is called to read the SLI4 device error attention registers
12587 * for possible error attention events. The caller must hold the hostlock
12588 * with spin_lock_irq().
12590 * This function returns 1 when there is Error Attention in the Host Attention
12591 * Register and returns 0 otherwise.
12594 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
12596 uint32_t uerr_sta_hi, uerr_sta_lo;
12597 uint32_t if_type, portsmphr;
12598 struct lpfc_register portstat_reg;
12601 * For now, use the SLI4 device internal unrecoverable error
12602 * registers for error attention. This can be changed later.
12604 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12606 case LPFC_SLI_INTF_IF_TYPE_0:
12607 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
12609 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
12611 phba->work_hs |= UNPLUG_ERR;
12612 phba->work_ha |= HA_ERATT;
12613 phba->hba_flag |= HBA_ERATT_HANDLED;
12616 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
12617 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
12618 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12619 "1423 HBA Unrecoverable error: "
12620 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
12621 "ue_mask_lo_reg=0x%x, "
12622 "ue_mask_hi_reg=0x%x\n",
12623 uerr_sta_lo, uerr_sta_hi,
12624 phba->sli4_hba.ue_mask_lo,
12625 phba->sli4_hba.ue_mask_hi);
12626 phba->work_status[0] = uerr_sta_lo;
12627 phba->work_status[1] = uerr_sta_hi;
12628 phba->work_ha |= HA_ERATT;
12629 phba->hba_flag |= HBA_ERATT_HANDLED;
12633 case LPFC_SLI_INTF_IF_TYPE_2:
12634 case LPFC_SLI_INTF_IF_TYPE_6:
12635 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
12636 &portstat_reg.word0) ||
12637 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
12639 phba->work_hs |= UNPLUG_ERR;
12640 phba->work_ha |= HA_ERATT;
12641 phba->hba_flag |= HBA_ERATT_HANDLED;
12644 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
12645 phba->work_status[0] =
12646 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
12647 phba->work_status[1] =
12648 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
12649 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12650 "2885 Port Status Event: "
12651 "port status reg 0x%x, "
12652 "port smphr reg 0x%x, "
12653 "error 1=0x%x, error 2=0x%x\n",
12654 portstat_reg.word0,
12656 phba->work_status[0],
12657 phba->work_status[1]);
12658 phba->work_ha |= HA_ERATT;
12659 phba->hba_flag |= HBA_ERATT_HANDLED;
12663 case LPFC_SLI_INTF_IF_TYPE_1:
12665 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12666 "2886 HBA Error Attention on unsupported "
12667 "if type %d.", if_type);
12675 * lpfc_sli_check_eratt - check error attention events
12676 * @phba: Pointer to HBA context.
12678 * This function is called from timer soft interrupt context to check HBA's
12679 * error attention register bit for error attention events.
12681 * This function returns 1 when there is Error Attention in the Host Attention
12682 * Register and returns 0 otherwise.
12685 lpfc_sli_check_eratt(struct lpfc_hba *phba)
12689 /* If somebody is waiting to handle an eratt, don't process it
12690 * here. The brdkill function will do this.
12692 if (phba->link_flag & LS_IGNORE_ERATT)
12695 /* Check if interrupt handler handles this ERATT */
12696 spin_lock_irq(&phba->hbalock);
12697 if (phba->hba_flag & HBA_ERATT_HANDLED) {
12698 /* Interrupt handler has handled ERATT */
12699 spin_unlock_irq(&phba->hbalock);
12704 * If there is deferred error attention, do not check for error
12707 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12708 spin_unlock_irq(&phba->hbalock);
12712 /* If PCI channel is offline, don't process it */
12713 if (unlikely(pci_channel_offline(phba->pcidev))) {
12714 spin_unlock_irq(&phba->hbalock);
12718 switch (phba->sli_rev) {
12719 case LPFC_SLI_REV2:
12720 case LPFC_SLI_REV3:
12721 /* Read chip Host Attention (HA) register */
12722 ha_copy = lpfc_sli_eratt_read(phba);
12724 case LPFC_SLI_REV4:
12725 /* Read device Unrecoverable Error (UERR) registers */
12726 ha_copy = lpfc_sli4_eratt_read(phba);
12729 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12730 "0299 Invalid SLI revision (%d)\n",
12735 spin_unlock_irq(&phba->hbalock);
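/*
 * Editor's sketch of the caller side: the kerneldoc above says this runs
 * from timer soft interrupt context, and a nonzero return is what hands
 * the event to the worker thread, e.g.:
 *
 *	if (lpfc_sli_check_eratt(phba))
 *		lpfc_worker_wake_up(phba);
 *
 * (lpfc_worker_wake_up() is the wakeup helper used elsewhere in this
 * file; the pairing shown here is illustrative.)
 */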
12741 * lpfc_intr_state_check - Check device state for interrupt handling
12742 * @phba: Pointer to HBA context.
12744 * This inline routine checks whether a device or its PCI slot is in a state
12745 * in which the interrupt should be handled.
12747 * This function returns 0 if the device or the PCI slot is in a state in which
12748 * the interrupt should be handled, otherwise -EIO.
12751 lpfc_intr_state_check(struct lpfc_hba *phba)
12753 /* If the pci channel is offline, ignore all the interrupts */
12754 if (unlikely(pci_channel_offline(phba->pcidev)))
12757 /* Update device level interrupt statistics */
12758 phba->sli.slistat.sli_intr++;
12760 /* Ignore all interrupts during initialization. */
12761 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12768 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
12769 * @irq: Interrupt number.
12770 * @dev_id: The device context pointer.
12772 * This function is directly called from the PCI layer as an interrupt
12773 * service routine when device with SLI-3 interface spec is enabled with
12774 * MSI-X multi-message interrupt mode and there are slow-path events in
12775 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
12776 * interrupt mode, this function is called as part of the device-level
12777 * interrupt handler. When the PCI slot is in error recovery or the HBA
12778 * is undergoing initialization, the interrupt handler will not process
12779 * the interrupt. The link attention and ELS ring attention events are
12780 * handled by the worker thread. The interrupt handler signals the worker
12781 * thread and returns for these events. This function is called without
12782 * any lock held. It gets the hbalock to access and update SLI data
12785 * This function returns IRQ_HANDLED when interrupt is handled else it
12786 * returns IRQ_NONE.
12789 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
12791 struct lpfc_hba *phba;
12792 uint32_t ha_copy, hc_copy;
12793 uint32_t work_ha_copy;
12794 unsigned long status;
12795 unsigned long iflag;
12798 MAILBOX_t *mbox, *pmbox;
12799 struct lpfc_vport *vport;
12800 struct lpfc_nodelist *ndlp;
12801 struct lpfc_dmabuf *mp;
12806 * Get the driver's phba structure from the dev_id and
12807 * assume the HBA is not interrupting.
12809 phba = (struct lpfc_hba *)dev_id;
12811 if (unlikely(!phba))
12815 * Stuff needs to be attended to when this function is invoked as an
12816 * individual interrupt handler in MSI-X multi-message interrupt mode
12818 if (phba->intr_type == MSIX) {
12819 /* Check device state for handling interrupt */
12820 if (lpfc_intr_state_check(phba))
12822 /* Need to read HA REG for slow-path events */
12823 spin_lock_irqsave(&phba->hbalock, iflag);
12824 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12826 /* If somebody is waiting to handle an eratt don't process it
12827 * here. The brdkill function will do this.
12829 if (phba->link_flag & LS_IGNORE_ERATT)
12830 ha_copy &= ~HA_ERATT;
12831 /* Check the need for handling ERATT in interrupt handler */
12832 if (ha_copy & HA_ERATT) {
12833 if (phba->hba_flag & HBA_ERATT_HANDLED)
12834 /* ERATT polling has handled ERATT */
12835 ha_copy &= ~HA_ERATT;
12837 /* Indicate interrupt handler handles ERATT */
12838 phba->hba_flag |= HBA_ERATT_HANDLED;
12842 * If there is deferred error attention, do not check for any
12845 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12846 spin_unlock_irqrestore(&phba->hbalock, iflag);
12850 /* Clear up only attention source related to slow-path */
12851 if (lpfc_readl(phba->HCregaddr, &hc_copy))
12854 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12855 HC_LAINT_ENA | HC_ERINT_ENA),
12857 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12859 writel(hc_copy, phba->HCregaddr);
12860 readl(phba->HAregaddr); /* flush */
12861 spin_unlock_irqrestore(&phba->hbalock, iflag);
12863 ha_copy = phba->ha_copy;
12865 work_ha_copy = ha_copy & phba->work_ha_mask;
12867 if (work_ha_copy) {
12868 if (work_ha_copy & HA_LATT) {
12869 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
12871 * Turn off Link Attention interrupts
12872 * until CLEAR_LA done
12874 spin_lock_irqsave(&phba->hbalock, iflag);
12875 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
12876 if (lpfc_readl(phba->HCregaddr, &control))
12878 control &= ~HC_LAINT_ENA;
12879 writel(control, phba->HCregaddr);
12880 readl(phba->HCregaddr); /* flush */
12881 spin_unlock_irqrestore(&phba->hbalock, iflag);
12884 work_ha_copy &= ~HA_LATT;
12887 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
12889 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
12890 * the only slow ring.
12892 status = (work_ha_copy &
12893 (HA_RXMASK << (4*LPFC_ELS_RING)));
12894 status >>= (4*LPFC_ELS_RING);
12895 if (status & HA_RXMASK) {
12896 spin_lock_irqsave(&phba->hbalock, iflag);
12897 if (lpfc_readl(phba->HCregaddr, &control))
12900 lpfc_debugfs_slow_ring_trc(phba,
12901 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
12903 (uint32_t)phba->sli.slistat.sli_intr);
12905 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
12906 lpfc_debugfs_slow_ring_trc(phba,
12907 "ISR Disable ring:"
12908 "pwork:x%x hawork:x%x wait:x%x",
12909 phba->work_ha, work_ha_copy,
12910 (uint32_t)((unsigned long)
12911 &phba->work_waitq));
12914 ~(HC_R0INT_ENA << LPFC_ELS_RING);
12915 writel(control, phba->HCregaddr);
12916 readl(phba->HCregaddr); /* flush */
12919 lpfc_debugfs_slow_ring_trc(phba,
12920 "ISR slow ring: pwork:"
12921 "x%x hawork:x%x wait:x%x",
12922 phba->work_ha, work_ha_copy,
12923 (uint32_t)((unsigned long)
12924 &phba->work_waitq));
12926 spin_unlock_irqrestore(&phba->hbalock, iflag);
12929 spin_lock_irqsave(&phba->hbalock, iflag);
12930 if (work_ha_copy & HA_ERATT) {
12931 if (lpfc_sli_read_hs(phba))
12934 * Check if there is a deferred error condition
12937 if ((HS_FFER1 & phba->work_hs) &&
12938 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12939 HS_FFER6 | HS_FFER7 | HS_FFER8) &
12941 phba->hba_flag |= DEFER_ERATT;
12942 /* Clear all interrupt enable conditions */
12943 writel(0, phba->HCregaddr);
12944 readl(phba->HCregaddr);
12948 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
12949 pmb = phba->sli.mbox_active;
12950 pmbox = &pmb->u.mb;
12952 vport = pmb->vport;
12954 /* First check out the status word */
12955 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12956 if (pmbox->mbxOwner != OWN_HOST) {
12957 spin_unlock_irqrestore(&phba->hbalock, iflag);
12959 * Stray Mailbox Interrupt, mbxCommand <cmd>
12960 * mbxStatus <status>
12962 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12963 "(%d):0304 Stray Mailbox "
12964 "Interrupt mbxCommand x%x "
12966 (vport ? vport->vpi : 0),
12969 /* clear mailbox attention bit */
12970 work_ha_copy &= ~HA_MBATT;
12972 phba->sli.mbox_active = NULL;
12973 spin_unlock_irqrestore(&phba->hbalock, iflag);
12974 phba->last_completion_time = jiffies;
12975 del_timer(&phba->sli.mbox_tmo);
12976 if (pmb->mbox_cmpl) {
12977 lpfc_sli_pcimem_bcopy(mbox, pmbox,
12979 if (pmb->out_ext_byte_len &&
12981 lpfc_sli_pcimem_bcopy(
12984 pmb->out_ext_byte_len);
12986 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12987 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12989 lpfc_debugfs_disc_trc(vport,
12990 LPFC_DISC_TRC_MBOX_VPORT,
12991 "MBOX dflt rpi: : "
12992 "status:x%x rpi:x%x",
12993 (uint32_t)pmbox->mbxStatus,
12994 pmbox->un.varWords[0], 0);
12996 if (!pmbox->mbxStatus) {
12997 mp = (struct lpfc_dmabuf *)
12999 ndlp = (struct lpfc_nodelist *)
13002 /* Reg_LOGIN of dflt RPI was
13003 * successful. Now let's get
13004 * rid of the RPI using the
13005 * same mbox buffer.
13007 lpfc_unreg_login(phba,
13009 pmbox->un.varWords[0],
13012 lpfc_mbx_cmpl_dflt_rpi;
13014 pmb->ctx_ndlp = ndlp;
13015 pmb->vport = vport;
13016 rc = lpfc_sli_issue_mbox(phba,
13019 if (rc != MBX_BUSY)
13020 lpfc_printf_log(phba,
13023 "0350 rc should have"
13024 "been MBX_BUSY\n");
13025 if (rc != MBX_NOT_FINISHED)
13026 goto send_current_mbox;
13030 &phba->pport->work_port_lock,
13032 phba->pport->work_port_events &=
13034 spin_unlock_irqrestore(
13035 &phba->pport->work_port_lock,
13038 /* Do NOT queue MBX_HEARTBEAT to the worker
13039 * thread for processing.
13041 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
13042 /* Process mbox now */
13043 phba->sli.mbox_active = NULL;
13044 phba->sli.sli_flag &=
13045 ~LPFC_SLI_MBOX_ACTIVE;
13046 if (pmb->mbox_cmpl)
13047 pmb->mbox_cmpl(phba, pmb);
13049 /* Queue to worker thread to process */
13050 lpfc_mbox_cmpl_put(phba, pmb);
13054 spin_unlock_irqrestore(&phba->hbalock, iflag);
13056 if ((work_ha_copy & HA_MBATT) &&
13057 (phba->sli.mbox_active == NULL)) {
13059 /* Process next mailbox command if there is one */
13061 rc = lpfc_sli_issue_mbox(phba, NULL,
13063 } while (rc == MBX_NOT_FINISHED);
13064 if (rc != MBX_SUCCESS)
13065 lpfc_printf_log(phba, KERN_ERR,
13067 "0349 rc should be "
13071 spin_lock_irqsave(&phba->hbalock, iflag);
13072 phba->work_ha |= work_ha_copy;
13073 spin_unlock_irqrestore(&phba->hbalock, iflag);
13074 lpfc_worker_wake_up(phba);
13076 return IRQ_HANDLED;
13078 spin_unlock_irqrestore(&phba->hbalock, iflag);
13079 return IRQ_HANDLED;
13081 } /* lpfc_sli_sp_intr_handler */
13084 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
13085 * @irq: Interrupt number.
13086 * @dev_id: The device context pointer.
13088 * This function is directly called from the PCI layer as an interrupt
13089 * service routine when device with SLI-3 interface spec is enabled with
13090 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
13091 * ring event in the HBA. However, when the device is enabled with either
13092 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13093 * device-level interrupt handler. When the PCI slot is in error recovery
13094 * or the HBA is undergoing initialization, the interrupt handler will not
13095 * process the interrupt. The SCSI FCP fast-path ring events are handled in
13096 * the interrupt context. This function is called without any lock held.
13097 * It gets the hbalock to access and update SLI data structures.
13099 * This function returns IRQ_HANDLED when interrupt is handled else it
13100 * returns IRQ_NONE.
13103 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
13105 struct lpfc_hba *phba;
13107 unsigned long status;
13108 unsigned long iflag;
13109 struct lpfc_sli_ring *pring;
13111 /* Get the driver's phba structure from the dev_id and
13112 * assume the HBA is not interrupting.
13114 phba = (struct lpfc_hba *) dev_id;
13116 if (unlikely(!phba))
13120 * Stuff needs to be attended to when this function is invoked as an
13121 * individual interrupt handler in MSI-X multi-message interrupt mode
13123 if (phba->intr_type == MSIX) {
13124 /* Check device state for handling interrupt */
13125 if (lpfc_intr_state_check(phba))
13127 /* Need to read HA REG for FCP ring and other ring events */
13128 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13129 return IRQ_HANDLED;
13130 /* Clear up only attention source related to fast-path */
13131 spin_lock_irqsave(&phba->hbalock, iflag);
13133 * If there is deferred error attention, do not check for
13136 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13137 spin_unlock_irqrestore(&phba->hbalock, iflag);
13140 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
13142 readl(phba->HAregaddr); /* flush */
13143 spin_unlock_irqrestore(&phba->hbalock, iflag);
13145 ha_copy = phba->ha_copy;
13148 * Process all events on FCP ring. Take the optimized path for FCP IO.
13150 ha_copy &= ~(phba->work_ha_mask);
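/*
 * Editor's note (derived from the shift arithmetic below): the Host
 * Attention register dedicates a 4-bit nibble to each SLI-3 ring, so
 * masking with (HA_RXMASK << (4 * ring)) and shifting right by
 * (4 * ring) isolates the receive-attention bits of that ring's nibble.
 */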
13152 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13153 status >>= (4*LPFC_FCP_RING);
13154 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
13155 if (status & HA_RXMASK)
13156 lpfc_sli_handle_fast_ring_event(phba, pring, status);
13158 if (phba->cfg_multi_ring_support == 2) {
13160 * Process all events on extra ring. Take the optimized path
13161 * for extra ring IO.
13163 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13164 status >>= (4*LPFC_EXTRA_RING);
13165 if (status & HA_RXMASK) {
13166 lpfc_sli_handle_fast_ring_event(phba,
13167 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
13171 return IRQ_HANDLED;
13172 } /* lpfc_sli_fp_intr_handler */
13175 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
13176 * @irq: Interrupt number.
13177 * @dev_id: The device context pointer.
13179 * This function is the HBA device-level interrupt handler to device with
13180 * SLI-3 interface spec, called from the PCI layer when either MSI or
13181 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
13182 * requires driver attention. This function invokes the slow-path interrupt
13183 * attention handling function and fast-path interrupt attention handling
13184 * function in turn to process the relevant HBA attention events. This
13185 * function is called without any lock held. It gets the hbalock to access
13186 * and update SLI data structures.
13188 * This function returns IRQ_HANDLED when interrupt is handled, else it
13189 * returns IRQ_NONE.
13192 lpfc_sli_intr_handler(int irq, void *dev_id)
13194 struct lpfc_hba *phba;
13195 irqreturn_t sp_irq_rc, fp_irq_rc;
13196 unsigned long status1, status2;
13200 * Get the driver's phba structure from the dev_id and
13201 * assume the HBA is not interrupting.
13203 phba = (struct lpfc_hba *) dev_id;
13205 if (unlikely(!phba))
13208 /* Check device state for handling interrupt */
13209 if (lpfc_intr_state_check(phba))
13212 spin_lock(&phba->hbalock);
13213 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
13214 spin_unlock(&phba->hbalock);
13215 return IRQ_HANDLED;
13218 if (unlikely(!phba->ha_copy)) {
13219 spin_unlock(&phba->hbalock);
13221 } else if (phba->ha_copy & HA_ERATT) {
13222 if (phba->hba_flag & HBA_ERATT_HANDLED)
13223 /* ERATT polling has handled ERATT */
13224 phba->ha_copy &= ~HA_ERATT;
13226 /* Indicate interrupt handler handles ERATT */
13227 phba->hba_flag |= HBA_ERATT_HANDLED;
13231 * If there is deferred error attention, do not check for any interrupt.
13233 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13234 spin_unlock(&phba->hbalock);
13238 /* Clear attention sources except link and error attentions */
13239 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
13240 spin_unlock(&phba->hbalock);
13241 return IRQ_HANDLED;
13243 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
13244 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
13246 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
13247 writel(hc_copy, phba->HCregaddr);
13248 readl(phba->HAregaddr); /* flush */
13249 spin_unlock(&phba->hbalock);
13252 * Invokes slow-path host attention interrupt handling as appropriate.
13255 /* status of events with mailbox and link attention */
13256 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
13258 /* status of events with ELS ring */
13259 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
13260 status2 >>= (4*LPFC_ELS_RING);
13262 if (status1 || (status2 & HA_RXMASK))
13263 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
13265 sp_irq_rc = IRQ_NONE;
13268 * Invoke fast-path host attention interrupt handling as appropriate.
13271 /* status of events with FCP ring */
13272 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13273 status1 >>= (4*LPFC_FCP_RING);
13275 /* status of events with extra ring */
13276 if (phba->cfg_multi_ring_support == 2) {
13277 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13278 status2 >>= (4*LPFC_EXTRA_RING);
13282 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
13283 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
13285 fp_irq_rc = IRQ_NONE;
13287 /* Return device-level interrupt handling status */
13288 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
13289 } /* lpfc_sli_intr_handler */
13292 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
13293 * @phba: pointer to lpfc hba data structure.
13295 * This routine is invoked by the worker thread to process all the pending
13296 * SLI4 els abort xri events.
13298 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
13300 struct lpfc_cq_event *cq_event;
13301 unsigned long iflags;
13303 /* First, declare the els xri abort event has been handled */
13304 spin_lock_irqsave(&phba->hbalock, iflags);
13305 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
13306 spin_unlock_irqrestore(&phba->hbalock, iflags);
13308 /* Now, handle all the els xri abort events */
13309 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
13310 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
13311 /* Get the first event from the head of the event queue */
13312 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
13313 cq_event, struct lpfc_cq_event, list);
13314 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
13316 /* Notify aborted XRI for ELS work queue */
13317 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
13319 /* Free the event processed back to the free pool */
13320 lpfc_sli4_cq_event_release(phba, cq_event);
13321 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
13324 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
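/*
 * Editor's note: the loop above is the standard "drain a locked list"
 * pattern - pop one entry under the list lock, drop the lock for the
 * potentially lengthy per-entry work, then retake it to test for more
 * entries.  A minimal sketch of the same shape, with process() standing
 * in for the real handler:
 *
 *	spin_lock_irqsave(&lock, iflags);
 *	while (!list_empty(&queue)) {
 *		list_remove_head(&queue, entry, struct lpfc_cq_event, list);
 *		spin_unlock_irqrestore(&lock, iflags);
 *		process(entry);
 *		spin_lock_irqsave(&lock, iflags);
 *	}
 *	spin_unlock_irqrestore(&lock, iflags);
 */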
13328 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
13329 * @phba: pointer to lpfc hba data structure
13330 * @pIocbIn: pointer to the rspiocbq
13331 * @pIocbOut: pointer to the cmdiocbq
13332 * @wcqe: pointer to the complete wcqe
13334 * This routine transfers the fields of a command iocbq to a response iocbq
13335 * by copying all the IOCB fields from command iocbq and transferring the
13336 * completion status information from the complete wcqe.
13339 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
13340 struct lpfc_iocbq *pIocbIn,
13341 struct lpfc_iocbq *pIocbOut,
13342 struct lpfc_wcqe_complete *wcqe)
13345 unsigned long iflags;
13346 uint32_t status, max_response;
13347 struct lpfc_dmabuf *dmabuf;
13348 struct ulp_bde64 *bpl, bde;
13349 size_t offset = offsetof(struct lpfc_iocbq, iocb);
13351 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
13352 sizeof(struct lpfc_iocbq) - offset);
13353 /* Map WCQE parameters into irspiocb parameters */
13354 status = bf_get(lpfc_wcqe_c_status, wcqe);
13355 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
13356 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
13357 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
13358 pIocbIn->iocb.un.fcpi.fcpi_parm =
13359 pIocbOut->iocb.un.fcpi.fcpi_parm -
13360 wcqe->total_data_placed;
13362 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
13364 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
13365 switch (pIocbOut->iocb.ulpCommand) {
13366 case CMD_ELS_REQUEST64_CR:
13367 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13368 bpl = (struct ulp_bde64 *)dmabuf->virt;
13369 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
13370 max_response = bde.tus.f.bdeSize;
13372 case CMD_GEN_REQUEST64_CR:
13374 if (!pIocbOut->context3)
13376 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
13377 sizeof(struct ulp_bde64);
13378 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13379 bpl = (struct ulp_bde64 *)dmabuf->virt;
13380 for (i = 0; i < numBdes; i++) {
13381 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
13382 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
13383 max_response += bde.tus.f.bdeSize;
13387 max_response = wcqe->total_data_placed;
13390 if (max_response < wcqe->total_data_placed)
13391 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
13393 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
13394 wcqe->total_data_placed;
13397 /* Convert BG errors for completion status */
13398 if (status == CQE_STATUS_DI_ERROR) {
13399 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
13401 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
13402 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
13404 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
13406 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
13407 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
13408 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13409 BGS_GUARD_ERR_MASK;
13410 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
13411 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13412 BGS_APPTAG_ERR_MASK;
13413 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
13414 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13415 BGS_REFTAG_ERR_MASK;
13417 /* Check to see if there was any good data before the error */
13418 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
13419 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13420 BGS_HI_WATER_MARK_PRESENT_MASK;
13421 pIocbIn->iocb.unsli3.sli3_bg.bghm =
13422 wcqe->total_data_placed;
13426 * Set ALL the error bits to indicate we don't know what
13427 * type of error it is.
13429 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
13430 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13431 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
13432 BGS_GUARD_ERR_MASK);
13435 /* Pick up HBA exchange busy condition */
13436 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
13437 spin_lock_irqsave(&phba->hbalock, iflags);
13438 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
13439 spin_unlock_irqrestore(&phba->hbalock, iflags);
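/*
 * Worked example (editor's addition): for a command iocb whose
 * un.fcpi.fcpi_parm was 4096 and a WCQE reporting total_data_placed of
 * 1024, the assignment above yields a response fcpi_parm of
 * 4096 - 1024 = 3072.  The subtraction comes straight from the code and
 * is applied only to FCP iocbs whose mapped ulpStatus is
 * IOSTAT_FCP_RSP_ERROR.
 */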
13444 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
13445 * @phba: Pointer to HBA context object.
13446 * @irspiocbq: Pointer to work-queue completion queue entry.
13448 * This routine handles an ELS work-queue completion event and constructs
13449 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
13450 * discovery engine to handle.
13452 * Return: Pointer to the receive IOCBQ, NULL otherwise.
13454 static struct lpfc_iocbq *
13455 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
13456 struct lpfc_iocbq *irspiocbq)
13458 struct lpfc_sli_ring *pring;
13459 struct lpfc_iocbq *cmdiocbq;
13460 struct lpfc_wcqe_complete *wcqe;
13461 unsigned long iflags;
13463 pring = lpfc_phba_elsring(phba);
13464 if (unlikely(!pring))
13467 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
13468 pring->stats.iocb_event++;
13469 /* Look up the ELS command IOCB and create pseudo response IOCB */
13470 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13471 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13472 if (unlikely(!cmdiocbq)) {
13473 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13474 "0386 ELS complete with no corresponding "
13475 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13476 wcqe->word0, wcqe->total_data_placed,
13477 wcqe->parameter, wcqe->word3);
13478 lpfc_sli_release_iocbq(phba, irspiocbq);
13482 spin_lock_irqsave(&pring->ring_lock, iflags);
13483 /* Put the iocb back on the txcmplq */
13484 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13485 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13487 /* Fake the irspiocbq and copy necessary response information */
13488 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
13493 inline struct lpfc_cq_event *
13494 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13496 struct lpfc_cq_event *cq_event;
13498 /* Allocate a new internal CQ_EVENT entry */
13499 cq_event = lpfc_sli4_cq_event_alloc(phba);
13501 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13502 "0602 Failed to alloc CQ_EVENT entry\n");
13506 /* Move the CQE into the event */
13507 memcpy(&cq_event->cqe, entry, size);
13512 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
13513 * @phba: Pointer to HBA context object.
13514 * @mcqe: Pointer to mailbox completion queue entry.
13516 * This routine processes a mailbox completion queue entry with an asynchronous
13519 * Return: true if work posted to worker thread, otherwise false.
13522 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13524 struct lpfc_cq_event *cq_event;
13525 unsigned long iflags;
13527 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13528 "0392 Async Event: word0:x%x, word1:x%x, "
13529 "word2:x%x, word3:x%x\n", mcqe->word0,
13530 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13532 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13536 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
13537 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13538 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
13540 /* Set the async event flag */
13541 spin_lock_irqsave(&phba->hbalock, iflags);
13542 phba->hba_flag |= ASYNC_EVENT;
13543 spin_unlock_irqrestore(&phba->hbalock, iflags);
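/*
 * Editor's note: posting is two-phase - the CQ event is queued on
 * sp_asynce_work_queue under the async list lock, then ASYNC_EVENT is set
 * under hbalock so the worker thread knows to drain that queue.  The
 * worker side of the handshake clears its flag before consuming the list;
 * lpfc_sli4_els_xri_abort_event_proc() above shows the same shape.
 */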
13549 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
13550 * @phba: Pointer to HBA context object.
13551 * @mcqe: Pointer to mailbox completion queue entry.
13553 * This routine processes a mailbox completion queue entry with a mailbox
13554 * completion event.
13556 * Return: true if work posted to worker thread, otherwise false.
13559 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13561 uint32_t mcqe_status;
13562 MAILBOX_t *mbox, *pmbox;
13563 struct lpfc_mqe *mqe;
13564 struct lpfc_vport *vport;
13565 struct lpfc_nodelist *ndlp;
13566 struct lpfc_dmabuf *mp;
13567 unsigned long iflags;
13569 bool workposted = false;
13572 /* If not a mailbox-complete MCQE, bail out after checking the mailbox-consumed flag */
13573 if (!bf_get(lpfc_trailer_completed, mcqe))
13574 goto out_no_mqe_complete;
13576 /* Get the reference to the active mbox command */
13577 spin_lock_irqsave(&phba->hbalock, iflags);
13578 pmb = phba->sli.mbox_active;
13579 if (unlikely(!pmb)) {
13580 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13581 "1832 No pending MBOX command to handle\n");
13582 spin_unlock_irqrestore(&phba->hbalock, iflags);
13583 goto out_no_mqe_complete;
13585 spin_unlock_irqrestore(&phba->hbalock, iflags);
13587 pmbox = (MAILBOX_t *)&pmb->u.mqe;
13589 vport = pmb->vport;
13591 /* Reset heartbeat timer */
13592 phba->last_completion_time = jiffies;
13593 del_timer(&phba->sli.mbox_tmo);
13595 /* Move mbox data to caller's mailbox region, do endian swapping */
13596 if (pmb->mbox_cmpl && mbox)
13597 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
13600 * For mcqe errors, conditionally move a modified error code to
13601 * the mbox so that the error will not be missed.
13603 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13604 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13605 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13606 bf_set(lpfc_mqe_status, mqe,
13607 (LPFC_MBX_ERROR_RANGE | mcqe_status));
13609 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13610 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13611 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13612 "MBOX dflt rpi: status:x%x rpi:x%x",
13614 pmbox->un.varWords[0], 0);
13615 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
13616 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13617 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
13618 /* Reg_LOGIN of dflt RPI was successful. Now let's get
13619 * rid of the RPI using the same mbox buffer.
13621 lpfc_unreg_login(phba, vport->vpi,
13622 pmbox->un.varWords[0], pmb);
13623 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
13626 /* No reference taken here. This is a default
13627 * RPI reg/immediate unreg cycle. The reference was
13628 * taken in the reg rpi path and is released when
13629 * this mailbox completes.
13631 pmb->ctx_ndlp = ndlp;
13632 pmb->vport = vport;
13633 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
13634 if (rc != MBX_BUSY)
13635 lpfc_printf_log(phba, KERN_ERR,
13638 "have been MBX_BUSY\n");
13639 if (rc != MBX_NOT_FINISHED)
13640 goto send_current_mbox;
13643 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
13644 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
13645 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
13647 /* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. */
13648 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
13649 spin_lock_irqsave(&phba->hbalock, iflags);
13650 /* Release the mailbox command posting token */
13651 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13652 phba->sli.mbox_active = NULL;
13653 if (bf_get(lpfc_trailer_consumed, mcqe))
13654 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13655 spin_unlock_irqrestore(&phba->hbalock, iflags);
13657 /* Post the next mbox command, if there is one */
13658 lpfc_sli4_post_async_mbox(phba);
13660 /* Process cmpl now */
13661 if (pmb->mbox_cmpl)
13662 pmb->mbox_cmpl(phba, pmb);
13666 /* There is mailbox completion work to queue to the worker thread */
13667 spin_lock_irqsave(&phba->hbalock, iflags);
13668 __lpfc_mbox_cmpl_put(phba, pmb);
13669 phba->work_ha |= HA_MBATT;
13670 spin_unlock_irqrestore(&phba->hbalock, iflags);
13674 spin_lock_irqsave(&phba->hbalock, iflags);
13675 /* Release the mailbox command posting token */
13676 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13677 /* Setting the active mailbox pointer needs to be in sync with the flag clear */
13678 phba->sli.mbox_active = NULL;
13679 if (bf_get(lpfc_trailer_consumed, mcqe))
13680 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13681 spin_unlock_irqrestore(&phba->hbalock, iflags);
13682 /* Wake up worker thread to post the next pending mailbox command */
13683 lpfc_worker_wake_up(phba);
13686 out_no_mqe_complete:
13687 spin_lock_irqsave(&phba->hbalock, iflags);
13688 if (bf_get(lpfc_trailer_consumed, mcqe))
13689 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13690 spin_unlock_irqrestore(&phba->hbalock, iflags);
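/*
 * Illustrative sketch, not part of the driver: the two completion paths
 * taken above. MBX_HEARTBEAT is finished inline in interrupt context
 * (release the posting token, repost the next mailbox, run the cmpl
 * callback); every other command is queued to the worker thread, which
 * then owns running the completion. The helper name sketch_release_
 * mbox_token is hypothetical and locking is elided for brevity.
 *
 *	if (pmbox->mbxCommand == MBX_HEARTBEAT) {
 *		sketch_release_mbox_token(phba, mcqe);
 *		lpfc_sli4_post_async_mbox(phba);
 *		if (pmb->mbox_cmpl)
 *			pmb->mbox_cmpl(phba, pmb);
 *	} else {
 *		__lpfc_mbox_cmpl_put(phba, pmb);
 *		phba->work_ha |= HA_MBATT;
 *		lpfc_worker_wake_up(phba);
 *	}
 */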
13695 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
13696 * @phba: Pointer to HBA context object.
13697 * @cq: Pointer to associated CQ
13698 * @cqe: Pointer to mailbox completion queue entry.
13700 * This routine processes a mailbox completion queue entry and invokes the
13701 * proper mailbox completion handling or asynchronous event handling routine
13702 * according to the MCQE's async bit.
13704 * Return: true if work posted to worker thread, otherwise false.
13707 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13708 struct lpfc_cqe *cqe)
13710 struct lpfc_mcqe mcqe;
13715 /* Copy the mailbox MCQE and convert endian order as needed */
13716 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
13718 /* Invoke the proper event handling routine */
13719 if (!bf_get(lpfc_trailer_async, &mcqe))
13720 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
13722 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
13727 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
13728 * @phba: Pointer to HBA context object.
13729 * @cq: Pointer to associated CQ
13730 * @wcqe: Pointer to work-queue completion queue entry.
13732 * This routine handles an ELS work-queue completion event.
13734 * Return: true if work posted to worker thread, otherwise false.
13737 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13738 struct lpfc_wcqe_complete *wcqe)
13740 struct lpfc_iocbq *irspiocbq;
13741 unsigned long iflags;
13742 struct lpfc_sli_ring *pring = cq->pring;
13744 int txcmplq_cnt = 0;
13746 /* Check for response status */
13747 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13748 /* Log the error status */
13749 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13750 "0357 ELS CQE error: status=x%x: "
13751 "CQE: %08x %08x %08x %08x\n",
13752 bf_get(lpfc_wcqe_c_status, wcqe),
13753 wcqe->word0, wcqe->total_data_placed,
13754 wcqe->parameter, wcqe->word3);
13757 /* Get an irspiocbq for later ELS response processing use */
13758 irspiocbq = lpfc_sli_get_iocbq(phba);
13760 if (!list_empty(&pring->txq))
13762 if (!list_empty(&pring->txcmplq))
13764 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13765 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
13766 "els_txcmplq_cnt=%d\n",
13767 txq_cnt, phba->iocb_cnt,
13772 /* Save off the slow-path queue event for work thread to process */
13773 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
13774 spin_lock_irqsave(&phba->hbalock, iflags);
13775 list_add_tail(&irspiocbq->cq_event.list,
13776 &phba->sli4_hba.sp_queue_event);
13777 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13778 spin_unlock_irqrestore(&phba->hbalock, iflags);
13784 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
13785 * @phba: Pointer to HBA context object.
13786 * @wcqe: Pointer to work-queue completion queue entry.
13788 * This routine handles a slow-path WQ entry consumed event by invoking the
13789 * proper WQ release routine to the slow-path WQ.
13792 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13793 struct lpfc_wcqe_release *wcqe)
13795 /* sanity check on queue memory */
13796 if (unlikely(!phba->sli4_hba.els_wq))
13798 /* Check for the slow-path ELS work queue */
13799 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13800 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13801 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13803 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13804 "2579 Slow-path wqe consume event carries "
13805 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13806 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
13807 phba->sli4_hba.els_wq->queue_id);
13811 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an XRI abort event
13812 * @phba: Pointer to HBA context object.
13813 * @cq: Pointer to a WQ completion queue.
13814 * @wcqe: Pointer to work-queue completion queue entry.
13816 * This routine handles an XRI abort event.
13818 * Return: true if work posted to worker thread, otherwise false.
13821 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13822 struct lpfc_queue *cq,
13823 struct sli4_wcqe_xri_aborted *wcqe)
13825 bool workposted = false;
13826 struct lpfc_cq_event *cq_event;
13827 unsigned long iflags;
13829 switch (cq->subtype) {
13831 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
13832 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13833 /* Notify aborted XRI for NVME work queue */
13834 if (phba->nvmet_support)
13835 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13837 workposted = false;
13839 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
13841 cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
13843 workposted = false;
13846 cq_event->hdwq = cq->hdwq;
13847 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
13849 list_add_tail(&cq_event->list,
13850 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13851 /* Set the els xri abort event flag */
13852 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13853 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
13858 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13859 "0603 Invalid CQ subtype %d: "
13860 "%08x %08x %08x %08x\n",
13861 cq->subtype, wcqe->word0, wcqe->parameter,
13862 wcqe->word2, wcqe->word3);
13863 workposted = false;
13869 #define FC_RCTL_MDS_DIAGS 0xF4
13872 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
13873 * @phba: Pointer to HBA context object.
13874 * @rcqe: Pointer to receive-queue completion queue entry.
13876 * This routine processes a receive-queue completion queue entry.
13878 * Return: true if work posted to worker thread, otherwise false.
13881 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13883 bool workposted = false;
13884 struct fc_frame_header *fc_hdr;
13885 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13886 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
13887 struct lpfc_nvmet_tgtport *tgtp;
13888 struct hbq_dmabuf *dma_buf;
13889 uint32_t status, rq_id;
13890 unsigned long iflags;
13892 /* sanity check on queue memory */
13893 if (unlikely(!hrq) || unlikely(!drq))
13896 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13897 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13899 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13900 if (rq_id != hrq->queue_id)
13903 status = bf_get(lpfc_rcqe_status, rcqe);
13905 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13906 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13907 "2537 Receive Frame Truncated!!\n");
13909 case FC_STATUS_RQ_SUCCESS:
13910 spin_lock_irqsave(&phba->hbalock, iflags);
13911 lpfc_sli4_rq_release(hrq, drq);
13912 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13914 hrq->RQ_no_buf_found++;
13915 spin_unlock_irqrestore(&phba->hbalock, iflags);
13919 hrq->RQ_buf_posted--;
13920 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
13922 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13924 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
13925 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
13926 spin_unlock_irqrestore(&phba->hbalock, iflags);
13927 /* Handle MDS Loopback frames */
13928 if (!(phba->pport->load_flag & FC_UNLOADING))
13929 lpfc_sli4_handle_mds_loopback(phba->pport,
13932 lpfc_in_buf_free(phba, &dma_buf->dbuf);
13936 /* save off the frame for the work thread to process */
13937 list_add_tail(&dma_buf->cq_event.list,
13938 &phba->sli4_hba.sp_queue_event);
13939 /* Frame received */
13940 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13941 spin_unlock_irqrestore(&phba->hbalock, iflags);
13944 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13945 if (phba->nvmet_support) {
13946 tgtp = phba->targetport->private;
13947 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13948 "6402 RQE Error x%x, posted %d err_cnt "
13950 status, hrq->RQ_buf_posted,
13951 hrq->RQ_no_posted_buf,
13952 atomic_read(&tgtp->rcv_fcp_cmd_in),
13953 atomic_read(&tgtp->rcv_fcp_cmd_out),
13954 atomic_read(&tgtp->xmt_fcp_release));
13958 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13959 hrq->RQ_no_posted_buf++;
13960 /* Post more buffers if possible */
13961 spin_lock_irqsave(&phba->hbalock, iflags);
13962 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13963 spin_unlock_irqrestore(&phba->hbalock, iflags);
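/*
 * Illustrative sketch, not part of the driver: the receive-queue buffer
 * accounting used above. Each posted header/data buffer pair is consumed
 * per RCQE; when the port reports buffer exhaustion the handler only
 * records the fact and sets HBA_POST_RECEIVE_BUFFER so the worker thread
 * can repost buffers outside interrupt context. Locking and the frame
 * hand-off are elided.
 *
 *	switch (bf_get(lpfc_rcqe_status, rcqe)) {
 *	case FC_STATUS_RQ_SUCCESS:
 *		lpfc_sli4_rq_release(hrq, drq);	   consume one RQE pair
 *		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
 *		if (dma_buf)
 *			hrq->RQ_buf_posted--;	   one fewer posted buffer
 *		else
 *			hrq->RQ_no_buf_found++;	   frame dropped, no buffer
 *		break;
 *	case FC_STATUS_INSUFF_BUF_NEED_BUF:
 *		hrq->RQ_no_posted_buf++;
 *		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;   worker reposts
 *		break;
 *	}
 */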
13972 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
13973 * @phba: Pointer to HBA context object.
13974 * @cq: Pointer to the completion queue.
13975 * @cqe: Pointer to a completion queue entry.
13977 * This routine processes a slow-path work-queue or receive-queue completion queue entry.
13980 * Return: true if work posted to worker thread, otherwise false.
13983 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13984 struct lpfc_cqe *cqe)
13986 struct lpfc_cqe cqevt;
13987 bool workposted = false;
13989 /* Copy the work queue CQE and convert endian order if needed */
13990 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
13992 /* Check and process for different type of WCQE and dispatch */
13993 switch (bf_get(lpfc_cqe_code, &cqevt)) {
13994 case CQE_CODE_COMPL_WQE:
13995 /* Process the WQ/RQ complete event */
13996 phba->last_completion_time = jiffies;
13997 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
13998 (struct lpfc_wcqe_complete *)&cqevt);
14000 case CQE_CODE_RELEASE_WQE:
14001 /* Process the WQ release event */
14002 lpfc_sli4_sp_handle_rel_wcqe(phba,
14003 (struct lpfc_wcqe_release *)&cqevt);
14005 case CQE_CODE_XRI_ABORTED:
14006 /* Process the WQ XRI abort event */
14007 phba->last_completion_time = jiffies;
14008 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14009 (struct sli4_wcqe_xri_aborted *)&cqevt);
14011 case CQE_CODE_RECEIVE:
14012 case CQE_CODE_RECEIVE_V1:
14013 /* Process the RQ event */
14014 phba->last_completion_time = jiffies;
14015 workposted = lpfc_sli4_sp_handle_rcqe(phba,
14016 (struct lpfc_rcqe *)&cqevt);
14019 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14020 "0388 Not a valid WCQE code: x%x\n",
14021 bf_get(lpfc_cqe_code, &cqevt));
14028 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
14029 * @phba: Pointer to HBA context object.
14030 * @eqe: Pointer to fast-path event queue entry.
14031 * @speq: Pointer to slow-path event queue.
14033 * This routine processes an event queue entry from the slow-path event queue.
14034 * It checks the MajorCode and MinorCode to determine whether this is a
14035 * completion event on a completion queue; if not, an error is logged and the
14036 * routine simply returns. Otherwise, it gets the corresponding completion
14037 * queue, processes all the entries on that completion queue, rearms the
14038 * completion queue, and then returns.
14042 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
14043 struct lpfc_queue *speq)
14045 struct lpfc_queue *cq = NULL, *childq;
14049 /* Get the reference to the corresponding CQ */
14050 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14052 list_for_each_entry(childq, &speq->child_list, list) {
14053 if (childq->queue_id == cqid) {
14058 if (unlikely(!cq)) {
14059 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
14060 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14061 "0365 Slow-path CQ identifier "
14062 "(%d) does not exist\n", cqid);
14066 /* Save EQ associated with this CQ */
14067 cq->assoc_qp = speq;
14069 if (is_kdump_kernel())
14070 ret = queue_work(phba->wq, &cq->spwork);
14072 ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
14075 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14076 "0390 Cannot schedule queue work "
14077 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14078 cqid, cq->queue_id, raw_smp_processor_id());
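/*
 * Illustrative sketch, not part of the driver: the CPU-placement rule used
 * when handing a CQ to the workqueue. Under kdump only one CPU may be
 * online, so the work item is left unbound; otherwise it is pinned to the
 * CPU (cq->chann) associated with the EQ that raised the event, keeping
 * the completion processing cache-warm on the interrupted CPU.
 *
 *	if (is_kdump_kernel())
 *		ret = queue_work(phba->wq, &cq->spwork);
 *	else
 *		ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
 */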
14082 * __lpfc_sli4_process_cq - Process elements of a CQ
14083 * @phba: Pointer to HBA context object.
14084 * @cq: Pointer to CQ to be processed
14085 * @handler: Routine to process each cqe
14086 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
14087 * @poll_mode: Polling mode we were called from
14089 * This routine processes completion queue entries in a CQ. While a valid
14090 * queue element is found, the handler is called. During processing checks
14091 * are made for periodic doorbell writes to let the hardware know of
14092 * element consumption.
14094 * If the max limit on cqes to process is hit, or there are no more valid
14095 * entries, the loop stops. If we processed a sufficient number of elements,
14096 * meaning there is sufficient load, rather than rearming and generating
14097 * another interrupt, a cq rescheduling delay will be set. A delay of 0
14098 * indicates no rescheduling.
14100 * Returns true if work was scheduled, false otherwise.
14103 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
14104 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
14105 struct lpfc_cqe *), unsigned long *delay,
14106 enum lpfc_poll_mode poll_mode)
14108 struct lpfc_cqe *cqe;
14109 bool workposted = false;
14110 int count = 0, consumed = 0;
14113 /* default - no reschedule */
14116 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
14117 goto rearm_and_exit;
14119 /* Process all the entries to the CQ */
14121 cqe = lpfc_sli4_cq_get(cq);
14123 workposted |= handler(phba, cq, cqe);
14124 __lpfc_sli4_consume_cqe(phba, cq, cqe);
14127 if (!(++count % cq->max_proc_limit))
14130 if (!(count % cq->notify_interval)) {
14131 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14134 cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
14137 if (count == LPFC_NVMET_CQ_NOTIFY)
14138 cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
14140 cqe = lpfc_sli4_cq_get(cq);
14142 if (count >= phba->cfg_cq_poll_threshold) {
14147 /* Note: complete the irq_poll softirq before rearming CQ */
14148 if (poll_mode == LPFC_IRQ_POLL)
14149 irq_poll_complete(&cq->iop);
14151 /* Track the max number of CQEs processed in 1 EQ */
14152 if (count > cq->CQ_max_cqe)
14153 cq->CQ_max_cqe = count;
14155 cq->assoc_qp->EQ_cqe_cnt += count;
14157 /* Catch the no cq entry condition */
14158 if (unlikely(count == 0))
14159 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14160 "0369 No entry from completion queue "
14161 "qid=%d\n", cq->queue_id);
14163 xchg(&cq->queue_claimed, 0);
14166 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14167 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
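/*
 * Illustrative sketch, not part of the driver: the shape of the consume
 * loop above. Entries are handled one at a time; every notify_interval
 * entries the CQ doorbell is written without rearming so the port can
 * reuse the freed CQEs, and only the final write may rearm. The helper
 * name sketch_drain_cq is hypothetical; the per-entry handler call and
 * the poll-threshold rescheduling are elided.
 *
 *	static void sketch_drain_cq(struct lpfc_hba *phba, struct lpfc_queue *cq)
 *	{
 *		struct lpfc_cqe *cqe;
 *		int count = 0, consumed = 0;
 *
 *		while ((cqe = lpfc_sli4_cq_get(cq)) != NULL) {
 *			__lpfc_sli4_consume_cqe(phba, cq, cqe);
 *			consumed++;
 *			if (!(++count % cq->notify_interval)) {
 *				phba->sli4_hba.sli4_write_cq_db(phba, cq,
 *						consumed, LPFC_QUEUE_NOARM);
 *				consumed = 0;
 *			}
 *			if (count >= cq->max_proc_limit)
 *				break;	   reschedule instead of hogging the CPU
 *		}
 *		phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
 *						LPFC_QUEUE_REARM);
 *	}
 */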
14173 * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
14174 * @cq: pointer to CQ to process
14176 * This routine calls the cq processing routine with a handler specific
14177 * to the type of queue bound to it.
14179 * The CQ routine returns two values: the first is the calling status,
14180 * which indicates whether work was queued to the background discovery
14181 * thread. If true, the routine should wake up the discovery thread;
14182 * the second is the delay parameter. If non-zero, rather than rearming
14183 * the CQ and yet another interrupt, the CQ handler should be queued so
14184 * that it is processed in a subsequent polling action. The value of
14185 * the delay indicates when to reschedule it.
14188 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
14190 struct lpfc_hba *phba = cq->phba;
14191 unsigned long delay;
14192 bool workposted = false;
14195 /* Process and rearm the CQ */
14196 switch (cq->type) {
14198 workposted |= __lpfc_sli4_process_cq(phba, cq,
14199 lpfc_sli4_sp_handle_mcqe,
14200 &delay, LPFC_QUEUE_WORK);
14203 if (cq->subtype == LPFC_IO)
14204 workposted |= __lpfc_sli4_process_cq(phba, cq,
14205 lpfc_sli4_fp_handle_cqe,
14206 &delay, LPFC_QUEUE_WORK);
14208 workposted |= __lpfc_sli4_process_cq(phba, cq,
14209 lpfc_sli4_sp_handle_cqe,
14210 &delay, LPFC_QUEUE_WORK);
14213 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14214 "0370 Invalid completion queue type (%d)\n",
14220 if (is_kdump_kernel())
14221 ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
14224 ret = queue_delayed_work_on(cq->chann, phba->wq,
14225 &cq->sched_spwork, delay);
14227 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14228 "0394 Cannot schedule queue work "
14229 "for cqid=%d on CPU %d\n",
14230 cq->queue_id, cq->chann);
14233 /* wake up worker thread if there is work to be done */
14235 lpfc_worker_wake_up(phba);
14239 * lpfc_sli4_sp_process_cq - slow-path work handler when started by worker thread
14241 * @work: pointer to work element
14243 * Translates from the work handler and calls the slow-path handler.
14246 lpfc_sli4_sp_process_cq(struct work_struct *work)
14248 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
14250 __lpfc_sli4_sp_process_cq(cq);
14254 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
14255 * @work: pointer to work element
14257 * Translates from the work handler and calls the slow-path handler.
14260 lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
14262 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14263 struct lpfc_queue, sched_spwork);
14265 __lpfc_sli4_sp_process_cq(cq);
14269 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
14270 * @phba: Pointer to HBA context object.
14271 * @cq: Pointer to associated CQ
14272 * @wcqe: Pointer to work-queue completion queue entry.
14274 * This routine processes a fast-path work queue completion entry from a fast-path
14275 * event queue for FCP command response completion.
14278 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14279 struct lpfc_wcqe_complete *wcqe)
14281 struct lpfc_sli_ring *pring = cq->pring;
14282 struct lpfc_iocbq *cmdiocbq;
14283 struct lpfc_iocbq irspiocbq;
14284 unsigned long iflags;
14286 /* Check for response status */
14287 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14288 /* If resource errors reported from HBA, reduce queue
14289 * depth of the SCSI device.
14291 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
14292 IOSTAT_LOCAL_REJECT)) &&
14293 ((wcqe->parameter & IOERR_PARAM_MASK) ==
14294 IOERR_NO_RESOURCES))
14295 phba->lpfc_rampdown_queue_depth(phba);
14297 /* Log the cmpl status */
14298 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14299 "0373 FCP CQE cmpl: status=x%x: "
14300 "CQE: %08x %08x %08x %08x\n",
14301 bf_get(lpfc_wcqe_c_status, wcqe),
14302 wcqe->word0, wcqe->total_data_placed,
14303 wcqe->parameter, wcqe->word3);
14306 /* Look up the FCP command IOCB and create pseudo response IOCB */
14307 spin_lock_irqsave(&pring->ring_lock, iflags);
14308 pring->stats.iocb_event++;
14309 spin_unlock_irqrestore(&pring->ring_lock, iflags);
14310 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
14311 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14312 if (unlikely(!cmdiocbq)) {
14313 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14314 "0374 FCP complete with no corresponding "
14315 "cmdiocb: iotag (%d)\n",
14316 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14319 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
14320 cmdiocbq->isr_timestamp = cq->isr_timestamp;
14322 if (cmdiocbq->iocb_cmpl == NULL) {
14323 if (cmdiocbq->wqe_cmpl) {
14324 /* For FCP the flag is cleared in wqe_cmpl */
14325 if (!(cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
14326 cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
14327 spin_lock_irqsave(&phba->hbalock, iflags);
14328 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
14329 spin_unlock_irqrestore(&phba->hbalock, iflags);
14332 /* Pass the cmd_iocb and the wcqe to the upper layer */
14333 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
14336 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14337 "0375 FCP cmdiocb not callback function "
14339 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14343 /* Only SLI4 non-IO commands still use IOCB */
14344 /* Fake the irspiocb and copy necessary response information */
14345 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
14347 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
14348 spin_lock_irqsave(&phba->hbalock, iflags);
14349 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
14350 spin_unlock_irqrestore(&phba->hbalock, iflags);
14353 /* Pass the cmd_iocb and the rsp state to the upper layer */
14354 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
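/*
 * Illustrative sketch, not part of the driver: the completion dispatch
 * rule applied above. SLI-4 I/O commands carry a wqe_cmpl callback and are
 * handed the raw WCQE; legacy (non-I/O) commands still use iocb_cmpl and
 * get a faked response IOCB built from the WCQE. Abort-flag handling and
 * locking are elided.
 *
 *	if (cmdiocbq->wqe_cmpl) {
 *		cmdiocbq->wqe_cmpl(phba, cmdiocbq, wcqe);
 *	} else if (cmdiocbq->iocb_cmpl) {
 *		lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
 *		cmdiocbq->iocb_cmpl(phba, cmdiocbq, &irspiocbq);
 *	}
 */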
14358 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
14359 * @phba: Pointer to HBA context object.
14360 * @cq: Pointer to completion queue.
14361 * @wcqe: Pointer to work-queue completion queue entry.
14363 * This routine handles a fast-path WQ entry consumed event by invoking the
14364 * proper WQ release routine to the slow-path WQ.
14367 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14368 struct lpfc_wcqe_release *wcqe)
14370 struct lpfc_queue *childwq;
14371 bool wqid_matched = false;
14374 /* Check for fast-path FCP work queue release */
14375 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
14376 list_for_each_entry(childwq, &cq->child_list, list) {
14377 if (childwq->queue_id == hba_wqid) {
14378 lpfc_sli4_wq_release(childwq,
14379 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14380 if (childwq->q_flag & HBA_NVMET_WQFULL)
14381 lpfc_nvmet_wqfull_process(phba, childwq);
14382 wqid_matched = true;
14386 /* Report warning log message if no match found */
14387 if (!wqid_matched)
14388 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14389 "2580 Fast-path wqe consume event carries "
14390 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
14394 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
14395 * @phba: Pointer to HBA context object.
14396 * @cq: Pointer to completion queue.
14397 * @rcqe: Pointer to receive-queue completion queue entry.
14399 * This routine processes a receive-queue completion queue entry.
14401 * Return: true if work posted to worker thread, otherwise false.
14404 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14405 struct lpfc_rcqe *rcqe)
14407 bool workposted = false;
14408 struct lpfc_queue *hrq;
14409 struct lpfc_queue *drq;
14410 struct rqb_dmabuf *dma_buf;
14411 struct fc_frame_header *fc_hdr;
14412 struct lpfc_nvmet_tgtport *tgtp;
14413 uint32_t status, rq_id;
14414 unsigned long iflags;
14415 uint32_t fctl, idx;
14417 if ((phba->nvmet_support == 0) ||
14418 (phba->sli4_hba.nvmet_cqset == NULL))
14421 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
14422 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
14423 drq = phba->sli4_hba.nvmet_mrq_data[idx];
14425 /* sanity check on queue memory */
14426 if (unlikely(!hrq) || unlikely(!drq))
14429 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14430 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14432 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14434 if ((phba->nvmet_support == 0) ||
14435 (rq_id != hrq->queue_id))
14438 status = bf_get(lpfc_rcqe_status, rcqe);
14440 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14441 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14442 "6126 Receive Frame Truncated!!\n");
14444 case FC_STATUS_RQ_SUCCESS:
14445 spin_lock_irqsave(&phba->hbalock, iflags);
14446 lpfc_sli4_rq_release(hrq, drq);
14447 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
14449 hrq->RQ_no_buf_found++;
14450 spin_unlock_irqrestore(&phba->hbalock, iflags);
14453 spin_unlock_irqrestore(&phba->hbalock, iflags);
14455 hrq->RQ_buf_posted--;
14456 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14458 /* Just some basic sanity checks on FCP Command frame */
14459 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
14460 fc_hdr->fh_f_ctl[1] << 8 |
14461 fc_hdr->fh_f_ctl[2]);
14463 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
14464 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
14465 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
14468 if (fc_hdr->fh_type == FC_TYPE_FCP) {
14469 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
14470 lpfc_nvmet_unsol_fcp_event(
14471 phba, idx, dma_buf, cq->isr_timestamp,
14472 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
14476 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
14478 case FC_STATUS_INSUFF_BUF_FRM_DISC:
14479 if (phba->nvmet_support) {
14480 tgtp = phba->targetport->private;
14481 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14482 "6401 RQE Error x%x, posted %d err_cnt "
14484 status, hrq->RQ_buf_posted,
14485 hrq->RQ_no_posted_buf,
14486 atomic_read(&tgtp->rcv_fcp_cmd_in),
14487 atomic_read(&tgtp->rcv_fcp_cmd_out),
14488 atomic_read(&tgtp->xmt_fcp_release));
14492 case FC_STATUS_INSUFF_BUF_NEED_BUF:
14493 hrq->RQ_no_posted_buf++;
14494 /* Post more buffers if possible */
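/*
 * Illustrative sketch, not part of the driver: the F_CTL sanity check used
 * above on received FCP command frames. The three F_CTL bytes of the FC
 * header are folded into one 24-bit value; a well-formed FCP command frame
 * must have FIRST_SEQ, END_SEQ and SEQ_INIT all set and must start at
 * sequence count zero. The macro SKETCH_FCTL_REQ is hypothetical.
 *
 *	fctl = (fc_hdr->fh_f_ctl[0] << 16 |
 *		fc_hdr->fh_f_ctl[1] << 8 |
 *		fc_hdr->fh_f_ctl[2]);
 *
 *	#define SKETCH_FCTL_REQ (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)
 *	if ((fctl & SKETCH_FCTL_REQ) != SKETCH_FCTL_REQ ||
 *	    fc_hdr->fh_seq_cnt != 0)
 *		goto drop;	   not a well-formed FCP command frame
 */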
14502 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
14503 * @phba: adapter with cq
14504 * @cq: Pointer to the completion queue.
14505 * @cqe: Pointer to fast-path completion queue entry.
14507 * This routine processes a fast-path work queue completion entry from a fast-path
14508 * event queue for FCP command response completion.
14510 * Return: true if work posted to worker thread, otherwise false.
14513 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14514 struct lpfc_cqe *cqe)
14516 struct lpfc_wcqe_release wcqe;
14517 bool workposted = false;
14519 /* Copy the work queue CQE and convert endian order if needed */
14520 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
14522 /* Check and process for different type of WCQE and dispatch */
14523 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
14524 case CQE_CODE_COMPL_WQE:
14525 case CQE_CODE_NVME_ERSP:
14527 /* Process the WQ complete event */
14528 phba->last_completion_time = jiffies;
14529 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
14530 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14531 (struct lpfc_wcqe_complete *)&wcqe);
14533 case CQE_CODE_RELEASE_WQE:
14534 cq->CQ_release_wqe++;
14535 /* Process the WQ release event */
14536 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
14537 (struct lpfc_wcqe_release *)&wcqe);
14539 case CQE_CODE_XRI_ABORTED:
14540 cq->CQ_xri_aborted++;
14541 /* Process the WQ XRI abort event */
14542 phba->last_completion_time = jiffies;
14543 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14544 (struct sli4_wcqe_xri_aborted *)&wcqe);
14546 case CQE_CODE_RECEIVE_V1:
14547 case CQE_CODE_RECEIVE:
14548 phba->last_completion_time = jiffies;
14549 if (cq->subtype == LPFC_NVMET) {
14550 workposted = lpfc_sli4_nvmet_handle_rcqe(
14551 phba, cq, (struct lpfc_rcqe *)&wcqe);
14555 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14556 "0144 Not a valid CQE code: x%x\n",
14557 bf_get(lpfc_wcqe_c_code, &wcqe));
14564 * lpfc_sli4_sched_cq_work - Schedules cq work
14565 * @phba: Pointer to HBA context object.
14566 * @cq: Pointer to CQ
14569 * This routine checks the poll mode of the CQ corresponding to
14570 * cq->chann, then either schedules a softirq or uses queue_work to complete the CQ processing.
14573 * queue_work path is taken if in NVMET mode, or if poll_mode is in
14574 * LPFC_QUEUE_WORK mode. Otherwise, softirq path is taken.
14577 static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
14578 struct lpfc_queue *cq, uint16_t cqid)
14582 switch (cq->poll_mode) {
14583 case LPFC_IRQ_POLL:
14584 irq_poll_sched(&cq->iop);
14586 case LPFC_QUEUE_WORK:
14588 if (is_kdump_kernel())
14589 ret = queue_work(phba->wq, &cq->irqwork);
14591 ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
14593 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14594 "0383 Cannot schedule queue work "
14595 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14596 cqid, cq->queue_id,
14597 raw_smp_processor_id());
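/*
 * Illustrative sketch, not part of the driver: the irq_poll pairing behind
 * LPFC_IRQ_POLL above. irq_poll_sched() from hard-irq context schedules
 * the softirq poller; the budget-driven handler then drains the CQ. The
 * handler name sketch_cq_poll is hypothetical (the driver registers its
 * own handler for cq->iop elsewhere).
 *
 *	irq_poll_sched(&cq->iop);	   hard-irq side
 *
 *	static int sketch_cq_poll(struct irq_poll *iop, int budget)
 *	{
 *		struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
 *
 *		__lpfc_sli4_hba_process_cq() completes the irq_poll round
 *		itself (irq_poll_complete) before rearming the CQ when run
 *		in LPFC_IRQ_POLL mode, so the handler only kicks it off:
 *
 *		__lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
 *		return 1;
 *	}
 */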
14602 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
14603 * @phba: Pointer to HBA context object.
14604 * @eq: Pointer to the queue structure.
14605 * @eqe: Pointer to fast-path event queue entry.
14607 * This routine processes an event queue entry from the fast-path event queue.
14608 * It checks the MajorCode and MinorCode to determine whether this is a
14609 * completion event on a completion queue; if not, an error is logged and the
14610 * routine simply returns. Otherwise, it gets the corresponding completion
14611 * queue, processes all the entries on the completion queue, rearms the
14612 * completion queue, and then returns.
14615 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
14616 struct lpfc_eqe *eqe)
14618 struct lpfc_queue *cq = NULL;
14619 uint32_t qidx = eq->hdwq;
14622 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
14623 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14624 "0366 Not a valid completion "
14625 "event: majorcode=x%x, minorcode=x%x\n",
14626 bf_get_le32(lpfc_eqe_major_code, eqe),
14627 bf_get_le32(lpfc_eqe_minor_code, eqe));
14631 /* Get the reference to the corresponding CQ */
14632 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14634 /* Use the fast lookup method first */
14635 if (cqid <= phba->sli4_hba.cq_max) {
14636 cq = phba->sli4_hba.cq_lookup[cqid];
14641 /* Next check for NVMET completion */
14642 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
14643 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
14644 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
14645 /* Process NVMET unsol rcv */
14646 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
14651 if (phba->sli4_hba.nvmels_cq &&
14652 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
14653 /* Process NVME unsol rcv */
14654 cq = phba->sli4_hba.nvmels_cq;
14657 /* Otherwise this is a Slow path event */
14659 lpfc_sli4_sp_handle_eqe(phba, eqe,
14660 phba->sli4_hba.hdwq[qidx].hba_eq);
14665 if (unlikely(cqid != cq->queue_id)) {
14666 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14667 "0368 Miss-matched fast-path completion "
14668 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
14669 cqid, cq->queue_id);
14674 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
14675 if (phba->ktime_on)
14676 cq->isr_timestamp = ktime_get_ns();
14678 cq->isr_timestamp = 0;
14680 lpfc_sli4_sched_cq_work(phba, cq, cqid);
14684 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
14685 * @cq: Pointer to CQ to be processed
14686 * @poll_mode: Enum lpfc_poll_state to determine poll mode
14688 * This routine calls the cq processing routine with the handler for
14691 * The CQ routine returns two values: the first is the calling status,
14692 * which indicates whether work was queued to the background discovery
14693 * thread. If true, the routine should wake up the discovery thread;
14694 * the second is the delay parameter. If non-zero, rather than rearming
14695 * the CQ and yet another interrupt, the CQ handler should be queued so
14696 * that it is processed in a subsequent polling action. The value of
14697 * the delay indicates when to reschedule it.
14700 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
14701 enum lpfc_poll_mode poll_mode)
14703 struct lpfc_hba *phba = cq->phba;
14704 unsigned long delay;
14705 bool workposted = false;
14708 /* process and rearm the CQ */
14709 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
14710 &delay, poll_mode);
14713 if (is_kdump_kernel())
14714 ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
14717 ret = queue_delayed_work_on(cq->chann, phba->wq,
14718 &cq->sched_irqwork, delay);
14720 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14721 "0367 Cannot schedule queue work "
14722 "for cqid=%d on CPU %d\n",
14723 cq->queue_id, cq->chann);
14726 /* wake up worker thread if there is work to be done */
14728 lpfc_worker_wake_up(phba);
14732 * lpfc_sli4_hba_process_cq - fast-path work handler when started by worker thread
14734 * @work: pointer to work element
14736 * Translates from the work handler and calls the fast-path handler.
14739 lpfc_sli4_hba_process_cq(struct work_struct *work)
14741 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
14743 __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
14747 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
14748 * @work: pointer to work element
14750 * Translates from the work handler and calls the fast-path handler.
14753 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
14755 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14756 struct lpfc_queue, sched_irqwork);
14758 __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
14762 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
14763 * @irq: Interrupt number.
14764 * @dev_id: The device context pointer.
14766 * This function is directly called from the PCI layer as an interrupt
14767 * service routine when a device with the SLI-4 interface spec is enabled with
14768 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
14769 * ring event in the HBA. However, when the device is enabled with either
14770 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
14771 * device-level interrupt handler. When the PCI slot is in error recovery
14772 * or the HBA is undergoing initialization, the interrupt handler will not
14773 * process the interrupt. The SCSI FCP fast-path ring events are handled in
14774 * the interrupt context. This function is called without any lock held.
14775 * It gets the hbalock to access and update SLI data structures. Note that
14776 * the FCP EQs map one-to-one to the FCP CQs, such that the FCP EQ index is
14777 * equal to the FCP CQ index.
14779 * The link attention and ELS ring attention events are handled
14780 * by the worker thread. The interrupt handler signals the worker thread
14781 * and returns for these events. This function is called without any lock
14782 * held. It gets the hbalock to access and update SLI data structures.
14784 * This function returns IRQ_HANDLED when interrupt is handled else it
14785 * returns IRQ_NONE.
14788 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
14790 struct lpfc_hba *phba;
14791 struct lpfc_hba_eq_hdl *hba_eq_hdl;
14792 struct lpfc_queue *fpeq;
14793 unsigned long iflag;
14796 struct lpfc_eq_intr_info *eqi;
14798 /* Get the driver's phba structure from the dev_id */
14799 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
14800 phba = hba_eq_hdl->phba;
14801 hba_eqidx = hba_eq_hdl->idx;
14803 if (unlikely(!phba))
14805 if (unlikely(!phba->sli4_hba.hdwq))
14808 /* Get to the EQ struct associated with this vector */
14809 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
14810 if (unlikely(!fpeq))
14813 /* Check device state for handling interrupt */
14814 if (unlikely(lpfc_intr_state_check(phba))) {
14815 /* Check again for link_state with lock held */
14816 spin_lock_irqsave(&phba->hbalock, iflag);
14817 if (phba->link_state < LPFC_LINK_DOWN)
14818 /* Flush, clear interrupt, and rearm the EQ */
14819 lpfc_sli4_eqcq_flush(phba, fpeq);
14820 spin_unlock_irqrestore(&phba->hbalock, iflag);
14824 eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
14827 fpeq->last_cpu = raw_smp_processor_id();
14829 if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
14830 fpeq->q_flag & HBA_EQ_DELAY_CHK &&
14831 phba->cfg_auto_imax &&
14832 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
14833 phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
14834 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
14836 /* process and rearm the EQ */
14837 ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
14839 if (unlikely(ecount == 0)) {
14840 fpeq->EQ_no_entry++;
14841 if (phba->intr_type == MSIX)
14842 /* MSI-X treated interrupt served as no EQ share INT */
14843 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14844 "0358 MSI-X interrupt with no EQE\n");
14846 /* Non MSI-X treated on interrupt as EQ share INT */
14850 return IRQ_HANDLED;
14851 } /* lpfc_sli4_hba_intr_handler */
14854 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
14855 * @irq: Interrupt number.
14856 * @dev_id: The device context pointer.
14858 * This function is the device-level interrupt handler for a device with the SLI-4
14859 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
14860 * interrupt mode is enabled and there is an event in the HBA which requires
14861 * driver attention. This function invokes the slow-path interrupt attention
14862 * handling function and fast-path interrupt attention handling function in
14863 * turn to process the relevant HBA attention events. This function is called
14864 * without any lock held. It gets the hbalock to access and update SLI data
14867 * This function returns IRQ_HANDLED when interrupt is handled, else it
14868 * returns IRQ_NONE.
14871 lpfc_sli4_intr_handler(int irq, void *dev_id)
14873 struct lpfc_hba *phba;
14874 irqreturn_t hba_irq_rc;
14875 bool hba_handled = false;
14878 /* Get the driver's phba structure from the dev_id */
14879 phba = (struct lpfc_hba *)dev_id;
14881 if (unlikely(!phba))
14885 * Invoke fast-path host attention interrupt handling as appropriate.
14887 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
14888 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
14889 &phba->sli4_hba.hba_eq_hdl[qidx]);
14890 if (hba_irq_rc == IRQ_HANDLED)
14891 hba_handled |= true;
14894 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
14895 } /* lpfc_sli4_intr_handler */
14897 void lpfc_sli4_poll_hbtimer(struct timer_list *t)
14899 struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
14900 struct lpfc_queue *eq;
14905 list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
14906 i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
14907 if (!list_empty(&phba->poll_list))
14908 mod_timer(&phba->cpuhp_poll_timer,
14909 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
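/*
 * Illustrative sketch, not part of the driver: the self-rearming heartbeat
 * used above. The timer callback polls every EQ on poll_list and then
 * re-queues itself while the list is non-empty, giving a periodic
 * (LPFC_POLL_HB msec) slow-path sweep with no interrupts involved. The
 * timer_setup() call at init time is assumed here for completeness.
 *
 *	timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
 *
 *	and inside the callback, after the sweep:
 *
 *	if (!list_empty(&phba->poll_list))
 *		mod_timer(&phba->cpuhp_poll_timer,
 *			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));
 */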
14914 inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
14916 struct lpfc_hba *phba = eq->phba;
14920 * Unlocking an irq is one of the entry points to check
14921 * for re-schedule, but we are good on the io submission
14922 * path as the midlayer does a get_cpu to glue us in. Flush
14923 * out the invalidate queue so we can see the updated
14928 if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
14929 /* We will not likely get the completion for the caller
14930 * during this iteration but I guess that's fine.
14931 * Future io's coming on this eq should be able to
14932 * pick it up. As for the case of single io's, they
14933 * will be handled through a sched from the polling timer
14934 * function which is currently triggered every 1msec.
14936 i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
14941 static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
14943 struct lpfc_hba *phba = eq->phba;
14945 /* kickstart slowpath processing if needed */
14946 if (list_empty(&phba->poll_list))
14947 mod_timer(&phba->cpuhp_poll_timer,
14948 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
14950 list_add_rcu(&eq->_poll_list, &phba->poll_list);
14954 static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
14956 struct lpfc_hba *phba = eq->phba;
14958 /* Disable slowpath processing for this eq. Kick start the eq
14959 * by RE-ARMING the eq's ASAP
14961 list_del_rcu(&eq->_poll_list);
14964 if (list_empty(&phba->poll_list))
14965 del_timer_sync(&phba->cpuhp_poll_timer);
14968 void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
14970 struct lpfc_queue *eq, *next;
14972 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
14973 list_del(&eq->_poll_list);
14975 INIT_LIST_HEAD(&phba->poll_list);
14980 __lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
14982 if (mode == eq->mode)
14985 * Currently this function is only called during a hotplug
14986 * event and the cpu on which this function is executing
14987 * is going offline. By now the hotplug has instructed
14988 * the scheduler to remove this cpu from the cpu active mask.
14989 * So we don't need to worry about being put aside by the
14990 * scheduler for a high priority process. Yes, the
14991 * interrupts could come but they are known to retire ASAP.
14994 /* Disable polling in the fastpath */
14995 WRITE_ONCE(eq->mode, mode);
14996 /* flush out the store buffer */
15000 * Add this eq to the polling list and start polling. For
15001 * a grace period both the interrupt handler and the poller
15002 * will try to process the eq _but_ that's fine. We have a
15003 * synchronization mechanism in place (queue_claimed) to
15004 * deal with it. This is just a draining phase for the
15005 * interrupt handler (not eq's) as we have guaranteed
15006 * through the barrier that all the CPUs have seen the new
15007 * CQ_POLLED state, which will effectively disable the
15008 * REARMING of the EQ. The whole idea is that eq's die off
15009 * eventually as we are not rearming EQ's anymore.
15011 mode ? lpfc_sli4_add_to_poll_list(eq) :
15012 lpfc_sli4_remove_from_poll_list(eq);
15015 void lpfc_sli4_start_polling(struct lpfc_queue *eq)
15017 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
15020 void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
15022 struct lpfc_hba *phba = eq->phba;
15024 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
15026 /* Kick start the pending io's in h/w.
15027 * Once we switch back to interrupt processing on an eq
15028 * the io path completion will only arm eq's when it
15029 * receives a completion. But since eq's are in a
15030 * disarmed state it doesn't receive a completion. This
15031 * creates a deadlock scenario.
15033 phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
15037 * lpfc_sli4_queue_free - free a queue structure and associated memory
15038 * @queue: The queue structure to free.
15040 * This function frees a queue structure and the DMAable memory used for
15041 * the host resident queue. This function must be called after destroying the
15042 * queue on the HBA.
15045 lpfc_sli4_queue_free(struct lpfc_queue *queue)
15047 struct lpfc_dmabuf *dmabuf;
15052 if (!list_empty(&queue->wq_list))
15053 list_del(&queue->wq_list);
15055 while (!list_empty(&queue->page_list)) {
15056 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
15058 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
15059 dmabuf->virt, dmabuf->phys);
15063 lpfc_free_rq_buffer(queue->phba, queue);
15064 kfree(queue->rqbp);
15067 if (!list_empty(&queue->cpu_list))
15068 list_del(&queue->cpu_list);
15075 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
15076 * @phba: The HBA that this queue is being created on.
15077 * @page_size: The size of a queue page
15078 * @entry_size: The size of each queue entry for this queue.
15079 * @entry_count: The number of entries that this queue will handle.
15080 * @cpu: The cpu that will primarily utilize this queue.
15082 * This function allocates a queue structure and the DMAable memory used for
15083 * the host resident queue. This function must be called before creating the
15084 * queue on the HBA.
15086 struct lpfc_queue *
15087 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
15088 uint32_t entry_size, uint32_t entry_count, int cpu)
15090 struct lpfc_queue *queue;
15091 struct lpfc_dmabuf *dmabuf;
15092 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15095 if (!phba->sli4_hba.pc_sli4_params.supported)
15096 hw_page_size = page_size;
15098 pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
15100 /* If needed, adjust page count to match the max the adapter supports */
15101 if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
15102 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
15104 queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
15105 GFP_KERNEL, cpu_to_node(cpu));
15109 INIT_LIST_HEAD(&queue->list);
15110 INIT_LIST_HEAD(&queue->_poll_list);
15111 INIT_LIST_HEAD(&queue->wq_list);
15112 INIT_LIST_HEAD(&queue->wqfull_list);
15113 INIT_LIST_HEAD(&queue->page_list);
15114 INIT_LIST_HEAD(&queue->child_list);
15115 INIT_LIST_HEAD(&queue->cpu_list);
15117 /* Set queue parameters now. If the system cannot provide memory
15118 * resources, the free routine needs to know what was allocated.
15120 queue->page_count = pgcnt;
15121 queue->q_pgs = (void **)&queue[1];
15122 queue->entry_cnt_per_pg = hw_page_size / entry_size;
15123 queue->entry_size = entry_size;
15124 queue->entry_count = entry_count;
15125 queue->page_size = hw_page_size;
15126 queue->phba = phba;
15128 for (x = 0; x < queue->page_count; x++) {
15129 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
15130 dev_to_node(&phba->pcidev->dev));
15133 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
15134 hw_page_size, &dmabuf->phys,
15136 if (!dmabuf->virt) {
15140 dmabuf->buffer_tag = x;
15141 list_add_tail(&dmabuf->list, &queue->page_list);
15142 /* use lpfc_sli4_qe to index a particular entry in this page */
15143 queue->q_pgs[x] = dmabuf->virt;
15145 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
15146 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
15147 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
15148 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
15150 /* notify_interval will be set during q creation */
15154 lpfc_sli4_queue_free(queue);
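/*
 * Illustrative sketch, not part of the driver: the sizing math above.
 * With (for example) 4 KiB hardware pages, a 64-byte entry size and 1024
 * entries, ALIGN(64 * 1024, 4096) / 4096 yields 16 pages, and
 * entry_cnt_per_pg is 4096 / 64 = 64 entries per page. The numbers are
 * hypothetical inputs, not driver defaults.
 *
 *	pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
 *	queue->entry_cnt_per_pg = hw_page_size / entry_size;
 */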
15159 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
15160 * @phba: HBA structure that indicates port to create a queue on.
15161 * @pci_barset: PCI BAR set flag.
15163 * This function shall perform iomap of the specified PCI BAR address to host
15164 * memory address if not already done so and return it. The returned host
15165 * memory address can be NULL.
15167 static void __iomem *
15168 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
15173 switch (pci_barset) {
15174 case WQ_PCI_BAR_0_AND_1:
15175 return phba->pci_bar0_memmap_p;
15176 case WQ_PCI_BAR_2_AND_3:
15177 return phba->pci_bar2_memmap_p;
15178 case WQ_PCI_BAR_4_AND_5:
15179 return phba->pci_bar4_memmap_p;
15187 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
15188 * @phba: HBA structure that EQs are on.
15189 * @startq: The starting EQ index to modify
15190 * @numq: The number of EQs (consecutive indexes) to modify
15191 * @usdelay: amount of delay
15193 * This function revises the EQ delay on 1 or more EQs. The EQ delay
15194 * is set either by writing to a register (if supported by the SLI Port)
15195 * or by mailbox command. The mailbox command allows several EQs to be updated at once.
15198 * The @phba struct is used to send a mailbox command to HBA. The @startq
15199 * is used to get the starting EQ index to change. The @numq value is
15200 * used to specify how many consecutive EQ indexes, starting at EQ index,
15201 * are to be changed. This function is synchronous and will wait for any
15202 * mailbox commands to finish before returning.
15204 * On success this function will return a zero. If unable to allocate
15205 * enough memory this function will return -ENOMEM. If a mailbox command
15206 * fails this function will return -ENXIO. Note: on ENXIO, some EQs may
15207 * have had their delay multiplier changed.
15210 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
15211 uint32_t numq, uint32_t usdelay)
15213 struct lpfc_mbx_modify_eq_delay *eq_delay;
15214 LPFC_MBOXQ_t *mbox;
15215 struct lpfc_queue *eq;
15216 int cnt = 0, rc, length;
15217 uint32_t shdr_status, shdr_add_status;
15220 union lpfc_sli4_cfg_shdr *shdr;
15222 if (startq >= phba->cfg_irq_chann)
15225 if (usdelay > 0xFFFF) {
15226 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
15227 "6429 usdelay %d too large. Scaled down to "
15228 "0xFFFF.\n", usdelay);
15232 /* set values by EQ_DELAY register if supported */
15233 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
15234 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15235 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15239 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
15247 /* Otherwise, set values by mailbox cmd */
15249 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15251 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15252 "6428 Failed allocating mailbox cmd buffer."
15253 " EQ delay was not set.\n");
15256 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
15257 sizeof(struct lpfc_sli4_cfg_mhdr));
15258 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15259 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
15260 length, LPFC_SLI4_MBX_EMBED);
15261 eq_delay = &mbox->u.mqe.un.eq_delay;
15263 /* Calculate delay multiplier from maximum interrupts per second */
15264 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
15267 if (dmult > LPFC_DMULT_MAX)
15268 dmult = LPFC_DMULT_MAX;
15270 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15271 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15274 eq->q_mode = usdelay;
15275 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
15276 eq_delay->u.request.eq[cnt].phase = 0;
15277 eq_delay->u.request.eq[cnt].delay_multi = dmult;
15282 eq_delay->u.request.num_eq = cnt;
15284 mbox->vport = phba->pport;
15285 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15286 mbox->ctx_buf = NULL;
15287 mbox->ctx_ndlp = NULL;
15288 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15289 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
15290 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15291 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15292 if (shdr_status || shdr_add_status || rc) {
15293 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15294 "2512 MODIFY_EQ_DELAY mailbox failed with "
15295 "status x%x add_status x%x, mbx status x%x\n",
15296 shdr_status, shdr_add_status, rc);
15298 mempool_free(mbox, phba->mbox_mem_pool);
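/*
 * Illustrative sketch, not part of the driver: the two ways an EQ delay is
 * applied above. Ports that support the EQ_DELAY register get the
 * microsecond value written directly per EQ; otherwise the value is
 * converted to a clamped delay multiplier and sent for a batch of EQs in
 * one MODIFY_EQ_DELAY mailbox command. Mailbox assembly is elided.
 *
 *	if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
 *		lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);	   register path
 *	} else {
 *		dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
 *		dmult = min_t(uint32_t, dmult, LPFC_DMULT_MAX);	   clamp
 *		fill eq_delay->u.request.eq[] and issue the mailbox command
 *	}
 */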
15303 * lpfc_eq_create - Create an Event Queue on the HBA
15304 * @phba: HBA structure that indicates port to create a queue on.
15305 * @eq: The queue structure to use to create the event queue.
15306 * @imax: The maximum interrupt per second limit.
15308 * This function creates an event queue, as detailed in @eq, on a port,
15309 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
15311 * The @phba struct is used to send mailbox command to HBA. The @eq struct
15312 * is used to get the entry count and entry size that are necessary to
15313 * determine the number of pages to allocate and use for this queue. This
15314 * function will send the EQ_CREATE mailbox command to the HBA to setup the
15315 * event queue. This function is synchronous and will wait for the mailbox
15316 * command to finish before continuing.
15318 * On success this function will return a zero. If unable to allocate enough
15319 * memory this function will return -ENOMEM. If the queue create mailbox command
15320 * fails this function will return -ENXIO.
15323 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
15325 struct lpfc_mbx_eq_create *eq_create;
15326 LPFC_MBOXQ_t *mbox;
15327 int rc, length, status = 0;
15328 struct lpfc_dmabuf *dmabuf;
15329 uint32_t shdr_status, shdr_add_status;
15330 union lpfc_sli4_cfg_shdr *shdr;
15332 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15334 /* sanity check on queue memory */
15337 if (!phba->sli4_hba.pc_sli4_params.supported)
15338 hw_page_size = SLI4_PAGE_SIZE;
15340 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15343 length = (sizeof(struct lpfc_mbx_eq_create) -
15344 sizeof(struct lpfc_sli4_cfg_mhdr));
15345 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15346 LPFC_MBOX_OPCODE_EQ_CREATE,
15347 length, LPFC_SLI4_MBX_EMBED);
15348 eq_create = &mbox->u.mqe.un.eq_create;
15349 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
15350 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
15352 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
15354 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
15356 /* Use version 2 of CREATE_EQ if eqav is set */
15357 if (phba->sli4_hba.pc_sli4_params.eqav) {
15358 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15359 LPFC_Q_CREATE_VERSION_2);
15360 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
15361 phba->sli4_hba.pc_sli4_params.eqav);
15364 /* don't setup delay multiplier using EQ_CREATE */
15366 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
15368 switch (eq->entry_count) {
15370 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15371 "0360 Unsupported EQ count. (%d)\n",
15373 if (eq->entry_count < 256) {
15377 fallthrough; /* otherwise default to smallest count */
15379 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15383 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15387 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15391 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15395 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15399 list_for_each_entry(dmabuf, &eq->page_list, list) {
15400 memset(dmabuf->virt, 0, hw_page_size);
15401 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15402 putPaddrLow(dmabuf->phys);
15403 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15404 putPaddrHigh(dmabuf->phys);
15406 mbox->vport = phba->pport;
15407 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15408 mbox->ctx_buf = NULL;
15409 mbox->ctx_ndlp = NULL;
15410 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15411 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15412 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15413 if (shdr_status || shdr_add_status || rc) {
15414 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15415 "2500 EQ_CREATE mailbox failed with "
15416 "status x%x add_status x%x, mbx status x%x\n",
15417 shdr_status, shdr_add_status, rc);
15420 eq->type = LPFC_EQ;
15421 eq->subtype = LPFC_NONE;
15422 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
15423 if (eq->queue_id == 0xFFFF)
15425 eq->host_index = 0;
15426 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
15427 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
15429 mempool_free(mbox, phba->mbox_mem_pool);
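/*
 * Illustrative sketch, not part of the driver: the embedded SLI4-config
 * mailbox pattern this create routine (and the others around it) follows:
 * allocate from the mailbox pool, build an embedded config header, issue
 * with MBX_POLL, then check both the mailbox return code and the generic
 * shdr status words before trusting the response. Error handling is
 * abbreviated.
 *
 *	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
 *			 LPFC_MBOX_OPCODE_EQ_CREATE, length,
 *			 LPFC_SLI4_MBX_EMBED);
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 *	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 *	if (shdr_status || shdr_add_status || rc)
 *		status = -ENXIO;	   create failed
 *	mempool_free(mbox, phba->mbox_mem_pool);
 */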
15433 static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget)
15435 struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
15437 __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
/**
 * lpfc_cq_create - Create a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @cq: The queue structure to use to create the completion queue.
 * @eq: The event queue to bind this completion queue to.
 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
 *
 * This function creates a completion queue, as detailed in @cq, on a port,
 * described by @phba, by sending a CQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @cq
 * struct is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @eq
 * is used to indicate which event queue to bind this completion queue to.
 * This function issues the CQ_CREATE mailbox command in polled mode and
 * waits for it to complete before returning.
 *
 * On success this function returns zero. If it is unable to allocate enough
 * memory it returns -ENOMEM. If the queue create mailbox command fails it
 * returns -ENXIO.
 **/
15466 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
15467 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
15469 struct lpfc_mbx_cq_create *cq_create;
15470 struct lpfc_dmabuf *dmabuf;
15471 LPFC_MBOXQ_t *mbox;
15472 int rc, length, status = 0;
15473 uint32_t shdr_status, shdr_add_status;
15474 union lpfc_sli4_cfg_shdr *shdr;
15476 /* sanity check on queue memory */
15480 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15483 length = (sizeof(struct lpfc_mbx_cq_create) -
15484 sizeof(struct lpfc_sli4_cfg_mhdr));
15485 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15486 LPFC_MBOX_OPCODE_CQ_CREATE,
15487 length, LPFC_SLI4_MBX_EMBED);
15488 cq_create = &mbox->u.mqe.un.cq_create;
15489 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
15490 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
15492 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
15493 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
15494 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15495 phba->sli4_hba.pc_sli4_params.cqv);
15496 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
15497 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
15498 (cq->page_size / SLI4_PAGE_SIZE));
15499 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
15501 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
15502 phba->sli4_hba.pc_sli4_params.cqav);
15504 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
15507 switch (cq->entry_count) {
15510 if (phba->sli4_hba.pc_sli4_params.cqv ==
15511 LPFC_Q_CREATE_VERSION_2) {
15512 cq_create->u.request.context.lpfc_cq_context_count =
15514 bf_set(lpfc_cq_context_count,
15515 &cq_create->u.request.context,
15516 LPFC_CQ_CNT_WORD7);
15521 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15522 "0361 Unsupported CQ count: "
15523 "entry cnt %d sz %d pg cnt %d\n",
15524 cq->entry_count, cq->entry_size,
15526 if (cq->entry_count < 256) {
15530 fallthrough; /* otherwise default to smallest count */
15532 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15536 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15540 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15544 list_for_each_entry(dmabuf, &cq->page_list, list) {
15545 memset(dmabuf->virt, 0, cq->page_size);
15546 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15547 putPaddrLow(dmabuf->phys);
15548 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15549 putPaddrHigh(dmabuf->phys);
15551 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15553 /* The IOCTL status is embedded in the mailbox subheader. */
15554 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15555 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15556 if (shdr_status || shdr_add_status || rc) {
15557 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15558 "2501 CQ_CREATE mailbox failed with "
15559 "status x%x add_status x%x, mbx status x%x\n",
15560 shdr_status, shdr_add_status, rc);
15564 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
15565 if (cq->queue_id == 0xFFFF) {
15569 /* link the cq onto the parent eq child list */
15570 list_add_tail(&cq->list, &eq->child_list);
15571 /* Set up completion queue's type and subtype */
15573 cq->subtype = subtype;
15574 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
15575 cq->assoc_qid = eq->queue_id;
15577 cq->host_index = 0;
15578 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15579 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
15581 if (cq->queue_id > phba->sli4_hba.cq_max)
15582 phba->sli4_hba.cq_max = cq->queue_id;
15584 irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler);
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
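/*
 * Illustrative usage sketch, not driver code: the required creation order for
 * an EQ/CQ pair.  The EQ must exist first because CQ_CREATE names the EQ id
 * it binds to.  The subtype constant and the use of phba->cfg_fcp_imax as the
 * EQ delay multiplier are assumptions for this sketch; both queue objects are
 * presumed already allocated with their page_list and entry counts filled in.
 */
static int example_setup_eq_cq(struct lpfc_hba *phba,
			       struct lpfc_queue *eq,
			       struct lpfc_queue *cq)
{
	int rc;

	rc = lpfc_eq_create(phba, eq, phba->cfg_fcp_imax);
	if (rc)
		return rc;

	/* Bind the new CQ to the EQ just created. */
	return lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_IO);
}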
/**
 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
 * @phba: HBA structure that indicates port to create a queue on.
 * @cqp: The queue structure array to use to create the completion queues.
 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
 *
 * This function creates a set of completion queues to support MRQ, as
 * detailed in @cqp, on a port described by @phba, by sending a CREATE_CQ_SET
 * mailbox command to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @cqp
 * array is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for each queue. The EQs
 * in @hdwq indicate which event queue to bind each completion queue to. This
 * function issues the CREATE_CQ_SET mailbox command in polled mode and waits
 * for it to complete before returning.
 *
 * On success this function returns zero. If it is unable to allocate enough
 * memory it returns -ENOMEM. If the queue create mailbox command fails it
 * returns -ENXIO.
 **/
15615 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
15616 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
15619 struct lpfc_queue *cq;
15620 struct lpfc_queue *eq;
15621 struct lpfc_mbx_cq_create_set *cq_set;
15622 struct lpfc_dmabuf *dmabuf;
15623 LPFC_MBOXQ_t *mbox;
15624 int rc, length, alloclen, status = 0;
15625 int cnt, idx, numcq, page_idx = 0;
15626 uint32_t shdr_status, shdr_add_status;
15627 union lpfc_sli4_cfg_shdr *shdr;
15628 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15630 /* sanity check on queue memory */
15631 numcq = phba->cfg_nvmet_mrq;
15632 if (!cqp || !hdwq || !numcq)
15635 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15639 length = sizeof(struct lpfc_mbx_cq_create_set);
15640 length += ((numcq * cqp[0]->page_count) *
15641 sizeof(struct dma_address));
15642 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15643 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
15644 LPFC_SLI4_MBX_NEMBED);
15645 if (alloclen < length) {
15646 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15647 "3098 Allocated DMA memory size (%d) is "
15648 "less than the requested DMA memory size "
15649 "(%d)\n", alloclen, length);
15653 cq_set = mbox->sge_array->addr[0];
15654 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
15655 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
15657 for (idx = 0; idx < numcq; idx++) {
15659 eq = hdwq[idx].hba_eq;
15664 if (!phba->sli4_hba.pc_sli4_params.supported)
15665 hw_page_size = cq->page_size;
15669 bf_set(lpfc_mbx_cq_create_set_page_size,
15670 &cq_set->u.request,
15671 (hw_page_size / SLI4_PAGE_SIZE));
15672 bf_set(lpfc_mbx_cq_create_set_num_pages,
15673 &cq_set->u.request, cq->page_count);
15674 bf_set(lpfc_mbx_cq_create_set_evt,
15675 &cq_set->u.request, 1);
15676 bf_set(lpfc_mbx_cq_create_set_valid,
15677 &cq_set->u.request, 1);
15678 bf_set(lpfc_mbx_cq_create_set_cqe_size,
15679 &cq_set->u.request, 0);
15680 bf_set(lpfc_mbx_cq_create_set_num_cq,
15681 &cq_set->u.request, numcq);
15682 bf_set(lpfc_mbx_cq_create_set_autovalid,
15683 &cq_set->u.request,
15684 phba->sli4_hba.pc_sli4_params.cqav);
15685 switch (cq->entry_count) {
15688 if (phba->sli4_hba.pc_sli4_params.cqv ==
15689 LPFC_Q_CREATE_VERSION_2) {
15690 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15691 &cq_set->u.request,
15693 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15694 &cq_set->u.request,
15695 LPFC_CQ_CNT_WORD7);
15700 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15701 "3118 Bad CQ count. (%d)\n",
15703 if (cq->entry_count < 256) {
15707 fallthrough; /* otherwise default to smallest */
15709 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15710 &cq_set->u.request, LPFC_CQ_CNT_256);
15713 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15714 &cq_set->u.request, LPFC_CQ_CNT_512);
15717 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15718 &cq_set->u.request, LPFC_CQ_CNT_1024);
15721 bf_set(lpfc_mbx_cq_create_set_eq_id0,
15722 &cq_set->u.request, eq->queue_id);
15725 bf_set(lpfc_mbx_cq_create_set_eq_id1,
15726 &cq_set->u.request, eq->queue_id);
15729 bf_set(lpfc_mbx_cq_create_set_eq_id2,
15730 &cq_set->u.request, eq->queue_id);
15733 bf_set(lpfc_mbx_cq_create_set_eq_id3,
15734 &cq_set->u.request, eq->queue_id);
15737 bf_set(lpfc_mbx_cq_create_set_eq_id4,
15738 &cq_set->u.request, eq->queue_id);
15741 bf_set(lpfc_mbx_cq_create_set_eq_id5,
15742 &cq_set->u.request, eq->queue_id);
15745 bf_set(lpfc_mbx_cq_create_set_eq_id6,
15746 &cq_set->u.request, eq->queue_id);
15749 bf_set(lpfc_mbx_cq_create_set_eq_id7,
15750 &cq_set->u.request, eq->queue_id);
15753 bf_set(lpfc_mbx_cq_create_set_eq_id8,
15754 &cq_set->u.request, eq->queue_id);
15757 bf_set(lpfc_mbx_cq_create_set_eq_id9,
15758 &cq_set->u.request, eq->queue_id);
15761 bf_set(lpfc_mbx_cq_create_set_eq_id10,
15762 &cq_set->u.request, eq->queue_id);
15765 bf_set(lpfc_mbx_cq_create_set_eq_id11,
15766 &cq_set->u.request, eq->queue_id);
15769 bf_set(lpfc_mbx_cq_create_set_eq_id12,
15770 &cq_set->u.request, eq->queue_id);
15773 bf_set(lpfc_mbx_cq_create_set_eq_id13,
15774 &cq_set->u.request, eq->queue_id);
15777 bf_set(lpfc_mbx_cq_create_set_eq_id14,
15778 &cq_set->u.request, eq->queue_id);
15781 bf_set(lpfc_mbx_cq_create_set_eq_id15,
15782 &cq_set->u.request, eq->queue_id);
15786 /* link the cq onto the parent eq child list */
15787 list_add_tail(&cq->list, &eq->child_list);
15788 /* Set up completion queue's type and subtype */
15790 cq->subtype = subtype;
15791 cq->assoc_qid = eq->queue_id;
15793 cq->host_index = 0;
15794 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15795 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
15800 list_for_each_entry(dmabuf, &cq->page_list, list) {
15801 memset(dmabuf->virt, 0, hw_page_size);
15802 cnt = page_idx + dmabuf->buffer_tag;
15803 cq_set->u.request.page[cnt].addr_lo =
15804 putPaddrLow(dmabuf->phys);
15805 cq_set->u.request.page[cnt].addr_hi =
15806 putPaddrHigh(dmabuf->phys);
15812 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15814 /* The IOCTL status is embedded in the mailbox subheader. */
15815 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15816 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15817 if (shdr_status || shdr_add_status || rc) {
15818 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15819 "3119 CQ_CREATE_SET mailbox failed with "
15820 "status x%x add_status x%x, mbx status x%x\n",
15821 shdr_status, shdr_add_status, rc);
15825 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
15826 if (rc == 0xFFFF) {
15831 for (idx = 0; idx < numcq; idx++) {
15833 cq->queue_id = rc + idx;
15834 if (cq->queue_id > phba->sli4_hba.cq_max)
15835 phba->sli4_hba.cq_max = cq->queue_id;
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}
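/*
 * Illustrative sketch (standalone, not driver code): CQ_CREATE_SET returns a
 * single base queue id and the driver derives each member's id as base + idx,
 * exactly as the loop above does.  The numeric values are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned int base_qid = 70;	/* hypothetical id from u.response */
	unsigned int numcq = 4;		/* phba->cfg_nvmet_mrq in the driver */

	for (unsigned int idx = 0; idx < numcq; idx++)
		printf("cq[%u].queue_id = %u\n", idx, base_qid + idx);
	return 0;
}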
/**
 * lpfc_mq_create_fb_init - Send MQ_CREATE without async events registration
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 * @cq: The completion queue to associate with this mailbox queue.
 *
 * This function provides failback (fb) functionality when the mq_create_ext
 * command fails on older FW generations. Its purpose is otherwise identical
 * to mq_create_ext.
 *
 * This routine cannot fail as all attributes were previously accessed and
 * initialized in mq_create_ext.
 **/
15858 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15859 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
15861 struct lpfc_mbx_mq_create *mq_create;
15862 struct lpfc_dmabuf *dmabuf;
15865 length = (sizeof(struct lpfc_mbx_mq_create) -
15866 sizeof(struct lpfc_sli4_cfg_mhdr));
15867 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15868 LPFC_MBOX_OPCODE_MQ_CREATE,
15869 length, LPFC_SLI4_MBX_EMBED);
15870 mq_create = &mbox->u.mqe.un.mq_create;
15871 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
15873 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
15875 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15876 switch (mq->entry_count) {
15878 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15879 LPFC_MQ_RING_SIZE_16);
15882 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15883 LPFC_MQ_RING_SIZE_32);
15886 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15887 LPFC_MQ_RING_SIZE_64);
15890 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15891 LPFC_MQ_RING_SIZE_128);
15894 list_for_each_entry(dmabuf, &mq->page_list, list) {
15895 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15896 putPaddrLow(dmabuf->phys);
15897 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15898 putPaddrHigh(dmabuf->phys);
/**
 * lpfc_mq_create - Create a mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mailbox queue.
 * @subtype: The queue's subtype.
 *
 * This function creates a mailbox queue, as detailed in @mq, on a port,
 * described by @phba, by sending an MQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @mq
 * struct is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function issues the MQ_CREATE mailbox command in polled mode and waits for
 * it to complete before returning.
 *
 * On success this function returns zero. If it is unable to allocate enough
 * memory it returns -ENOMEM. If the queue create mailbox command fails it
 * returns -ENXIO.
 **/
15924 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
15925 struct lpfc_queue *cq, uint32_t subtype)
15927 struct lpfc_mbx_mq_create *mq_create;
15928 struct lpfc_mbx_mq_create_ext *mq_create_ext;
15929 struct lpfc_dmabuf *dmabuf;
15930 LPFC_MBOXQ_t *mbox;
15931 int rc, length, status = 0;
15932 uint32_t shdr_status, shdr_add_status;
15933 union lpfc_sli4_cfg_shdr *shdr;
15934 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15936 /* sanity check on queue memory */
15939 if (!phba->sli4_hba.pc_sli4_params.supported)
15940 hw_page_size = SLI4_PAGE_SIZE;
15942 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15945 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
15946 sizeof(struct lpfc_sli4_cfg_mhdr));
15947 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15948 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
15949 length, LPFC_SLI4_MBX_EMBED);
15951 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
15952 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
15953 bf_set(lpfc_mbx_mq_create_ext_num_pages,
15954 &mq_create_ext->u.request, mq->page_count);
15955 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
15956 &mq_create_ext->u.request, 1);
15957 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
15958 &mq_create_ext->u.request, 1);
15959 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
15960 &mq_create_ext->u.request, 1);
15961 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
15962 &mq_create_ext->u.request, 1);
15963 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
15964 &mq_create_ext->u.request, 1);
15965 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
15966 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15967 phba->sli4_hba.pc_sli4_params.mqv);
15968 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
15969 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
15972 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
15974 switch (mq->entry_count) {
15976 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15977 "0362 Unsupported MQ count. (%d)\n",
15979 if (mq->entry_count < 16) {
15983 fallthrough; /* otherwise default to smallest count */
15985 bf_set(lpfc_mq_context_ring_size,
15986 &mq_create_ext->u.request.context,
15987 LPFC_MQ_RING_SIZE_16);
15990 bf_set(lpfc_mq_context_ring_size,
15991 &mq_create_ext->u.request.context,
15992 LPFC_MQ_RING_SIZE_32);
15995 bf_set(lpfc_mq_context_ring_size,
15996 &mq_create_ext->u.request.context,
15997 LPFC_MQ_RING_SIZE_64);
16000 bf_set(lpfc_mq_context_ring_size,
16001 &mq_create_ext->u.request.context,
16002 LPFC_MQ_RING_SIZE_128);
16005 list_for_each_entry(dmabuf, &mq->page_list, list) {
16006 memset(dmabuf->virt, 0, hw_page_size);
16007 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
16008 putPaddrLow(dmabuf->phys);
16009 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
16010 putPaddrHigh(dmabuf->phys);
16012 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16013 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16014 &mq_create_ext->u.response);
16015 if (rc != MBX_SUCCESS) {
16016 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16017 "2795 MQ_CREATE_EXT failed with "
16018 "status x%x. Failback to MQ_CREATE.\n",
16020 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
16021 mq_create = &mbox->u.mqe.un.mq_create;
16022 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16023 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
16024 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16025 &mq_create->u.response);
16028 /* The IOCTL status is embedded in the mailbox subheader. */
16029 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16030 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16031 if (shdr_status || shdr_add_status || rc) {
16032 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16033 "2502 MQ_CREATE mailbox failed with "
16034 "status x%x add_status x%x, mbx status x%x\n",
16035 shdr_status, shdr_add_status, rc);
16039 if (mq->queue_id == 0xFFFF) {
16043 mq->type = LPFC_MQ;
16044 mq->assoc_qid = cq->queue_id;
16045 mq->subtype = subtype;
16046 mq->host_index = 0;
16049 /* link the mq onto the parent cq child list */
16050 list_add_tail(&mq->list, &cq->child_list);
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
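/*
 * Illustrative sketch (standalone, not driver code) of the fallback pattern
 * used above: attempt the extended mailbox command first and, if the firmware
 * rejects it, rebuild the same mailbox as the legacy command and retry.  The
 * two issue functions are hypothetical stand-ins, not the driver's API.
 */
#include <stdio.h>

static int issue_mq_create_ext(void) { return -1; } /* pretend old FW rejects */
static int issue_mq_create(void)     { return 0; }  /* legacy command works  */

int main(void)
{
	int rc = issue_mq_create_ext();

	if (rc) {
		printf("MQ_CREATE_EXT failed (%d), failing back to MQ_CREATE\n",
		       rc);
		rc = issue_mq_create();
	}
	printf("final status: %d\n", rc);
	return 0;
}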
/**
 * lpfc_wq_create - Create a Work Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @wq: The queue structure to use to create the work queue.
 * @cq: The completion queue to bind this work queue to.
 * @subtype: The subtype of the work queue indicating its functionality.
 *
 * This function creates a work queue, as detailed in @wq, on a port,
 * described by @phba, by sending a WQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @wq
 * struct is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @cq
 * is used to indicate which completion queue to bind this work queue to.
 * This function issues the WQ_CREATE mailbox command in polled mode and
 * waits for it to complete before returning.
 *
 * On success this function returns zero. If it is unable to allocate enough
 * memory it returns -ENOMEM. If the queue create mailbox command fails it
 * returns -ENXIO.
 **/
16079 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
16080 struct lpfc_queue *cq, uint32_t subtype)
16082 struct lpfc_mbx_wq_create *wq_create;
16083 struct lpfc_dmabuf *dmabuf;
16084 LPFC_MBOXQ_t *mbox;
16085 int rc, length, status = 0;
16086 uint32_t shdr_status, shdr_add_status;
16087 union lpfc_sli4_cfg_shdr *shdr;
16088 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16089 struct dma_address *page;
16090 void __iomem *bar_memmap_p;
16091 uint32_t db_offset;
16092 uint16_t pci_barset;
16093 uint8_t dpp_barset;
16094 uint32_t dpp_offset;
16095 uint8_t wq_create_version;
16097 unsigned long pg_addr;
16100 /* sanity check on queue memory */
16103 if (!phba->sli4_hba.pc_sli4_params.supported)
16104 hw_page_size = wq->page_size;
16106 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16109 length = (sizeof(struct lpfc_mbx_wq_create) -
16110 sizeof(struct lpfc_sli4_cfg_mhdr));
16111 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16112 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
16113 length, LPFC_SLI4_MBX_EMBED);
16114 wq_create = &mbox->u.mqe.un.wq_create;
16115 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
16116 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
16118 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
16121 /* wqv is the earliest version supported, NOT the latest */
16122 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16123 phba->sli4_hba.pc_sli4_params.wqv);
16125 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
16126 (wq->page_size > SLI4_PAGE_SIZE))
16127 wq_create_version = LPFC_Q_CREATE_VERSION_1;
16129 wq_create_version = LPFC_Q_CREATE_VERSION_0;
16131 switch (wq_create_version) {
16132 case LPFC_Q_CREATE_VERSION_1:
16133 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
16135 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16136 LPFC_Q_CREATE_VERSION_1);
16138 switch (wq->entry_size) {
16141 bf_set(lpfc_mbx_wq_create_wqe_size,
16142 &wq_create->u.request_1,
16143 LPFC_WQ_WQE_SIZE_64);
16146 bf_set(lpfc_mbx_wq_create_wqe_size,
16147 &wq_create->u.request_1,
16148 LPFC_WQ_WQE_SIZE_128);
16151 /* Request DPP by default */
16152 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
16153 bf_set(lpfc_mbx_wq_create_page_size,
16154 &wq_create->u.request_1,
16155 (wq->page_size / SLI4_PAGE_SIZE));
16156 page = wq_create->u.request_1.page;
16159 page = wq_create->u.request.page;
16163 list_for_each_entry(dmabuf, &wq->page_list, list) {
16164 memset(dmabuf->virt, 0, hw_page_size);
16165 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
16166 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
16169 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16170 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
16172 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16173 /* The IOCTL status is embedded in the mailbox subheader. */
16174 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16175 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16176 if (shdr_status || shdr_add_status || rc) {
16177 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16178 "2503 WQ_CREATE mailbox failed with "
16179 "status x%x add_status x%x, mbx status x%x\n",
16180 shdr_status, shdr_add_status, rc);
16185 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
16186 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
16187 &wq_create->u.response);
16189 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
16190 &wq_create->u.response_1);
16192 if (wq->queue_id == 0xFFFF) {
16197 wq->db_format = LPFC_DB_LIST_FORMAT;
16198 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
16199 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16200 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
16201 &wq_create->u.response);
16202 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
16203 (wq->db_format != LPFC_DB_RING_FORMAT)) {
16204 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16205 "3265 WQ[%d] doorbell format "
16206 "not supported: x%x\n",
16207 wq->queue_id, wq->db_format);
16211 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
16212 &wq_create->u.response);
16213 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16215 if (!bar_memmap_p) {
16216 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16217 "3263 WQ[%d] failed to memmap "
16218 "pci barset:x%x\n",
16219 wq->queue_id, pci_barset);
16223 db_offset = wq_create->u.response.doorbell_offset;
16224 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
16225 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
16226 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16227 "3252 WQ[%d] doorbell offset "
16228 "not supported: x%x\n",
16229 wq->queue_id, db_offset);
16233 wq->db_regaddr = bar_memmap_p + db_offset;
16234 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16235 "3264 WQ[%d]: barset:x%x, offset:x%x, "
16236 "format:x%x\n", wq->queue_id,
16237 pci_barset, db_offset, wq->db_format);
16239 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16241 /* Check if DPP was honored by the firmware */
16242 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
16243 &wq_create->u.response_1);
16244 if (wq->dpp_enable) {
16245 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
16246 &wq_create->u.response_1);
16247 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16249 if (!bar_memmap_p) {
16250 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16251 "3267 WQ[%d] failed to memmap "
16252 "pci barset:x%x\n",
16253 wq->queue_id, pci_barset);
16257 db_offset = wq_create->u.response_1.doorbell_offset;
16258 wq->db_regaddr = bar_memmap_p + db_offset;
16259 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
16260 &wq_create->u.response_1);
16261 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
16262 &wq_create->u.response_1);
16263 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16265 if (!bar_memmap_p) {
16266 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16267 "3268 WQ[%d] failed to memmap "
16268 "pci barset:x%x\n",
16269 wq->queue_id, dpp_barset);
16273 dpp_offset = wq_create->u.response_1.dpp_offset;
16274 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
16275 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16276 "3271 WQ[%d]: barset:x%x, offset:x%x, "
16277 "dpp_id:x%x dpp_barset:x%x "
16278 "dpp_offset:x%x\n",
16279 wq->queue_id, pci_barset, db_offset,
16280 wq->dpp_id, dpp_barset, dpp_offset);
16283 /* Enable combined writes for DPP aperture */
16284 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
16285 rc = set_memory_wc(pg_addr, 1);
16287 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16288 "3272 Cannot setup Combined "
16289 "Write on WQ[%d] - disable DPP\n",
16291 phba->cfg_enable_dpp = 0;
16294 phba->cfg_enable_dpp = 0;
16297 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16299 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
16300 if (wq->pring == NULL) {
16304 wq->type = LPFC_WQ;
16305 wq->assoc_qid = cq->queue_id;
16306 wq->subtype = subtype;
16307 wq->host_index = 0;
16309 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
16311 /* link the wq onto the parent cq child list */
16312 list_add_tail(&wq->list, &cq->child_list);
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
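/*
 * Illustrative sketch (kernel-context, assumptions noted): enabling
 * write-combining on the DPP aperture, mirroring the set_memory_wc() call
 * above.  set_memory_wc() retypes whole pages of a page-aligned kernel
 * virtual address, hence the PAGE_MASK rounding; on failure the driver
 * simply disables DPP rather than treating it as fatal.
 */
static int example_enable_wc(void __iomem *dpp_regaddr)
{
	unsigned long pg = (unsigned long)dpp_regaddr & PAGE_MASK;

	/* 1 == number of pages to re-type as write-combining */
	return set_memory_wc(pg, 1);
}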
/**
 * lpfc_rq_create - Create a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrq: The queue structure to use to create the header receive queue.
 * @drq: The queue structure to use to create the data receive queue.
 * @cq: The completion queue to bind this receive queue pair to.
 * @subtype: The subtype of the receive queue indicating its functionality.
 *
 * This function creates a receive buffer queue pair, as detailed in @hrq and
 * @drq, on a port described by @phba, by sending an RQ_CREATE mailbox command
 * to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @hrq and
 * @drq structs are used to get the entry count that is necessary to determine
 * the number of pages to use for each queue. The @cq indicates which
 * completion queue the buffers posted to these queues will be bound to. This
 * function issues the RQ_CREATE mailbox command in polled mode and waits for
 * it to complete before returning.
 *
 * On success this function returns zero. If it is unable to allocate enough
 * memory it returns -ENOMEM. If the queue create mailbox command fails it
 * returns -ENXIO.
 **/
16343 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16344 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
16346 struct lpfc_mbx_rq_create *rq_create;
16347 struct lpfc_dmabuf *dmabuf;
16348 LPFC_MBOXQ_t *mbox;
16349 int rc, length, status = 0;
16350 uint32_t shdr_status, shdr_add_status;
16351 union lpfc_sli4_cfg_shdr *shdr;
16352 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16353 void __iomem *bar_memmap_p;
16354 uint32_t db_offset;
16355 uint16_t pci_barset;
16357 /* sanity check on queue memory */
16358 if (!hrq || !drq || !cq)
16360 if (!phba->sli4_hba.pc_sli4_params.supported)
16361 hw_page_size = SLI4_PAGE_SIZE;
16363 if (hrq->entry_count != drq->entry_count)
16365 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16368 length = (sizeof(struct lpfc_mbx_rq_create) -
16369 sizeof(struct lpfc_sli4_cfg_mhdr));
16370 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16371 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16372 length, LPFC_SLI4_MBX_EMBED);
16373 rq_create = &mbox->u.mqe.un.rq_create;
16374 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16375 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16376 phba->sli4_hba.pc_sli4_params.rqv);
16377 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16378 bf_set(lpfc_rq_context_rqe_count_1,
16379 &rq_create->u.request.context,
16381 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
16382 bf_set(lpfc_rq_context_rqe_size,
16383 &rq_create->u.request.context,
16385 bf_set(lpfc_rq_context_page_size,
16386 &rq_create->u.request.context,
16387 LPFC_RQ_PAGE_SIZE_4096);
16389 switch (hrq->entry_count) {
16391 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16392 "2535 Unsupported RQ count. (%d)\n",
16394 if (hrq->entry_count < 512) {
16398 fallthrough; /* otherwise default to smallest count */
16400 bf_set(lpfc_rq_context_rqe_count,
16401 &rq_create->u.request.context,
16402 LPFC_RQ_RING_SIZE_512);
16405 bf_set(lpfc_rq_context_rqe_count,
16406 &rq_create->u.request.context,
16407 LPFC_RQ_RING_SIZE_1024);
16410 bf_set(lpfc_rq_context_rqe_count,
16411 &rq_create->u.request.context,
16412 LPFC_RQ_RING_SIZE_2048);
16415 bf_set(lpfc_rq_context_rqe_count,
16416 &rq_create->u.request.context,
16417 LPFC_RQ_RING_SIZE_4096);
16420 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
16421 LPFC_HDR_BUF_SIZE);
16423 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16425 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16427 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16428 memset(dmabuf->virt, 0, hw_page_size);
16429 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16430 putPaddrLow(dmabuf->phys);
16431 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16432 putPaddrHigh(dmabuf->phys);
16434 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16435 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16437 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16438 /* The IOCTL status is embedded in the mailbox subheader. */
16439 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16440 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16441 if (shdr_status || shdr_add_status || rc) {
16442 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16443 "2504 RQ_CREATE mailbox failed with "
16444 "status x%x add_status x%x, mbx status x%x\n",
16445 shdr_status, shdr_add_status, rc);
16449 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16450 if (hrq->queue_id == 0xFFFF) {
16455 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16456 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
16457 &rq_create->u.response);
16458 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
16459 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
16460 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16461 "3262 RQ [%d] doorbell format not "
16462 "supported: x%x\n", hrq->queue_id,
16468 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
16469 &rq_create->u.response);
16470 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
16471 if (!bar_memmap_p) {
16472 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16473 "3269 RQ[%d] failed to memmap pci "
16474 "barset:x%x\n", hrq->queue_id,
16480 db_offset = rq_create->u.response.doorbell_offset;
16481 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
16482 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
16483 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16484 "3270 RQ[%d] doorbell offset not "
16485 "supported: x%x\n", hrq->queue_id,
16490 hrq->db_regaddr = bar_memmap_p + db_offset;
16491 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16492 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
16493 "format:x%x\n", hrq->queue_id, pci_barset,
16494 db_offset, hrq->db_format);
16496 hrq->db_format = LPFC_DB_RING_FORMAT;
16497 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16499 hrq->type = LPFC_HRQ;
16500 hrq->assoc_qid = cq->queue_id;
16501 hrq->subtype = subtype;
16502 hrq->host_index = 0;
16503 hrq->hba_index = 0;
16504 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16506 /* now create the data queue */
16507 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16508 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16509 length, LPFC_SLI4_MBX_EMBED);
16510 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16511 phba->sli4_hba.pc_sli4_params.rqv);
16512 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16513 bf_set(lpfc_rq_context_rqe_count_1,
16514 &rq_create->u.request.context, hrq->entry_count);
16515 if (subtype == LPFC_NVMET)
16516 rq_create->u.request.context.buffer_size =
16517 LPFC_NVMET_DATA_BUF_SIZE;
16519 rq_create->u.request.context.buffer_size =
16520 LPFC_DATA_BUF_SIZE;
16521 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
16523 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
16524 (PAGE_SIZE/SLI4_PAGE_SIZE));
16526 switch (drq->entry_count) {
16528 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16529 "2536 Unsupported RQ count. (%d)\n",
16531 if (drq->entry_count < 512) {
16535 fallthrough; /* otherwise default to smallest count */
16537 bf_set(lpfc_rq_context_rqe_count,
16538 &rq_create->u.request.context,
16539 LPFC_RQ_RING_SIZE_512);
16542 bf_set(lpfc_rq_context_rqe_count,
16543 &rq_create->u.request.context,
16544 LPFC_RQ_RING_SIZE_1024);
16547 bf_set(lpfc_rq_context_rqe_count,
16548 &rq_create->u.request.context,
16549 LPFC_RQ_RING_SIZE_2048);
16552 bf_set(lpfc_rq_context_rqe_count,
16553 &rq_create->u.request.context,
16554 LPFC_RQ_RING_SIZE_4096);
16557 if (subtype == LPFC_NVMET)
16558 bf_set(lpfc_rq_context_buf_size,
16559 &rq_create->u.request.context,
16560 LPFC_NVMET_DATA_BUF_SIZE);
16562 bf_set(lpfc_rq_context_buf_size,
16563 &rq_create->u.request.context,
16564 LPFC_DATA_BUF_SIZE);
16566 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16568 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16570 list_for_each_entry(dmabuf, &drq->page_list, list) {
16571 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16572 putPaddrLow(dmabuf->phys);
16573 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16574 putPaddrHigh(dmabuf->phys);
16576 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16577 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16578 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16579 /* The IOCTL status is embedded in the mailbox subheader. */
16580 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16581 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16582 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16583 if (shdr_status || shdr_add_status || rc) {
16587 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16588 if (drq->queue_id == 0xFFFF) {
16592 drq->type = LPFC_DRQ;
16593 drq->assoc_qid = cq->queue_id;
16594 drq->subtype = subtype;
16595 drq->host_index = 0;
16596 drq->hba_index = 0;
16597 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16599 /* link the header and data RQs onto the parent cq child list */
16600 list_add_tail(&hrq->list, &cq->child_list);
16601 list_add_tail(&drq->list, &cq->child_list);
16604 mempool_free(mbox, phba->mbox_mem_pool);
/**
 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrqp: The queue structure array to use to create the header receive queues.
 * @drqp: The queue structure array to use to create the data receive queues.
 * @cqp: The completion queue array to bind these receive queues to.
 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
 *
 * This function creates receive buffer queue pairs, as detailed in @hrqp and
 * @drqp, on a port described by @phba, by sending an RQ_CREATE mailbox
 * command to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @hrqp
 * and @drqp arrays are used to get the entry counts that are necessary to
 * determine the number of pages to use for each queue. The @cqp array
 * indicates which completion queue the buffers posted to each queue pair
 * will be bound to. This function issues the RQ_CREATE mailbox command in
 * polled mode and waits for it to complete before returning.
 *
 * On success this function returns zero. If it is unable to allocate enough
 * memory it returns -ENOMEM. If the queue create mailbox command fails it
 * returns -ENXIO.
 **/
16633 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
16634 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
16637 struct lpfc_queue *hrq, *drq, *cq;
16638 struct lpfc_mbx_rq_create_v2 *rq_create;
16639 struct lpfc_dmabuf *dmabuf;
16640 LPFC_MBOXQ_t *mbox;
16641 int rc, length, alloclen, status = 0;
16642 int cnt, idx, numrq, page_idx = 0;
16643 uint32_t shdr_status, shdr_add_status;
16644 union lpfc_sli4_cfg_shdr *shdr;
16645 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16647 numrq = phba->cfg_nvmet_mrq;
16648 /* sanity check on array memory */
16649 if (!hrqp || !drqp || !cqp || !numrq)
16651 if (!phba->sli4_hba.pc_sli4_params.supported)
16652 hw_page_size = SLI4_PAGE_SIZE;
16654 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16658 length = sizeof(struct lpfc_mbx_rq_create_v2);
16659 length += ((2 * numrq * hrqp[0]->page_count) *
16660 sizeof(struct dma_address));
16662 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16663 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
16664 LPFC_SLI4_MBX_NEMBED);
16665 if (alloclen < length) {
16666 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16667 "3099 Allocated DMA memory size (%d) is "
16668 "less than the requested DMA memory size "
16669 "(%d)\n", alloclen, length);
16676 rq_create = mbox->sge_array->addr[0];
16677 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
16679 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
16682 for (idx = 0; idx < numrq; idx++) {
16687 /* sanity check on queue memory */
16688 if (!hrq || !drq || !cq) {
16693 if (hrq->entry_count != drq->entry_count) {
16699 bf_set(lpfc_mbx_rq_create_num_pages,
16700 &rq_create->u.request,
16702 bf_set(lpfc_mbx_rq_create_rq_cnt,
16703 &rq_create->u.request, (numrq * 2));
16704 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
16706 bf_set(lpfc_rq_context_base_cq,
16707 &rq_create->u.request.context,
16709 bf_set(lpfc_rq_context_data_size,
16710 &rq_create->u.request.context,
16711 LPFC_NVMET_DATA_BUF_SIZE);
16712 bf_set(lpfc_rq_context_hdr_size,
16713 &rq_create->u.request.context,
16714 LPFC_HDR_BUF_SIZE);
16715 bf_set(lpfc_rq_context_rqe_count_1,
16716 &rq_create->u.request.context,
16718 bf_set(lpfc_rq_context_rqe_size,
16719 &rq_create->u.request.context,
16721 bf_set(lpfc_rq_context_page_size,
16722 &rq_create->u.request.context,
16723 (PAGE_SIZE/SLI4_PAGE_SIZE));
16726 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16727 memset(dmabuf->virt, 0, hw_page_size);
16728 cnt = page_idx + dmabuf->buffer_tag;
16729 rq_create->u.request.page[cnt].addr_lo =
16730 putPaddrLow(dmabuf->phys);
16731 rq_create->u.request.page[cnt].addr_hi =
16732 putPaddrHigh(dmabuf->phys);
16738 list_for_each_entry(dmabuf, &drq->page_list, list) {
16739 memset(dmabuf->virt, 0, hw_page_size);
16740 cnt = page_idx + dmabuf->buffer_tag;
16741 rq_create->u.request.page[cnt].addr_lo =
16742 putPaddrLow(dmabuf->phys);
16743 rq_create->u.request.page[cnt].addr_hi =
16744 putPaddrHigh(dmabuf->phys);
16749 hrq->db_format = LPFC_DB_RING_FORMAT;
16750 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16751 hrq->type = LPFC_HRQ;
16752 hrq->assoc_qid = cq->queue_id;
16753 hrq->subtype = subtype;
16754 hrq->host_index = 0;
16755 hrq->hba_index = 0;
16756 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16758 drq->db_format = LPFC_DB_RING_FORMAT;
16759 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16760 drq->type = LPFC_DRQ;
16761 drq->assoc_qid = cq->queue_id;
16762 drq->subtype = subtype;
16763 drq->host_index = 0;
16764 drq->hba_index = 0;
16765 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16767 list_add_tail(&hrq->list, &cq->child_list);
16768 list_add_tail(&drq->list, &cq->child_list);
16771 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16772 /* The IOCTL status is embedded in the mailbox subheader. */
16773 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16774 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16775 if (shdr_status || shdr_add_status || rc) {
16776 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16777 "3120 RQ_CREATE mailbox failed with "
16778 "status x%x add_status x%x, mbx status x%x\n",
16779 shdr_status, shdr_add_status, rc);
16783 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16784 if (rc == 0xFFFF) {
16789 /* Initialize all RQs with associated queue id */
16790 for (idx = 0; idx < numrq; idx++) {
16792 hrq->queue_id = rc + (2 * idx);
16794 drq->queue_id = rc + (2 * idx) + 1;
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}
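/*
 * Illustrative sketch (standalone, not driver code): the v2 RQ_CREATE used
 * for MRQ returns one base queue id, and header/data queues are interleaved
 * from it exactly as the "Initialize all RQs" loop above does.  Values are
 * hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned int base = 100;	/* hypothetical id from u.response */
	unsigned int numrq = 2;		/* phba->cfg_nvmet_mrq in the driver */

	for (unsigned int idx = 0; idx < numrq; idx++)
		printf("pair %u: hrq=%u drq=%u\n",
		       idx, base + 2 * idx, base + 2 * idx + 1);
	return 0;
}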
/**
 * lpfc_eq_destroy - Destroy an event Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @eq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @eq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @eq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function returns zero. If the queue destroy mailbox
 * command fails this function returns -ENXIO.
 **/
16816 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16818 LPFC_MBOXQ_t *mbox;
16819 int rc, length, status = 0;
16820 uint32_t shdr_status, shdr_add_status;
16821 union lpfc_sli4_cfg_shdr *shdr;
16823 /* sanity check on queue memory */
16827 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16830 length = (sizeof(struct lpfc_mbx_eq_destroy) -
16831 sizeof(struct lpfc_sli4_cfg_mhdr));
16832 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16833 LPFC_MBOX_OPCODE_EQ_DESTROY,
16834 length, LPFC_SLI4_MBX_EMBED);
16835 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16837 mbox->vport = eq->phba->pport;
16838 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16840 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16841 /* The IOCTL status is embedded in the mailbox subheader. */
16842 shdr = (union lpfc_sli4_cfg_shdr *)
16843 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16844 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16845 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16846 if (shdr_status || shdr_add_status || rc) {
16847 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16848 "2505 EQ_DESTROY mailbox failed with "
16849 "status x%x add_status x%x, mbx status x%x\n",
16850 shdr_status, shdr_add_status, rc);
16854 /* Remove eq from any list */
16855 list_del_init(&eq->list);
16856 mempool_free(mbox, eq->phba->mbox_mem_pool);
/**
 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @cq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @cq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @cq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function returns zero. If the queue destroy mailbox
 * command fails this function returns -ENXIO.
 **/
16874 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16876 LPFC_MBOXQ_t *mbox;
16877 int rc, length, status = 0;
16878 uint32_t shdr_status, shdr_add_status;
16879 union lpfc_sli4_cfg_shdr *shdr;
16881 /* sanity check on queue memory */
16884 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16887 length = (sizeof(struct lpfc_mbx_cq_destroy) -
16888 sizeof(struct lpfc_sli4_cfg_mhdr));
16889 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16890 LPFC_MBOX_OPCODE_CQ_DESTROY,
16891 length, LPFC_SLI4_MBX_EMBED);
16892 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16894 mbox->vport = cq->phba->pport;
16895 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16896 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16897 /* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
16900 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16901 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16902 if (shdr_status || shdr_add_status || rc) {
16903 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16904 "2506 CQ_DESTROY mailbox failed with "
16905 "status x%x add_status x%x, mbx status x%x\n",
16906 shdr_status, shdr_add_status, rc);
16909 /* Remove cq from any list */
16910 list_del_init(&cq->list);
16911 mempool_free(mbox, cq->phba->mbox_mem_pool);
/**
 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @mq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @mq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @mq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function returns zero. If the queue destroy mailbox
 * command fails this function returns -ENXIO.
 **/
16929 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16931 LPFC_MBOXQ_t *mbox;
16932 int rc, length, status = 0;
16933 uint32_t shdr_status, shdr_add_status;
16934 union lpfc_sli4_cfg_shdr *shdr;
16936 /* sanity check on queue memory */
16939 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16942 length = (sizeof(struct lpfc_mbx_mq_destroy) -
16943 sizeof(struct lpfc_sli4_cfg_mhdr));
16944 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16945 LPFC_MBOX_OPCODE_MQ_DESTROY,
16946 length, LPFC_SLI4_MBX_EMBED);
16947 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16949 mbox->vport = mq->phba->pport;
16950 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16951 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16952 /* The IOCTL status is embedded in the mailbox subheader. */
16953 shdr = (union lpfc_sli4_cfg_shdr *)
16954 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
16955 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16956 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16957 if (shdr_status || shdr_add_status || rc) {
16958 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16959 "2507 MQ_DESTROY mailbox failed with "
16960 "status x%x add_status x%x, mbx status x%x\n",
16961 shdr_status, shdr_add_status, rc);
16964 /* Remove mq from any list */
16965 list_del_init(&mq->list);
16966 mempool_free(mbox, mq->phba->mbox_mem_pool);
/**
 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @wq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @wq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @wq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function returns zero. If the queue destroy mailbox
 * command fails this function returns -ENXIO.
 **/
16984 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16986 LPFC_MBOXQ_t *mbox;
16987 int rc, length, status = 0;
16988 uint32_t shdr_status, shdr_add_status;
16989 union lpfc_sli4_cfg_shdr *shdr;
16991 /* sanity check on queue memory */
16994 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16997 length = (sizeof(struct lpfc_mbx_wq_destroy) -
16998 sizeof(struct lpfc_sli4_cfg_mhdr));
16999 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17000 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
17001 length, LPFC_SLI4_MBX_EMBED);
17002 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
17004 mbox->vport = wq->phba->pport;
17005 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17006 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
17007 shdr = (union lpfc_sli4_cfg_shdr *)
17008 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
17009 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17010 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17011 if (shdr_status || shdr_add_status || rc) {
17012 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17013 "2508 WQ_DESTROY mailbox failed with "
17014 "status x%x add_status x%x, mbx status x%x\n",
17015 shdr_status, shdr_add_status, rc);
17018 /* Remove wq from any list */
17019 list_del_init(&wq->list);
17022 mempool_free(mbox, wq->phba->mbox_mem_pool);
/**
 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @hrq: The queue structure associated with the header queue to destroy.
 * @drq: The queue structure associated with the data queue to destroy.
 *
 * This function destroys the receive queue pair, as detailed in @hrq and
 * @drq, by sending a mailbox command, specific to the type of queue, to the
 * HBA.
 *
 * The @hrq and @drq structs are used to get the queue IDs of the queues to
 * destroy.
 *
 * On success this function returns zero. If the queue destroy mailbox
 * command fails this function returns -ENXIO.
 **/
17041 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17042 struct lpfc_queue *drq)
17044 LPFC_MBOXQ_t *mbox;
17045 int rc, length, status = 0;
17046 uint32_t shdr_status, shdr_add_status;
17047 union lpfc_sli4_cfg_shdr *shdr;
17049 /* sanity check on queue memory */
17052 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
17055 length = (sizeof(struct lpfc_mbx_rq_destroy) -
17056 sizeof(struct lpfc_sli4_cfg_mhdr));
17057 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17058 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
17059 length, LPFC_SLI4_MBX_EMBED);
17060 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17062 mbox->vport = hrq->phba->pport;
17063 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17064 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
17065 /* The IOCTL status is embedded in the mailbox subheader. */
17066 shdr = (union lpfc_sli4_cfg_shdr *)
17067 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17068 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17069 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17070 if (shdr_status || shdr_add_status || rc) {
17071 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17072 "2509 RQ_DESTROY mailbox failed with "
17073 "status x%x add_status x%x, mbx status x%x\n",
17074 shdr_status, shdr_add_status, rc);
17075 if (rc != MBX_TIMEOUT)
17076 mempool_free(mbox, hrq->phba->mbox_mem_pool);
17079 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17081 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
17082 shdr = (union lpfc_sli4_cfg_shdr *)
17083 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17084 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17085 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17086 if (shdr_status || shdr_add_status || rc) {
17087 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17088 "2510 RQ_DESTROY mailbox failed with "
17089 "status x%x add_status x%x, mbx status x%x\n",
17090 shdr_status, shdr_add_status, rc);
17093 list_del_init(&hrq->list);
17094 list_del_init(&drq->list);
17095 mempool_free(mbox, hrq->phba->mbox_mem_pool);
/**
 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
 * @phba: pointer to lpfc hba data structure.
 * @pdma_phys_addr0: Physical address of the 1st SGL page.
 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
 * @xritag: the xritag that ties this io to the SGL pages.
 *
 * This routine will post the sgl pages for the IO that has the xritag
 * that is in the iocbq structure. The xritag is assigned during iocbq
 * creation and persists for as long as the driver is loaded.
 * If the caller has fewer than 256 scatter gather segments to map then
 * @pdma_phys_addr1 should be 0.
 * If the caller needs to map more than 256 scatter gather segments then
 * @pdma_phys_addr1 should be a valid physical address.
 * Physical addresses for SGLs must be 64-byte aligned.
 * If two SGLs are mapped, the first must have 256 entries; the second may
 * have between 1 and 256 entries.
 *
 * Return codes:
 *	0 - Success
 *	-ENXIO, -ENOMEM - Failure
 **/
17122 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
17123 dma_addr_t pdma_phys_addr0,
17124 dma_addr_t pdma_phys_addr1,
17127 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
17128 LPFC_MBOXQ_t *mbox;
17130 uint32_t shdr_status, shdr_add_status;
17132 union lpfc_sli4_cfg_shdr *shdr;
17134 if (xritag == NO_XRI) {
17135 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17136 "0364 Invalid param:\n");
17140 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17144 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17145 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17146 sizeof(struct lpfc_mbx_post_sgl_pages) -
17147 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
17149 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
17150 &mbox->u.mqe.un.post_sgl_pages;
17151 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
17152 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
17154 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
17155 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
17156 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
17157 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
17159 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
17160 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
17161 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
17162 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
17163 if (!phba->sli4_hba.intr_enable)
17164 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17166 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17167 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17169 /* The IOCTL status is embedded in the mailbox subheader. */
17170 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
17171 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17172 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17173 if (rc != MBX_TIMEOUT)
17174 mempool_free(mbox, phba->mbox_mem_pool);
17175 if (shdr_status || shdr_add_status || rc) {
17176 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17177 "2511 POST_SGL mailbox failed with "
17178 "status x%x add_status x%x, mbx status x%x\n",
17179 shdr_status, shdr_add_status, rc);
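/*
 * Editor's note: a minimal usage sketch for lpfc_sli4_post_sgl(), compiled
 * out behind a guard. The LPFC_DOC_EXAMPLES macro and the helper name are
 * invented for this illustration and are not part of the driver.
 */
#ifdef LPFC_DOC_EXAMPLES
static int lpfc_example_post_one_sgl(struct lpfc_hba *phba,
				     dma_addr_t sgl_phys, uint16_t xritag)
{
	/* Fewer than 256 scatter gather segments, so page 1 address is 0 */
	return lpfc_sli4_post_sgl(phba, sgl_phys, 0, xritag);
}
#endif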
17185 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
17186 * @phba: pointer to lpfc hba data structure.
17188 * This routine is invoked to allocate the next available xri from the
17189 * driver's xri bitmask, consistent with the SLI-4 interface spec. The
17190 * xri returned is a logical index; the driver starts the search at
17191 * 0 each time.
17194 * The allocated logical xri (0 <= xri < max_xri) if successful,
17195 * NO_XRI if no xris are available.
17198 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
17203 * Fetch the next logical xri. Because this index is logical,
17204 * the driver starts at 0 each time.
17206 spin_lock_irq(&phba->hbalock);
17207 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
17208 phba->sli4_hba.max_cfg_param.max_xri, 0);
17209 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
17210 spin_unlock_irq(&phba->hbalock);
17213 set_bit(xri, phba->sli4_hba.xri_bmask);
17214 phba->sli4_hba.max_cfg_param.xri_used++;
17216 spin_unlock_irq(&phba->hbalock);
17221 * __lpfc_sli4_free_xri - Release an xri for reuse.
17222 * @phba: pointer to lpfc hba data structure.
17223 * @xri: xri to release.
17225 * This routine is invoked to release an xri to the pool of
17226 * available xris maintained by the driver. The hbalock must be held.
17229 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17231 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
17232 phba->sli4_hba.max_cfg_param.xri_used--;
17237 * lpfc_sli4_free_xri - Release an xri for reuse.
17238 * @phba: pointer to lpfc hba data structure.
17239 * @xri: xri to release.
17241 * This routine is invoked to release an xri to the pool of
17242 * available xris maintained by the driver.
17245 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17247 spin_lock_irq(&phba->hbalock);
17248 __lpfc_sli4_free_xri(phba, xri);
17249 spin_unlock_irq(&phba->hbalock);
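/*
 * Editor's note: an illustrative sketch (guarded out of the build) showing
 * the alloc/free pairing for xris. The guard macro, the helper name, and
 * the error handling are invented for this example.
 */
#ifdef LPFC_DOC_EXAMPLES
static int lpfc_example_xri_cycle(struct lpfc_hba *phba)
{
	uint16_t xri = lpfc_sli4_alloc_xri(phba);

	if (xri == NO_XRI)	/* xri bitmask exhausted */
		return -ENOSPC;
	/* ... tie the xri to an I/O, post its SGL, issue the WQE ... */
	lpfc_sli4_free_xri(phba, xri);
	return 0;
}
#endif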
17253 * lpfc_sli4_next_xritag - Get an xritag for the io
17254 * @phba: Pointer to HBA context object.
17256 * This function gets an xritag for the iocb. If there is no unused xritag
17257 * it will log a warning and return NO_XRI (0xffff), which is not a valid
17258 * xritag. Otherwise the function returns the allocated xritag.
17260 * The caller is not required to hold any lock.
17263 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
17265 uint16_t xri_index;
17267 xri_index = lpfc_sli4_alloc_xri(phba);
17268 if (xri_index == NO_XRI)
17269 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17270 "2004 Failed to allocate XRI.last XRITAG is %d"
17271 " Max XRI is %d, Used XRI is %d\n",
17273 phba->sli4_hba.max_cfg_param.max_xri,
17274 phba->sli4_hba.max_cfg_param.xri_used);
17279 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
17280 * @phba: pointer to lpfc hba data structure.
17281 * @post_sgl_list: pointer to els sgl entry list.
17282 * @post_cnt: number of els sgl entries on the list.
17284 * This routine is invoked to post a block of driver's sgl pages to the
17285 * HBA using a non-embedded mailbox command. No lock is held. This routine
17286 * is only called when the driver is loading and after all IO has been
17287 * stopped.
17290 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
17291 struct list_head *post_sgl_list,
17294 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
17295 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17296 struct sgl_page_pairs *sgl_pg_pairs;
17298 LPFC_MBOXQ_t *mbox;
17299 uint32_t reqlen, alloclen, pg_pairs;
17301 uint16_t xritag_start = 0;
17303 uint32_t shdr_status, shdr_add_status;
17304 union lpfc_sli4_cfg_shdr *shdr;
17306 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
17307 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17308 if (reqlen > SLI4_PAGE_SIZE) {
17309 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17310 "2559 Block sgl registration required DMA "
17311 "size (%d) great than a page\n", reqlen);
17315 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17319 /* Allocate DMA memory and set up the non-embedded mailbox command */
17320 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17321 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
17322 LPFC_SLI4_MBX_NEMBED);
17324 if (alloclen < reqlen) {
17325 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17326 "0285 Allocated DMA memory size (%d) is "
17327 "less than the requested DMA memory "
17328 "size (%d)\n", alloclen, reqlen);
17329 lpfc_sli4_mbox_cmd_free(phba, mbox);
17332 /* Set up the SGL pages in the non-embedded DMA pages */
17333 viraddr = mbox->sge_array->addr[0];
17334 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17335 sgl_pg_pairs = &sgl->sgl_pg_pairs;
17338 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
17339 /* Set up the sge entry */
17340 sgl_pg_pairs->sgl_pg0_addr_lo =
17341 cpu_to_le32(putPaddrLow(sglq_entry->phys));
17342 sgl_pg_pairs->sgl_pg0_addr_hi =
17343 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
17344 sgl_pg_pairs->sgl_pg1_addr_lo =
17345 cpu_to_le32(putPaddrLow(0));
17346 sgl_pg_pairs->sgl_pg1_addr_hi =
17347 cpu_to_le32(putPaddrHigh(0));
17349 /* Keep the first xritag on the list */
17351 xritag_start = sglq_entry->sli4_xritag;
17356 /* Complete initialization and perform endian conversion. */
17357 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17358 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
17359 sgl->word0 = cpu_to_le32(sgl->word0);
17361 if (!phba->sli4_hba.intr_enable)
17362 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17364 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17365 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17367 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
17368 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17369 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17370 if (rc != MBX_TIMEOUT)
17371 lpfc_sli4_mbox_cmd_free(phba, mbox);
17372 if (shdr_status || shdr_add_status || rc) {
17373 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17374 "2513 POST_SGL_BLOCK mailbox command failed "
17375 "status x%x add_status x%x mbx status x%x\n",
17376 shdr_status, shdr_add_status, rc);
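/*
 * Editor's note: illustrative arithmetic only. Because the non-embedded
 * payload must fit in one SLI4 page, the largest block that can be posted
 * per mailbox command is bounded as below. LPFC_EXAMPLE_MAX_SGL_PAIRS is a
 * name invented for this sketch.
 */
#ifdef LPFC_DOC_EXAMPLES
#define LPFC_EXAMPLE_MAX_SGL_PAIRS				\
	((SLI4_PAGE_SIZE - sizeof(union lpfc_sli4_cfg_shdr) -	\
	  sizeof(uint32_t)) / sizeof(struct sgl_page_pairs))
#endif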
17383 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
17384 * @phba: pointer to lpfc hba data structure.
17385 * @nblist: pointer to nvme buffer list.
17386 * @count: number of nvme buffers on the list.
17388 * This routine is invoked to post a block of @count nvme sgl pages from an
17389 * nvme buffer list @nblist to the HBA using a non-embedded mailbox command.
17394 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
17397 struct lpfc_io_buf *lpfc_ncmd;
17398 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17399 struct sgl_page_pairs *sgl_pg_pairs;
17401 LPFC_MBOXQ_t *mbox;
17402 uint32_t reqlen, alloclen, pg_pairs;
17404 uint16_t xritag_start = 0;
17406 uint32_t shdr_status, shdr_add_status;
17407 dma_addr_t pdma_phys_bpl1;
17408 union lpfc_sli4_cfg_shdr *shdr;
17410 /* Calculate the requested length of the dma memory */
17411 reqlen = count * sizeof(struct sgl_page_pairs) +
17412 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17413 if (reqlen > SLI4_PAGE_SIZE) {
17414 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
17415 "6118 Block sgl registration required DMA "
17416 "size (%d) great than a page\n", reqlen);
17419 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17421 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17422 "6119 Failed to allocate mbox cmd memory\n");
17426 /* Allocate DMA memory and set up the non-embedded mailbox command */
17427 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17428 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17429 reqlen, LPFC_SLI4_MBX_NEMBED);
17431 if (alloclen < reqlen) {
17432 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17433 "6120 Allocated DMA memory size (%d) is "
17434 "less than the requested DMA memory "
17435 "size (%d)\n", alloclen, reqlen);
17436 lpfc_sli4_mbox_cmd_free(phba, mbox);
17440 /* Get the first SGE entry from the non-embedded DMA memory */
17441 viraddr = mbox->sge_array->addr[0];
17443 /* Set up the SGL pages in the non-embedded DMA pages */
17444 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17445 sgl_pg_pairs = &sgl->sgl_pg_pairs;
17448 list_for_each_entry(lpfc_ncmd, nblist, list) {
17449 /* Set up the sge entry */
17450 sgl_pg_pairs->sgl_pg0_addr_lo =
17451 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
17452 sgl_pg_pairs->sgl_pg0_addr_hi =
17453 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
17454 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
17455 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
17458 pdma_phys_bpl1 = 0;
17459 sgl_pg_pairs->sgl_pg1_addr_lo =
17460 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
17461 sgl_pg_pairs->sgl_pg1_addr_hi =
17462 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
17463 /* Keep the first xritag on the list */
17465 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
17469 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17470 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
17471 /* Perform endian conversion if necessary */
17472 sgl->word0 = cpu_to_le32(sgl->word0);
17474 if (!phba->sli4_hba.intr_enable) {
17475 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17477 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17478 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17480 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
17481 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17482 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17483 if (rc != MBX_TIMEOUT)
17484 lpfc_sli4_mbox_cmd_free(phba, mbox);
17485 if (shdr_status || shdr_add_status || rc) {
17486 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17487 "6125 POST_SGL_BLOCK mailbox command failed "
17488 "status x%x add_status x%x mbx status x%x\n",
17489 shdr_status, shdr_add_status, rc);
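/*
 * Editor's note: a guarded sketch of the page-1 address derivation used in
 * the loop above; when the configured DMA buffer size spans more than one
 * SGL page the second page immediately follows the first. The helper name
 * and guard macro are invented for this illustration.
 */
#ifdef LPFC_DOC_EXAMPLES
static dma_addr_t lpfc_example_sgl_pg1_addr(struct lpfc_hba *phba,
					    dma_addr_t sgl_pg0_addr)
{
	if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
		return sgl_pg0_addr + SGL_PAGE_SIZE;	/* second page */
	return 0;	/* single-page SGL, page 1 unused */
}
#endif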
17496 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
17497 * @phba: pointer to lpfc hba data structure.
17498 * @post_nblist: pointer to the nvme buffer list.
17499 * @sb_count: number of nvme buffers.
17501 * This routine walks a list of nvme buffers that was passed in. It attempts
17502 * to construct blocks of nvme buffer sgls which contain contiguous xris and
17503 * uses the non-embedded SGL block post mailbox commands to post to the port.
17504 * For a single NVME buffer sgl with a non-contiguous xri, if any, it uses
17505 * the embedded SGL post mailbox command for posting. The @post_nblist passed
17506 * in must be a local list, thus no lock is needed when manipulating the list.
17508 * Returns: 0 = failure, non-zero number of successfully posted buffers.
17511 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
17512 struct list_head *post_nblist, int sb_count)
17514 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
17515 int status, sgl_size;
17516 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
17517 dma_addr_t pdma_phys_sgl1;
17518 int last_xritag = NO_XRI;
17520 LIST_HEAD(prep_nblist);
17521 LIST_HEAD(blck_nblist);
17522 LIST_HEAD(nvme_nblist);
17528 sgl_size = phba->cfg_sg_dma_buf_size;
17529 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
17530 list_del_init(&lpfc_ncmd->list);
17532 if ((last_xritag != NO_XRI) &&
17533 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
17534 /* a hole in xri block, form a sgl posting block */
17535 list_splice_init(&prep_nblist, &blck_nblist);
17536 post_cnt = block_cnt - 1;
17537 /* prepare list for next posting block */
17538 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17541 /* prepare list for next posting block */
17542 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17543 /* enough sgls for non-embed sgl mbox command */
17544 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
17545 list_splice_init(&prep_nblist, &blck_nblist);
17546 post_cnt = block_cnt;
17551 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17553 /* end of repost sgl list condition for NVME buffers */
17554 if (num_posting == sb_count) {
17555 if (post_cnt == 0) {
17556 /* last sgl posting block */
17557 list_splice_init(&prep_nblist, &blck_nblist);
17558 post_cnt = block_cnt;
17559 } else if (block_cnt == 1) {
17560 /* last single sgl with non-contiguous xri */
17561 if (sgl_size > SGL_PAGE_SIZE)
17563 lpfc_ncmd->dma_phys_sgl +
17566 pdma_phys_sgl1 = 0;
17567 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17568 status = lpfc_sli4_post_sgl(
17569 phba, lpfc_ncmd->dma_phys_sgl,
17570 pdma_phys_sgl1, cur_xritag);
17572 /* Post error. Buffer unavailable. */
17573 lpfc_ncmd->flags |=
17574 LPFC_SBUF_NOT_POSTED;
17576 /* Post success. Buffer available. */
17577 lpfc_ncmd->flags &=
17578 ~LPFC_SBUF_NOT_POSTED;
17579 lpfc_ncmd->status = IOSTAT_SUCCESS;
17582 /* success, put on NVME buffer sgl list */
17583 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17587 /* continue until a non-embedded page worth of sgls */
17591 /* post block of NVME buffer list sgls */
17592 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
17595 /* don't reset xritag due to hole in xri block */
17596 if (block_cnt == 0)
17597 last_xritag = NO_XRI;
17599 /* reset NVME buffer post count for next round of posting */
17602 /* put posted NVME buffer-sgl posted on NVME buffer sgl list */
17603 while (!list_empty(&blck_nblist)) {
17604 list_remove_head(&blck_nblist, lpfc_ncmd,
17605 struct lpfc_io_buf, list);
17607 /* Post error. Mark buffer unavailable. */
17608 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
17610 /* Post success. Mark buffer available. */
17611 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
17612 lpfc_ncmd->status = IOSTAT_SUCCESS;
17615 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17618 /* Push NVME buffers with sgl posted to the available list */
17619 lpfc_io_buf_replenish(phba, &nvme_nblist);
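/*
 * Editor's note: a guarded sketch of the xri-contiguity test that decides,
 * in the walk above, whether a buffer can join the current posting block
 * or must start a new one. The helper name and guard macro are invented.
 */
#ifdef LPFC_DOC_EXAMPLES
static bool lpfc_example_xri_contiguous(int last_xritag, uint16_t next_xritag)
{
	return last_xritag == NO_XRI || next_xritag == last_xritag + 1;
}
#endif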
17625 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
17626 * @phba: pointer to lpfc_hba struct that the frame was received on
17627 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17629 * This function checks the fields in the @fc_hdr to see if the FC frame is a
17630 * valid type of frame that the LPFC driver will handle. This function will
17631 * return zero if the frame is a valid frame or a non-zero value when the
17632 * frame does not pass the check.
17635 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
17638 struct fc_vft_header *fc_vft_hdr;
17639 uint32_t *header = (uint32_t *) fc_hdr;
17641 #define FC_RCTL_MDS_DIAGS 0xF4
17643 switch (fc_hdr->fh_r_ctl) {
17644 case FC_RCTL_DD_UNCAT: /* uncategorized information */
17645 case FC_RCTL_DD_SOL_DATA: /* solicited data */
17646 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
17647 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
17648 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
17649 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
17650 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
17651 case FC_RCTL_DD_CMD_STATUS: /* command status */
17652 case FC_RCTL_ELS_REQ: /* extended link services request */
17653 case FC_RCTL_ELS_REP: /* extended link services reply */
17654 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
17655 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
17656 case FC_RCTL_BA_NOP: /* basic link service NOP */
17657 case FC_RCTL_BA_ABTS: /* basic link service abort */
17658 case FC_RCTL_BA_RMC: /* remove connection */
17659 case FC_RCTL_BA_ACC: /* basic accept */
17660 case FC_RCTL_BA_RJT: /* basic reject */
17661 case FC_RCTL_BA_PRMT:
17662 case FC_RCTL_ACK_1: /* acknowledge_1 */
17663 case FC_RCTL_ACK_0: /* acknowledge_0 */
17664 case FC_RCTL_P_RJT: /* port reject */
17665 case FC_RCTL_F_RJT: /* fabric reject */
17666 case FC_RCTL_P_BSY: /* port busy */
17667 case FC_RCTL_F_BSY: /* fabric busy to data frame */
17668 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
17669 case FC_RCTL_LCR: /* link credit reset */
17670 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
17671 case FC_RCTL_END: /* end */
17673 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
17674 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17675 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
17676 return lpfc_fc_frame_check(phba, fc_hdr);
17681 switch (fc_hdr->fh_type) {
17694 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
17695 "2538 Received frame rctl:x%x, type:x%x, "
17696 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
17697 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
17698 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
17699 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
17700 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
17701 be32_to_cpu(header[6]));
17704 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
17705 "2539 Dropped frame rctl:x%x type:x%x\n",
17706 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
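/*
 * Editor's note: a guarded sketch of how a receive path consumes the check
 * above, dropping the buffer on a non-zero return, as
 * lpfc_sli4_handle_received_buffer() does later in this file. The helper
 * name and guard macro are invented for this illustration.
 */
#ifdef LPFC_DOC_EXAMPLES
static bool lpfc_example_frame_ok(struct lpfc_hba *phba,
				  struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *fc_hdr = dmabuf->hbuf.virt;

	return lpfc_fc_frame_check(phba, fc_hdr) == 0;
}
#endif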
17711 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
17712 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17714 * This function processes the FC header to retrieve the VFI from the VF
17715 * header, if one exists. This function will return the VFI if one exists
17716 * or 0 if no VSAN Header exists.
17719 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
17721 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17723 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
17725 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
17729 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
17730 * @phba: Pointer to the HBA structure to search for the vport on
17731 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17732 * @fcfi: The FC Fabric ID that the frame came from
17733 * @did: Destination ID to match against
17735 * This function searches the @phba for a vport that matches the content of the
17736 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
17737 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
17738 * returns the matching vport pointer or NULL if unable to match frame to a
17739 * vport.
17741 static struct lpfc_vport *
17742 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
17743 uint16_t fcfi, uint32_t did)
17745 struct lpfc_vport **vports;
17746 struct lpfc_vport *vport = NULL;
17749 if (did == Fabric_DID)
17750 return phba->pport;
17751 if ((phba->pport->fc_flag & FC_PT2PT) &&
17752 phba->link_state != LPFC_HBA_READY)
17753 return phba->pport;
17755 vports = lpfc_create_vport_work_array(phba);
17756 if (vports != NULL) {
17757 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
17758 if (phba->fcf.fcfi == fcfi &&
17759 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
17760 vports[i]->fc_myDID == did) {
17766 lpfc_destroy_vport_work_array(phba, vports);
17771 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
17772 * @vport: The vport to work on.
17774 * This function updates the receive sequence time stamp for this vport. The
17775 * receive sequence time stamp indicates the time that the last frame of
17776 * the sequence that has been idle for the longest amount of time was
17777 * received. The driver uses this time stamp to indicate if any received
17778 * sequences have timed out.
17781 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
17783 struct lpfc_dmabuf *h_buf;
17784 struct hbq_dmabuf *dmabuf = NULL;
17786 /* get the oldest sequence on the rcv list */
17787 h_buf = list_get_first(&vport->rcv_buffer_list,
17788 struct lpfc_dmabuf, list);
17791 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17792 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
17796 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
17797 * @vport: The vport that the received sequences were sent to.
17799 * This function cleans up all outstanding received sequences. This is called
17800 * by the driver when a link event or user action invalidates all the received
17804 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
17806 struct lpfc_dmabuf *h_buf, *hnext;
17807 struct lpfc_dmabuf *d_buf, *dnext;
17808 struct hbq_dmabuf *dmabuf = NULL;
17810 /* start with the oldest sequence on the rcv list */
17811 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17812 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17813 list_del_init(&dmabuf->hbuf.list);
17814 list_for_each_entry_safe(d_buf, dnext,
17815 &dmabuf->dbuf.list, list) {
17816 list_del_init(&d_buf->list);
17817 lpfc_in_buf_free(vport->phba, d_buf);
17819 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17824 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
17825 * @vport: The vport that the received sequences were sent to.
17827 * This function determines whether any received sequences have timed out by
17828 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
17829 * indicates that there is at least one timed out sequence this routine will
17830 * go through the received sequences one at a time from most inactive to most
17831 * active to determine which ones need to be cleaned up. Once it has determined
17832 * that a sequence needs to be cleaned up it will simply free up the resources
17833 * without sending an abort.
17836 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
17838 struct lpfc_dmabuf *h_buf, *hnext;
17839 struct lpfc_dmabuf *d_buf, *dnext;
17840 struct hbq_dmabuf *dmabuf = NULL;
17841 unsigned long timeout;
17842 int abort_count = 0;
17844 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17845 vport->rcv_buffer_time_stamp);
17846 if (list_empty(&vport->rcv_buffer_list) ||
17847 time_before(jiffies, timeout))
17849 /* start with the oldest sequence on the rcv list */
17850 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17851 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17852 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17853 dmabuf->time_stamp);
17854 if (time_before(jiffies, timeout))
17857 list_del_init(&dmabuf->hbuf.list);
17858 list_for_each_entry_safe(d_buf, dnext,
17859 &dmabuf->dbuf.list, list) {
17860 list_del_init(&d_buf->list);
17861 lpfc_in_buf_free(vport->phba, d_buf);
17863 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17866 lpfc_update_rcv_time_stamp(vport);
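/*
 * Editor's note: a guarded sketch of the E_D_TOV expiry test applied per
 * sequence above; a sequence is stale once its time stamp plus E_D_TOV
 * (milliseconds) has passed. The helper name and guard macro are invented.
 */
#ifdef LPFC_DOC_EXAMPLES
static bool lpfc_example_seq_timed_out(struct lpfc_vport *vport,
				       unsigned long time_stamp)
{
	unsigned long timeout = time_stamp +
				msecs_to_jiffies(vport->phba->fc_edtov);

	return time_after_eq(jiffies, timeout);
}
#endif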
17870 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
17871 * @vport: pointer to a virtual port
17872 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
17874 * This function searches through the existing incomplete sequences that have
17875 * been sent to this @vport. If the frame matches one of the incomplete
17876 * sequences then the dbuf in the @dmabuf is added to the list of frames that
17877 * make up that sequence. If no sequence is found that matches this frame then
17878 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
17879 * This function returns a pointer to the first dmabuf in the sequence list that
17880 * the frame was linked to.
17882 static struct hbq_dmabuf *
17883 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17885 struct fc_frame_header *new_hdr;
17886 struct fc_frame_header *temp_hdr;
17887 struct lpfc_dmabuf *d_buf;
17888 struct lpfc_dmabuf *h_buf;
17889 struct hbq_dmabuf *seq_dmabuf = NULL;
17890 struct hbq_dmabuf *temp_dmabuf = NULL;
17893 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17894 dmabuf->time_stamp = jiffies;
17895 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17897 /* Use the hdr_buf to find the sequence that this frame belongs to */
17898 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17899 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17900 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17901 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17902 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17904 /* found a pending sequence that matches this frame */
17905 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17910 * This indicates first frame received for this sequence.
17911 * Queue the buffer on the vport's rcv_buffer_list.
17913 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17914 lpfc_update_rcv_time_stamp(vport);
17917 temp_hdr = seq_dmabuf->hbuf.virt;
17918 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
17919 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17920 list_del_init(&seq_dmabuf->hbuf.list);
17921 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17922 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17923 lpfc_update_rcv_time_stamp(vport);
17926 /* move this sequence to the tail to indicate a young sequence */
17927 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
17928 seq_dmabuf->time_stamp = jiffies;
17929 lpfc_update_rcv_time_stamp(vport);
17930 if (list_empty(&seq_dmabuf->dbuf.list)) {
17931 temp_hdr = dmabuf->hbuf.virt;
17932 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17935 /* find the correct place in the sequence to insert this frame */
17936 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
17938 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17939 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
17941 * If the frame's sequence count is greater than the frame on
17942 * the list then insert the frame right after this frame
17944 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
17945 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17946 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
17951 if (&d_buf->list == &seq_dmabuf->dbuf.list)
17953 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
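/*
 * Editor's note: a guarded sketch of the three-part key used above to match
 * a frame to a pending sequence: SEQ_ID, OX_ID and the 3-byte S_ID must all
 * agree. The helper name and guard macro are invented for this illustration.
 */
#ifdef LPFC_DOC_EXAMPLES
static bool lpfc_example_same_sequence(struct fc_frame_header *a,
				       struct fc_frame_header *b)
{
	return a->fh_seq_id == b->fh_seq_id &&
	       a->fh_ox_id == b->fh_ox_id &&
	       !memcmp(&a->fh_s_id, &b->fh_s_id, 3);
}
#endif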
17962 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
17963 * @vport: pointer to a virtual port
17964 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17966 * This function tries to abort the partially assembled sequence described
17967 * by the information from the basic abort @dmabuf. It checks whether such
17968 * a partially assembled sequence is held by the driver. If so, it frees up
17969 * all the frames from the partially assembled sequence.
17972 * true -- if there is matching partially assembled sequence present and all
17973 * the frames freed with the sequence;
17974 * false -- if there is no matching partially assembled sequence present so
17975 * nothing got aborted in the lower layer driver
17978 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
17979 struct hbq_dmabuf *dmabuf)
17981 struct fc_frame_header *new_hdr;
17982 struct fc_frame_header *temp_hdr;
17983 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
17984 struct hbq_dmabuf *seq_dmabuf = NULL;
17986 /* Use the hdr_buf to find the sequence that matches this frame */
17987 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17988 INIT_LIST_HEAD(&dmabuf->hbuf.list);
17989 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17990 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17991 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17992 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17993 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17994 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17996 /* found a pending sequence that matches this frame */
17997 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18001 /* Free up all the frames from the partially assembled sequence */
18003 list_for_each_entry_safe(d_buf, n_buf,
18004 &seq_dmabuf->dbuf.list, list) {
18005 list_del_init(&d_buf->list);
18006 lpfc_in_buf_free(vport->phba, d_buf);
18014 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
18015 * @vport: pointer to a virtual port
18016 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18018 * This function tries to abort the assembled sequence at the upper level
18019 * protocol, described by the information from the basic abort @dmabuf. It
18020 * checks whether such a pending context exists at the upper level protocol.
18021 * If so, it cleans up the pending context.
18024 * true -- if there is matching pending context of the sequence cleaned
18026 * false -- if there is no matching pending context of the sequence present
18030 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18032 struct lpfc_hba *phba = vport->phba;
18035 /* Accepting abort at ulp with SLI4 only */
18036 if (phba->sli_rev < LPFC_SLI_REV4)
18039 /* Register all caring upper level protocols to attend abort */
18040 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
18048 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
18049 * @phba: Pointer to HBA context object.
18050 * @cmd_iocbq: pointer to the command iocbq structure.
18051 * @rsp_iocbq: pointer to the response iocbq structure.
18053 * This function handles the sequence abort response iocb command complete
18054 * event. It properly releases the memory allocated to the sequence abort
18058 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
18059 struct lpfc_iocbq *cmd_iocbq,
18060 struct lpfc_iocbq *rsp_iocbq)
18062 struct lpfc_nodelist *ndlp;
18065 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
18066 lpfc_nlp_put(ndlp);
18067 lpfc_nlp_not_used(ndlp);
18068 lpfc_sli_release_iocbq(phba, cmd_iocbq);
18071 /* Failure means BLS ABORT RSP did not get delivered to remote node */
18072 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
18073 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18074 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
18075 rsp_iocbq->iocb.ulpStatus,
18076 rsp_iocbq->iocb.un.ulpWord[4]);
18080 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
18081 * @phba: Pointer to HBA context object.
18082 * @xri: xri id in transaction.
18084 * This function validates that the xri maps to the known range of XRIs
18085 * allocated and used by the driver.
18088 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
18093 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
18094 if (xri == phba->sli4_hba.xri_ids[i])
18101 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
18102 * @vport: pointer to a virtual port.
18103 * @fc_hdr: pointer to a FC frame header.
18104 * @aborted: was the partially assembled receive sequence successfully aborted
18106 * This function sends a basic response to a previous unsol sequence abort
18107 * event after aborting the sequence handling.
18110 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
18111 struct fc_frame_header *fc_hdr, bool aborted)
18113 struct lpfc_hba *phba = vport->phba;
18114 struct lpfc_iocbq *ctiocb = NULL;
18115 struct lpfc_nodelist *ndlp;
18116 uint16_t oxid, rxid, xri, lxri;
18117 uint32_t sid, fctl;
18121 if (!lpfc_is_link_up(phba))
18124 sid = sli4_sid_from_fc_hdr(fc_hdr);
18125 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
18126 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
18128 ndlp = lpfc_findnode_did(vport, sid);
18130 ndlp = lpfc_nlp_init(vport, sid);
18132 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
18133 "1268 Failed to allocate ndlp for "
18134 "oxid:x%x SID:x%x\n", oxid, sid);
18137 /* Put ndlp onto pport node list */
18138 lpfc_enqueue_node(vport, ndlp);
18141 /* Allocate buffer for rsp iocb */
18142 ctiocb = lpfc_sli_get_iocbq(phba);
18146 /* Extract the F_CTL field from FC_HDR */
18147 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
18149 icmd = &ctiocb->iocb;
18150 icmd->un.xseq64.bdl.bdeSize = 0;
18151 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
18152 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
18153 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
18154 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
18156 /* Fill in the rest of iocb fields */
18157 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
18158 icmd->ulpBdeCount = 0;
18160 icmd->ulpClass = CLASS3;
18161 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
18162 ctiocb->context1 = lpfc_nlp_get(ndlp);
18163 if (!ctiocb->context1) {
18164 lpfc_sli_release_iocbq(phba, ctiocb);
18168 ctiocb->vport = phba->pport;
18169 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
18170 ctiocb->sli4_lxritag = NO_XRI;
18171 ctiocb->sli4_xritag = NO_XRI;
18173 if (fctl & FC_FC_EX_CTX)
18174 /* Exchange responder sent the abort so we
18180 lxri = lpfc_sli4_xri_inrange(phba, xri);
18181 if (lxri != NO_XRI)
18182 lpfc_set_rrq_active(phba, ndlp, lxri,
18183 (xri == oxid) ? rxid : oxid, 0);
18184 /* For BA_ABTS from exchange responder, if the logical xri with
18185 * the oxid maps to the FCP XRI range, the port no longer has
18186 * that exchange context, send a BLS_RJT. Override the IOCB for
18187 * a BA_RJT.
18189 if ((fctl & FC_FC_EX_CTX) &&
18190 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
18191 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
18192 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
18193 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
18194 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
18197 /* If BA_ABTS failed to abort a partially assembled receive sequence,
18198 * the driver no longer has that exchange, send a BLS_RJT. Override
18199 * the IOCB for a BA_RJT.
18201 if (!aborted) {
18202 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
18203 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
18204 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
18205 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
18208 if (fctl & FC_FC_EX_CTX) {
18209 /* ABTS sent by responder to CT exchange, construction
18210 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
18211 * field and RX_ID from ABTS for RX_ID field.
18213 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
18215 /* ABTS sent by initiator to CT exchange, construction
18216 * of BA_ACC will need to allocate a new XRI as for the
18219 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
18221 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
18222 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
18224 /* Xmit CT abts response on exchange <xid> */
18225 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
18226 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
18227 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
18229 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
18230 if (rc == IOCB_ERROR) {
18231 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
18232 "2925 Failed to issue CT ABTS RSP x%x on "
18233 "xri x%x, Data x%x\n",
18234 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
18236 lpfc_nlp_put(ndlp);
18237 ctiocb->context1 = NULL;
18238 lpfc_sli_release_iocbq(phba, ctiocb);
18243 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
18244 * @vport: Pointer to the vport on which this sequence was received
18245 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18247 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
18248 * receive sequence is only partially assembled by the driver, it shall abort
18249 * the partially assembled frames for the sequence. Otherwise, if the
18250 * unsolicited receive sequence has been completely assembled and passed to
18251 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to
18252 * indicate that the unsolicited sequence has been aborted. After that, it
18253 * will issue a basic accept (BA_ACC) for the abort.
18256 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
18257 struct hbq_dmabuf *dmabuf)
18259 struct lpfc_hba *phba = vport->phba;
18260 struct fc_frame_header fc_hdr;
18264 /* Make a copy of fc_hdr before the dmabuf is released */
18265 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
18266 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
18268 if (fctl & FC_FC_EX_CTX) {
18269 /* ABTS by responder to exchange, no cleanup needed */
18272 /* ABTS by initiator to exchange, need to do cleanup */
18273 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
18274 if (!aborted)
18275 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
18277 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18279 if (phba->nvmet_support) {
18280 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
18284 /* Respond with BA_ACC or BA_RJT accordingly */
18285 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
18289 * lpfc_seq_complete - Indicates if a sequence is complete
18290 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18292 * This function checks the sequence, starting with the frame described by
18293 * @dmabuf, to see if all the frames associated with this sequence are present.
18294 * The frames associated with this sequence are linked to the @dmabuf using
18295 * the dbuf list. This function looks for three major things: 1) the first
18296 * frame has a sequence count of zero; 2) there is a frame with the last
18297 * frame of the sequence bit set; 3) there are no holes in the sequence
18298 * count. The function returns 1 when the sequence is complete, otherwise 0.
18301 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
18303 struct fc_frame_header *hdr;
18304 struct lpfc_dmabuf *d_buf;
18305 struct hbq_dmabuf *seq_dmabuf;
18309 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18310 /* make sure first frame of sequence has a sequence count of zero */
18311 if (hdr->fh_seq_cnt != seq_count)
18313 fctl = (hdr->fh_f_ctl[0] << 16 |
18314 hdr->fh_f_ctl[1] << 8 |
18316 /* If last frame of sequence we can return success. */
18317 if (fctl & FC_FC_END_SEQ)
18319 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
18320 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18321 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18322 /* If there is a hole in the sequence count then fail. */
18323 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
18325 fctl = (hdr->fh_f_ctl[0] << 16 |
18326 hdr->fh_f_ctl[1] << 8 |
18328 /* If last frame of sequence we can return success. */
18329 if (fctl & FC_FC_END_SEQ)
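/*
 * Editor's note: a guarded sketch of the F_CTL fold-up used above; the
 * header carries F_CTL as three big-endian bytes, which the driver packs
 * into a host word before testing bits such as FC_FC_END_SEQ. The helper
 * name and guard macro are invented for this illustration.
 */
#ifdef LPFC_DOC_EXAMPLES
static uint32_t lpfc_example_fctl(struct fc_frame_header *hdr)
{
	return (hdr->fh_f_ctl[0] << 16) |
	       (hdr->fh_f_ctl[1] << 8) |
	       hdr->fh_f_ctl[2];
}
#endif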
18336 * lpfc_prep_seq - Prep sequence for ULP processing
18337 * @vport: Pointer to the vport on which this sequence was received
18338 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
18340 * This function takes a sequence, described by a list of frames, and creates
18341 * a list of iocbq structures to describe the sequence. This iocbq list will be
18342 * used to issue to the generic unsolicited sequence handler. This routine
18343 * returns a pointer to the first iocbq in the list. If the function is unable
18344 * to allocate an iocbq then it throws out the received frames that were not
18345 * able to be described and returns a pointer to the first iocbq. If unable to
18346 * allocate any iocbqs (including the first) this function will return NULL.
18348 static struct lpfc_iocbq *
18349 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
18351 struct hbq_dmabuf *hbq_buf;
18352 struct lpfc_dmabuf *d_buf, *n_buf;
18353 struct lpfc_iocbq *first_iocbq, *iocbq;
18354 struct fc_frame_header *fc_hdr;
18356 uint32_t len, tot_len;
18357 struct ulp_bde64 *pbde;
18359 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18360 /* remove from receive buffer list */
18361 list_del_init(&seq_dmabuf->hbuf.list);
18362 lpfc_update_rcv_time_stamp(vport);
18363 /* get the Remote Port's SID */
18364 sid = sli4_sid_from_fc_hdr(fc_hdr);
18366 /* Get an iocbq struct to fill in. */
18367 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
18369 /* Initialize the first IOCB. */
18370 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
18371 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
18372 first_iocbq->vport = vport;
18374 /* Check FC Header to see what TYPE of frame we are rcv'ing */
18375 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
18376 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
18377 first_iocbq->iocb.un.rcvels.parmRo =
18378 sli4_did_from_fc_hdr(fc_hdr);
18379 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
18381 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
18382 first_iocbq->iocb.ulpContext = NO_XRI;
18383 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
18384 be16_to_cpu(fc_hdr->fh_ox_id);
18385 /* iocbq is prepped for internal consumption. Physical vpi. */
18386 first_iocbq->iocb.unsli3.rcvsli3.vpi =
18387 vport->phba->vpi_ids[vport->vpi];
18388 /* put the first buffer into the first IOCBq */
18389 tot_len = bf_get(lpfc_rcqe_length,
18390 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
18392 first_iocbq->context2 = &seq_dmabuf->dbuf;
18393 first_iocbq->context3 = NULL;
18394 first_iocbq->iocb.ulpBdeCount = 1;
18395 if (tot_len > LPFC_DATA_BUF_SIZE)
18396 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
18397 LPFC_DATA_BUF_SIZE;
18399 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
18401 first_iocbq->iocb.un.rcvels.remoteID = sid;
18403 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
18405 iocbq = first_iocbq;
18407 * Each IOCBq can have two Buffers assigned, so go through the list
18408 * of buffers for this sequence and save two buffers in each IOCBq
18410 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
18412 lpfc_in_buf_free(vport->phba, d_buf);
18415 if (!iocbq->context3) {
18416 iocbq->context3 = d_buf;
18417 iocbq->iocb.ulpBdeCount++;
18418 /* We need to get the size out of the right CQE */
18419 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18420 len = bf_get(lpfc_rcqe_length,
18421 &hbq_buf->cq_event.cqe.rcqe_cmpl);
18422 pbde = (struct ulp_bde64 *)
18423 &iocbq->iocb.unsli3.sli3Words[4];
18424 if (len > LPFC_DATA_BUF_SIZE)
18425 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
18427 pbde->tus.f.bdeSize = len;
18429 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
18432 iocbq = lpfc_sli_get_iocbq(vport->phba);
18435 first_iocbq->iocb.ulpStatus =
18436 IOSTAT_FCP_RSP_ERROR;
18437 first_iocbq->iocb.un.ulpWord[4] =
18438 IOERR_NO_RESOURCES;
18440 lpfc_in_buf_free(vport->phba, d_buf);
18443 /* We need to get the size out of the right CQE */
18444 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18445 len = bf_get(lpfc_rcqe_length,
18446 &hbq_buf->cq_event.cqe.rcqe_cmpl);
18447 iocbq->context2 = d_buf;
18448 iocbq->context3 = NULL;
18449 iocbq->iocb.ulpBdeCount = 1;
18450 if (len > LPFC_DATA_BUF_SIZE)
18451 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
18452 LPFC_DATA_BUF_SIZE;
18454 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
18457 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
18459 iocbq->iocb.un.rcvels.remoteID = sid;
18460 list_add_tail(&iocbq->list, &first_iocbq->list);
18463 /* Free the sequence's header buffer */
18465 lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
18467 return first_iocbq;
18471 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
18472 struct hbq_dmabuf *seq_dmabuf)
18474 struct fc_frame_header *fc_hdr;
18475 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
18476 struct lpfc_hba *phba = vport->phba;
18478 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18479 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
18481 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18482 "2707 Ring %d handler: Failed to allocate "
18483 "iocb Rctl x%x Type x%x received\n",
18485 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18488 if (!lpfc_complete_unsol_iocb(phba,
18489 phba->sli4_hba.els_wq->pring,
18490 iocbq, fc_hdr->fh_r_ctl,
18492 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18493 "2540 Ring %d handler: unexpected Rctl "
18494 "x%x Type x%x received\n",
18496 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18498 /* Free iocb created in lpfc_prep_seq */
18499 list_for_each_entry_safe(curr_iocb, next_iocb,
18500 &iocbq->list, list) {
18501 list_del_init(&curr_iocb->list);
18502 lpfc_sli_release_iocbq(phba, curr_iocb);
18504 lpfc_sli_release_iocbq(phba, iocbq);
18508 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
18509 struct lpfc_iocbq *rspiocb)
18511 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
18513 if (pcmd && pcmd->virt)
18514 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18516 lpfc_sli_release_iocbq(phba, cmdiocb);
18517 lpfc_drain_txq(phba);
18521 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
18522 struct hbq_dmabuf *dmabuf)
18524 struct fc_frame_header *fc_hdr;
18525 struct lpfc_hba *phba = vport->phba;
18526 struct lpfc_iocbq *iocbq = NULL;
18527 union lpfc_wqe *wqe;
18528 struct lpfc_dmabuf *pcmd = NULL;
18529 uint32_t frame_len;
18531 unsigned long iflags;
18533 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18534 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
18536 /* Send the received frame back */
18537 iocbq = lpfc_sli_get_iocbq(phba);
18539 /* Queue cq event and wakeup worker thread to process it */
18540 spin_lock_irqsave(&phba->hbalock, iflags);
18541 list_add_tail(&dmabuf->cq_event.list,
18542 &phba->sli4_hba.sp_queue_event);
18543 phba->hba_flag |= HBA_SP_QUEUE_EVT;
18544 spin_unlock_irqrestore(&phba->hbalock, iflags);
18545 lpfc_worker_wake_up(phba);
18549 /* Allocate buffer for command payload */
18550 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
18552 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
18554 if (!pcmd || !pcmd->virt)
18557 INIT_LIST_HEAD(&pcmd->list);
18559 /* copyin the payload */
18560 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
18562 /* fill in BDE's for command */
18563 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
18564 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
18565 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
18566 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
18568 iocbq->context2 = pcmd;
18569 iocbq->vport = vport;
18570 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
18571 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
18574 * Setup rest of the iocb as though it were a WQE
18575 * Build the SEND_FRAME WQE
18577 wqe = (union lpfc_wqe *)&iocbq->iocb;
18579 wqe->send_frame.frame_len = frame_len;
18580 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
18581 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
18582 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
18583 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
18584 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
18585 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
18587 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
18588 iocbq->iocb.ulpLe = 1;
18589 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
18590 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
18591 if (rc == IOCB_ERROR)
18594 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18598 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
18599 "2023 Unable to process MDS loopback frame\n");
18600 if (pcmd && pcmd->virt)
18601 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18604 lpfc_sli_release_iocbq(phba, iocbq);
18605 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18609 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
18610 * @phba: Pointer to HBA context object.
18611 * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
18613 * This function is called with no lock held. This function processes all
18614 * the received buffers and gives them to upper layers when a received buffer
18615 * indicates that it is the final frame in the sequence. The interrupt
18616 * service routine processes received buffers at interrupt contexts.
18617 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
18618 * appropriate receive function when the final frame in a sequence is received.
18621 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
18622 struct hbq_dmabuf *dmabuf)
18624 struct hbq_dmabuf *seq_dmabuf;
18625 struct fc_frame_header *fc_hdr;
18626 struct lpfc_vport *vport;
18630 /* Process each received buffer */
18631 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18633 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
18634 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
18635 vport = phba->pport;
18636 /* Handle MDS Loopback frames */
18637 if (!(phba->pport->load_flag & FC_UNLOADING))
18638 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18640 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18644 /* check to see if this is a valid type of frame */
18645 if (lpfc_fc_frame_check(phba, fc_hdr)) {
18646 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18650 if ((bf_get(lpfc_cqe_code,
18651 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
18652 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
18653 &dmabuf->cq_event.cqe.rcqe_cmpl);
18655 fcfi = bf_get(lpfc_rcqe_fcf_id,
18656 &dmabuf->cq_event.cqe.rcqe_cmpl);
18658 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS && fc_hdr->fh_type == 0xFF) {
18659 vport = phba->pport;
18660 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18661 "2023 MDS Loopback %d bytes\n",
18662 bf_get(lpfc_rcqe_length,
18663 &dmabuf->cq_event.cqe.rcqe_cmpl));
18664 /* Handle MDS Loopback frames */
18665 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18669 /* d_id this frame is directed to */
18670 did = sli4_did_from_fc_hdr(fc_hdr);
18672 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
18674 /* throw out the frame */
18675 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18679 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
18680 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
18681 (did != Fabric_DID)) {
18683 * Throw out the frame if we are not pt2pt.
18684 * The pt2pt protocol allows for discovery frames
18685 * to be received without a registered VPI.
18687 if (!(vport->fc_flag & FC_PT2PT) ||
18688 (phba->link_state == LPFC_HBA_READY)) {
18689 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18694 /* Handle the basic abort sequence (BA_ABTS) event */
18695 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
18696 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
18700 /* Link this frame */
18701 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
18703 /* unable to add frame to vport - throw it out */
18704 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18707 /* If not last frame in sequence continue processing frames. */
18708 if (!lpfc_seq_complete(seq_dmabuf))
18711 /* Send the complete sequence to the upper layer protocol */
18712 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
18716 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
18717 * @phba: pointer to lpfc hba data structure.
18719 * This routine is invoked to post rpi header templates to the
18720 * HBA consistent with the SLI-4 interface spec. This routine
18721 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
18722 * 64 rpi context headers.
18724 * This routine does not require any locks. Its usage is expected
18725 * to be driver load or reset recovery when the driver is
18726 * active.
18730 * -EIO - The mailbox failed to complete successfully.
18731 * When this error occurs, the driver is not guaranteed
18732 * to have any rpi regions posted to the device and
18733 * must either attempt to repost the regions or take a
18734 * fatal error.
18737 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
18739 struct lpfc_rpi_hdr *rpi_page;
18743 /* SLI4 ports that support extents do not require RPI headers. */
18744 if (!phba->sli4_hba.rpi_hdrs_in_use)
18746 if (phba->sli4_hba.extents_in_use)
18749 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
18751 * Assign the rpi headers a physical rpi only if the driver
18752 * has not initialized those resources. A port reset only
18753 * needs the headers posted.
18755 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
18757 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18759 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
18760 if (rc != MBX_SUCCESS) {
18761 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18762 "2008 Error %d posting all rpi "
18770 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
18771 LPFC_RPI_RSRC_RDY);
18776 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
18777 * @phba: pointer to lpfc hba data structure.
18778 * @rpi_page: pointer to the rpi memory region.
18780 * This routine is invoked to post a single rpi header to the
18781 * HBA consistent with the SLI-4 interface spec. This memory region
18782 * maps up to 64 rpi context regions.
18786 * -ENOMEM - No available memory
18787 * -EIO - The mailbox failed to complete successfully.
18790 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
18792 LPFC_MBOXQ_t *mboxq;
18793 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
18795 uint32_t shdr_status, shdr_add_status;
18796 union lpfc_sli4_cfg_shdr *shdr;
18798 /* SLI4 ports that support extents do not require RPI headers. */
18799 if (!phba->sli4_hba.rpi_hdrs_in_use)
18801 if (phba->sli4_hba.extents_in_use)
18804 /* The port is notified of the header region via a mailbox command. */
18805 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18807 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18808 "2001 Unable to allocate memory for issuing "
18809 "SLI_CONFIG_SPECIAL mailbox command\n");
18813 /* Post all rpi memory regions to the port. */
18814 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
18815 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18816 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18817 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
18818 sizeof(struct lpfc_sli4_cfg_mhdr),
18819 LPFC_SLI4_MBX_EMBED);
18822 /* Post the physical rpi to the port for this rpi header. */
18823 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18824 rpi_page->start_rpi);
18825 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18826 hdr_tmpl, rpi_page->page_count);
18828 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18829 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
18830 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18831 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18832 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18833 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18834 if (rc != MBX_TIMEOUT)
18835 mempool_free(mboxq, phba->mbox_mem_pool);
18836 if (shdr_status || shdr_add_status || rc) {
18837 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18838 "2514 POST_RPI_HDR mailbox failed with "
18839 "status x%x add_status x%x, mbx status x%x\n",
18840 shdr_status, shdr_add_status, rc);
18844 * The next_rpi stores the next logical modulo-64 rpi value used
18845 * to post physical rpis in subsequent rpi postings.
18847 spin_lock_irq(&phba->hbalock);
18848 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18849 spin_unlock_irq(&phba->hbalock);
18855 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
18856 * @phba: pointer to lpfc hba data structure.
18858 * This routine is invoked to allocate an available rpi from the driver's
18859 * rpi bitmask, consistent with the SLI-4 interface spec. When the pool of
18860 * posted rpi headers runs low, it also grows the pool by posting another
18861 * SLI4_PAGE_SIZE rpi header region to the port.
18864 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
18865 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
18868 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
18871 uint16_t max_rpi, rpi_limit;
18872 uint16_t rpi_remaining, lrpi = 0;
18873 struct lpfc_rpi_hdr *rpi_hdr;
18874 unsigned long iflag;
18877 * Fetch the next logical rpi. Because this index is logical,
18878 * the driver starts at 0 each time.
18880 spin_lock_irqsave(&phba->hbalock, iflag);
18881 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
18882 rpi_limit = phba->sli4_hba.next_rpi;
18884 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
18885 if (rpi >= rpi_limit)
18886 rpi = LPFC_RPI_ALLOC_ERROR;
18888 set_bit(rpi, phba->sli4_hba.rpi_bmask);
18889 phba->sli4_hba.max_cfg_param.rpi_used++;
18890 phba->sli4_hba.rpi_count++;
18892 lpfc_printf_log(phba, KERN_INFO,
18893 LOG_NODE | LOG_DISCOVERY,
18894 "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
18895 (int) rpi, max_rpi, rpi_limit);
18898 * Don't try to allocate more rpi header regions if the device limit
18899 * has been exhausted.
18901 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
18902 (phba->sli4_hba.rpi_count >= max_rpi)) {
18903 spin_unlock_irqrestore(&phba->hbalock, iflag);
18908 * RPI header postings are not required for SLI4 ports capable of
18909 * extents.
18911 if (!phba->sli4_hba.rpi_hdrs_in_use) {
18912 spin_unlock_irqrestore(&phba->hbalock, iflag);
18917 * If the driver is running low on rpi resources, allocate another
18918 * page now. Note that the next_rpi value is used because
18919 * it represents how many rpis have headers posted, whereas max_rpi
18920 * notes the maximum number supported by the device.
18922 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
18923 spin_unlock_irqrestore(&phba->hbalock, iflag);
18924 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
18925 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
18927 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18928 "2002 Error Could not grow rpi "
18931 lrpi = rpi_hdr->start_rpi;
18932 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18933 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
18941 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
18942 * @phba: pointer to lpfc hba data structure.
18943 * @rpi: rpi to free
18945 * This routine is invoked to release an rpi to the pool of
18946 * available rpis maintained by the driver.
18949 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18952 * if the rpi value indicates a prior unreg has already
18953 * been done, skip the unreg.
18955 if (rpi == LPFC_RPI_ALLOC_ERROR)
18958 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
18959 phba->sli4_hba.rpi_count--;
18960 phba->sli4_hba.max_cfg_param.rpi_used--;
18962 lpfc_printf_log(phba, KERN_INFO,
18963 LOG_NODE | LOG_DISCOVERY,
18964 "2016 rpi %x not inuse\n",
18970 * lpfc_sli4_free_rpi - Release an rpi for reuse.
18971 * @phba: pointer to lpfc hba data structure.
18972 * @rpi: rpi to free
18974 * This routine acquires the hbalock and calls __lpfc_sli4_free_rpi to
18975 * release @rpi to the pool of available rpis maintained by the driver.
18978 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18980 spin_lock_irq(&phba->hbalock);
18981 __lpfc_sli4_free_rpi(phba, rpi);
18982 spin_unlock_irq(&phba->hbalock);
18986 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
18987 * @phba: pointer to lpfc hba data structure.
18989 * This routine is invoked to free the memory regions used to track
18990 * rpi allocation: the rpi bitmask and the rpi id array.
18993 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
18995 kfree(phba->sli4_hba.rpi_bmask);
18996 kfree(phba->sli4_hba.rpi_ids);
18997 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
19001 * lpfc_sli4_resume_rpi - Resume an rpi with the port
19002 * @ndlp: pointer to lpfc nodelist data structure.
19003 * @cmpl: completion call-back.
19004 * @arg: data to load as mbox 'caller buffer information'
19006 * This routine is invoked to issue a RESUME_RPI mailbox command (via
19007 * lpfc_resume_rpi) so the port resumes the rpi associated with @ndlp.
19010 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
19011 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
19013 LPFC_MBOXQ_t *mboxq;
19014 struct lpfc_hba *phba = ndlp->phba;
19017 /* A mailbox command carries the RESUME_RPI request to the port. */
19018 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19022 /* Construct the RESUME_RPI mailbox command. */
19023 lpfc_resume_rpi(mboxq, ndlp);
19024 if (cmpl) {
19025 mboxq->mbox_cmpl = cmpl;
19026 mboxq->ctx_buf = arg;
19027 mboxq->ctx_ndlp = ndlp;
19028 } else
19029 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19030 mboxq->vport = ndlp->vport;
19031 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19032 if (rc == MBX_NOT_FINISHED) {
19033 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19034 "2010 Resume RPI Mailbox failed "
19035 "status %d, mbxStatus x%x\n", rc,
19036 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19037 mempool_free(mboxq, phba->mbox_mem_pool);
19044 * lpfc_sli4_init_vpi - Initialize a vpi with the port
19045 * @vport: Pointer to the vport for which the vpi is being initialized
19047 * This routine is invoked to activate a vpi with the port.
19049 * Returns:
19050 *	0 if successful
19051 *	-Evalue otherwise
19052 */
19054 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
19056 LPFC_MBOXQ_t *mboxq;
19058 int retval = MBX_SUCCESS;
19060 struct lpfc_hba *phba = vport->phba;
19061 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19064 lpfc_init_vpi(phba, mboxq, vport->vpi);
19065 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
19066 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
19067 if (rc != MBX_SUCCESS) {
19068 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
19069 "2022 INIT VPI Mailbox failed "
19070 "status %d, mbxStatus x%x\n", rc,
19071 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19074 if (rc != MBX_TIMEOUT)
19075 mempool_free(mboxq, vport->phba->mbox_mem_pool);
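/*
 * Editor's sketch (not driver code): the polled-mailbox cleanup rule used
 * in lpfc_sli4_init_vpi() above and in several other routines in this
 * file. After a polled issue, the command is returned to the pool for
 * every outcome except a timeout: on MBX_TIMEOUT the port may still write
 * into the mailbox memory, so the buffer is deliberately not recycled.
 * The demo_* names are hypothetical.
 */
static void demo_polled_mbox_done(int rc, int timeout_rc, void *mboxq,
				  void (*pool_free)(void *))
{
	if (rc != timeout_rc)	/* safe to recycle the mailbox */
		pool_free(mboxq);
	/* on timeout: intentionally not freed; hardware may still own it */
}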
19081 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
19082 * @phba: pointer to lpfc hba data structure.
19083 * @mboxq: Pointer to mailbox object.
19085 * This routine is the completion handler for the ADD_FCF_RECORD mailbox
19086 * command. It checks the IOCTL status embedded in the mailbox subheader,
19087 * logs failures other than STATUS_FCF_IN_USE, and frees the mailbox.
19090 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
19093 union lpfc_sli4_cfg_shdr *shdr;
19094 uint32_t shdr_status, shdr_add_status;
19096 virt_addr = mboxq->sge_array->addr[0];
19097 /* The IOCTL status is embedded in the mailbox subheader. */
19098 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
19099 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19100 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19102 if ((shdr_status || shdr_add_status) &&
19103 (shdr_status != STATUS_FCF_IN_USE))
19104 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19105 "2558 ADD_FCF_RECORD mailbox failed with "
19106 "status x%x add_status x%x\n",
19107 shdr_status, shdr_add_status);
19109 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19113 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
19114 * @phba: pointer to lpfc hba data structure.
19115 * @fcf_record: pointer to the initialized fcf record to add.
19117 * This routine is invoked to manually add a single FCF record. The caller
19118 * must pass a completely initialized FCF_Record. This routine takes
19119 * care of the nonembedded mailbox operations.
19122 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
19125 LPFC_MBOXQ_t *mboxq;
19128 struct lpfc_mbx_sge sge;
19129 uint32_t alloc_len, req_len;
19132 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19134 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19135 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
19139 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
19140 sizeof(uint32_t);
19142 /* Allocate DMA memory and set up the non-embedded mailbox command */
19143 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19144 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
19145 req_len, LPFC_SLI4_MBX_NEMBED);
19146 if (alloc_len < req_len) {
19147 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19148 "2523 Allocated DMA memory size (x%x) is "
19149 "less than the requested DMA memory "
19150 "size (x%x)\n", alloc_len, req_len);
19151 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19156 * Get the first SGE entry from the non-embedded DMA memory. This
19157 * routine only uses a single SGE.
19159 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
19160 virt_addr = mboxq->sge_array->addr[0];
19162 * Configure the FCF record for FCFI 0. This is the driver's
19163 * hardcoded default and gets used in non-FIP mode.
19165 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
19166 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
19167 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
19170 * Copy the fcf_index and the FCF Record Data. The data starts after
19171 * the FCoE header plus word10. The data copy needs to be endian
19172 * correct.
19173 */
19174 bytep += sizeof(uint32_t);
19175 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
19176 mboxq->vport = phba->pport;
19177 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
19178 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19179 if (rc == MBX_NOT_FINISHED) {
19180 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19181 "2515 ADD_FCF_RECORD mailbox failed with "
19182 "status 0x%x\n", rc);
19183 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19192 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
19193 * @phba: pointer to lpfc hba data structure.
19194 * @fcf_record: pointer to the fcf record to write the default data.
19195 * @fcf_index: FCF table entry index.
19197 * This routine is invoked to build the driver's default FCF record. The
19198 * values used are hardcoded. This routine handles memory initialization.
19202 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
19203 struct fcf_record *fcf_record,
19204 uint16_t fcf_index)
19206 memset(fcf_record, 0, sizeof(struct fcf_record));
19207 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
19208 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
19209 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
19210 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
19211 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
19212 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
19213 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
19214 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
19215 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
19216 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
19217 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
19218 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
19219 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
19220 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
19221 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
19222 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
19223 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
19224 /* Set the VLAN bit map */
19225 if (phba->valid_vlan) {
19226 fcf_record->vlan_bitmap[phba->vlan_id / 8]
19227 = 1 << (phba->vlan_id % 8);
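/*
 * Editor's sketch (not driver code): the VLAN bitmap indexing used just
 * above. Each byte of the bitmap holds eight VLAN ids: vlan_id / 8 picks
 * the byte and vlan_id % 8 picks the bit within it. For example, VLAN id
 * 19 lands on bit 3 of byte 2. demo_set_vlan() is a hypothetical helper.
 */
static inline void demo_set_vlan(unsigned char *bitmap, unsigned int vlan_id)
{
	bitmap[vlan_id / 8] |= 1 << (vlan_id % 8);
}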
19232 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
19233 * @phba: pointer to lpfc hba data structure.
19234 * @fcf_index: FCF table entry offset.
19236 * This routine is invoked to scan the entire FCF table by reading FCF
19237 * record and processing it one at a time starting from the @fcf_index
19238 * for initial FCF discovery or fast FCF failover rediscovery.
19240 * Return 0 if the mailbox command is submitted successfully, nonzero
19241 * otherwise.
19242 */
19244 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19247 LPFC_MBOXQ_t *mboxq;
19249 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
19250 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
19251 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19253 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19254 "2000 Failed to allocate mbox for "
19257 goto fail_fcf_scan;
19259 /* Construct the read FCF record mailbox command */
19260 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19263 goto fail_fcf_scan;
19265 /* Issue the mailbox command asynchronously */
19266 mboxq->vport = phba->pport;
19267 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
19269 spin_lock_irq(&phba->hbalock);
19270 phba->hba_flag |= FCF_TS_INPROG;
19271 spin_unlock_irq(&phba->hbalock);
19273 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19274 if (rc == MBX_NOT_FINISHED)
19277 /* Reset eligible FCF count for new scan */
19278 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
19279 phba->fcf.eligible_fcf_cnt = 0;
19285 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19286 /* FCF scan failed, clear FCF_TS_INPROG flag */
19287 spin_lock_irq(&phba->hbalock);
19288 phba->hba_flag &= ~FCF_TS_INPROG;
19289 spin_unlock_irq(&phba->hbalock);
19295 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
19296 * @phba: pointer to lpfc hba data structure.
19297 * @fcf_index: FCF table entry offset.
19299 * This routine is invoked to read an FCF record indicated by @fcf_index
19300 * and to use it for FLOGI roundrobin FCF failover.
19302 * Return 0 if the mailbox command is submitted successfully, nonzero
19303 * otherwise.
19304 */
19306 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19309 LPFC_MBOXQ_t *mboxq;
19311 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19313 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19314 "2763 Failed to allocate mbox for "
19317 goto fail_fcf_read;
19319 /* Construct the read FCF record mailbox command */
19320 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19323 goto fail_fcf_read;
19325 /* Issue the mailbox command asynchronously */
19326 mboxq->vport = phba->pport;
19327 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
19328 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19329 if (rc == MBX_NOT_FINISHED)
19335 if (error && mboxq)
19336 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19341 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
19342 * @phba: pointer to lpfc hba data structure.
19343 * @fcf_index: FCF table entry offset.
19345 * This routine is invoked to read an FCF record indicated by @fcf_index to
19346 * determine whether it's eligible for FLOGI roundrobin failover list.
19348 * Return 0 if the mailbox command is submitted successfully, nonzero
19349 * otherwise.
19350 */
19352 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19355 LPFC_MBOXQ_t *mboxq;
19357 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19359 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19360 "2758 Failed to allocate mbox for "
19363 goto fail_fcf_read;
19365 /* Construct the read FCF record mailbox command */
19366 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19369 goto fail_fcf_read;
19371 /* Issue the mailbox command asynchronously */
19372 mboxq->vport = phba->pport;
19373 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
19374 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19375 if (rc == MBX_NOT_FINISHED)
19381 if (error && mboxq)
19382 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19387 * lpfc_check_next_fcf_pri_level
19388 * @phba: pointer to the lpfc_hba struct for this port.
19389 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
19390 * routine when the rr_bmask is empty. The FCF indices are put into the
19391 * rr_bmask based on their priority level. Starting from the highest priority
19392 * to the lowest. The most likely FCF candidate will be in the highest
19393 * priority group. When this routine is called it searches the fcf_pri list for
19394 * next lowest priority group and repopulates the rr_bmask with only those
19395 * fcf_pri entries.
19396 * Returns:
19397 *	1=success 0=failure
19400 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
19402 uint16_t next_fcf_pri;
19403 uint16_t last_index;
19404 struct lpfc_fcf_pri *fcf_pri;
19408 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
19409 LPFC_SLI4_FCF_TBL_INDX_MAX);
19410 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19411 "3060 Last IDX %d\n", last_index);
19413 /* Verify the priority list has 2 or more entries */
19414 spin_lock_irq(&phba->hbalock);
19415 if (list_empty(&phba->fcf.fcf_pri_list) ||
19416 list_is_singular(&phba->fcf.fcf_pri_list)) {
19417 spin_unlock_irq(&phba->hbalock);
19418 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19419 "3061 Last IDX %d\n", last_index);
19420 return 0; /* Empty rr list */
19422 spin_unlock_irq(&phba->hbalock);
19426 * Clear the rr_bmask and set all of the bits that are at this
19427 * priority level.
19428 */
19429 memset(phba->fcf.fcf_rr_bmask, 0,
19430 sizeof(*phba->fcf.fcf_rr_bmask));
19431 spin_lock_irq(&phba->hbalock);
19432 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19433 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
19436 * The first priority whose FLOGI has not failed
19437 * will be the highest.
19440 next_fcf_pri = fcf_pri->fcf_rec.priority;
19441 spin_unlock_irq(&phba->hbalock);
19442 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19443 rc = lpfc_sli4_fcf_rr_index_set(phba,
19444 fcf_pri->fcf_rec.fcf_index);
19448 spin_lock_irq(&phba->hbalock);
19451 * If next_fcf_pri was not set above and the list is not empty then
19452 * FLOGI has failed on all of the entries. So clear the FLOGI-failed
19453 * flags and start at the beginning.
19455 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
19456 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19457 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
19459 * The first priority whose FLOGI has not failed
19460 * will be the highest.
19463 next_fcf_pri = fcf_pri->fcf_rec.priority;
19464 spin_unlock_irq(&phba->hbalock);
19465 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19466 rc = lpfc_sli4_fcf_rr_index_set(phba,
19467 fcf_pri->fcf_rec.fcf_index);
19471 spin_lock_irq(&phba->hbalock);
19475 spin_unlock_irq(&phba->hbalock);
19480 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
19481 * @phba: pointer to lpfc hba data structure.
19483 * This routine gets the next eligible FCF record index in a round
19484 * robin fashion. If the next eligible FCF record index equals the
19485 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
19486 * shall be returned, otherwise, the next eligible FCF record's index
19487 * shall be returned.
19490 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
19492 uint16_t next_fcf_index;
19494 initial_priority:
19495 /* Search start from next bit of currently registered FCF index */
19496 next_fcf_index = phba->fcf.current_rec.fcf_indx;
19498 next_priority:
19499 /* Determine the next fcf index to check */
19500 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
19501 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19502 LPFC_SLI4_FCF_TBL_INDX_MAX,
19505 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
19506 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19508 * If we have wrapped then we need to clear the bits that
19509 * have been tested so that we can detect when we should
19510 * change the priority level.
19512 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19513 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
19517 /* Check roundrobin failover list empty condition */
19518 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
19519 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
19521 * If the next fcf index is not found, check if there are lower
19522 * priority level FCFs in the fcf_priority list.
19523 * Set up the rr_bmask with all of the available fcf bits
19524 * at that level and continue the selection process.
19526 if (lpfc_check_next_fcf_pri_level(phba))
19527 goto initial_priority;
19528 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
19529 "2844 No roundrobin failover FCF available\n");
19531 return LPFC_FCOE_FCF_NEXT_NONE;
19534 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
19535 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
19536 LPFC_FCF_FLOGI_FAILED) {
19537 if (list_is_singular(&phba->fcf.fcf_pri_list))
19538 return LPFC_FCOE_FCF_NEXT_NONE;
19540 goto next_priority;
19543 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19544 "2845 Get next roundrobin failover FCF (x%x)\n",
19547 return next_fcf_index;
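/*
 * Editor's sketch (not driver code): the wrap-around search performed by
 * lpfc_sli4_fcf_rr_next_index_get() above, reduced to plain C and without
 * the priority-level handling. Scanning starts at the slot after the
 * current index and wraps; coming back around to the current index means
 * no other candidate exists. The demo_* names and the 0xFFFF sentinel
 * (standing in for LPFC_FCOE_FCF_NEXT_NONE) follow the code above.
 */
static unsigned short demo_rr_next(const unsigned char *eligible,
				   unsigned short max,
				   unsigned short current_idx)
{
	unsigned short i, idx;

	for (i = 1; i < max; i++) {
		idx = (current_idx + i) % max;	/* next slot, with wrap */
		if (eligible[idx])
			return idx;		/* next eligible index */
	}
	return 0xFFFF;	/* only the current index (or none) is left */
}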
19551 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
19552 * @phba: pointer to lpfc hba data structure.
19553 * @fcf_index: index into the FCF table to 'set'
19555 * This routine sets the FCF record index in to the eligible bmask for
19556 * roundrobin failover search. It checks to make sure that the index
19557 * does not go beyond the range of the driver allocated bmask dimension
19558 * before setting the bit.
19560 * Returns 0 if the index bit is successfully set, otherwise it returns
19561 * -EINVAL.
19562 */
19564 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
19566 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19567 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19568 "2610 FCF (x%x) reached driver's book "
19569 "keeping dimension:x%x\n",
19570 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19573 /* Set the eligible FCF record index bmask */
19574 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
19576 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19577 "2790 Set FCF (x%x) to roundrobin FCF failover "
19578 "bmask\n", fcf_index);
19584 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
19585 * @phba: pointer to lpfc hba data structure.
19586 * @fcf_index: index into the FCF table to 'clear'
19588 * This routine clears the FCF record index from the eligible bmask for
19589 * roundrobin failover search. It checks to make sure that the index
19590 * does not go beyond the range of the driver allocated bmask dimension
19591 * before clearing the bit.
19594 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
19596 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
19597 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19598 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19599 "2762 FCF (x%x) reached driver's book "
19600 "keeping dimension:x%x\n",
19601 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19604 /* Clear the eligible FCF record index bmask */
19605 spin_lock_irq(&phba->hbalock);
19606 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
19608 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
19609 list_del_init(&fcf_pri->list);
19613 spin_unlock_irq(&phba->hbalock);
19614 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
19616 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19617 "2791 Clear FCF (x%x) from roundrobin failover "
19618 "bmask\n", fcf_index);
19622 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
19623 * @phba: pointer to lpfc hba data structure.
19624 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
19626 * This routine is the completion routine for the rediscover FCF table mailbox
19627 * command. If the mailbox command returned failure, it falls back to the
19628 * last-resort failover handling; otherwise it starts the FCF rediscovery wait timer.
19631 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
19633 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19634 uint32_t shdr_status, shdr_add_status;
19636 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19638 shdr_status = bf_get(lpfc_mbox_hdr_status,
19639 &redisc_fcf->header.cfg_shdr.response);
19640 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19641 &redisc_fcf->header.cfg_shdr.response);
19642 if (shdr_status || shdr_add_status) {
19643 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19644 "2746 Requesting for FCF rediscovery failed "
19645 "status x%x add_status x%x\n",
19646 shdr_status, shdr_add_status);
19647 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
19648 spin_lock_irq(&phba->hbalock);
19649 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
19650 spin_unlock_irq(&phba->hbalock);
19652 * The CVL event triggered an FCF rediscover request that failed;
19653 * as a last resort, retry the currently registered FCF entry.
19655 lpfc_retry_pport_discovery(phba);
19657 spin_lock_irq(&phba->hbalock);
19658 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
19659 spin_unlock_irq(&phba->hbalock);
19661 * The DEAD FCF event triggered an FCF rediscover request that
19662 * failed; as a last resort, fail over by treating it as a link
19663 * down to FCF registration.
19665 lpfc_sli4_fcf_dead_failthrough(phba);
19668 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19669 "2775 Start FCF rediscover quiescent timer\n");
19671 * Start the FCF rediscovery wait timer for pending FCF
19672 * rediscovery before rescanning the FCF record table.
19674 lpfc_fcf_redisc_wait_start_timer(phba);
19677 mempool_free(mbox, phba->mbox_mem_pool);
19681 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
19682 * @phba: pointer to lpfc hba data structure.
19684 * This routine is invoked to request rediscovery of the entire FCF table
19685 * by the port.
19686 */
19688 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
19690 LPFC_MBOXQ_t *mbox;
19691 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19694 /* Cancel retry delay timers to all vports before FCF rediscover */
19695 lpfc_cancel_all_vport_retry_delay_timer(phba);
19697 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19699 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19700 "2745 Failed to allocate mbox for "
19701 "requesting FCF rediscover.\n");
19705 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
19706 sizeof(struct lpfc_sli4_cfg_mhdr));
19707 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
19708 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
19709 length, LPFC_SLI4_MBX_EMBED);
19711 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19712 /* Set count to 0 for invalidating the entire FCF database */
19713 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
19715 /* Issue the mailbox command asynchronously */
19716 mbox->vport = phba->pport;
19717 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
19718 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
19720 if (rc == MBX_NOT_FINISHED) {
19721 mempool_free(mbox, phba->mbox_mem_pool);
19728 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
19729 * @phba: pointer to lpfc hba data structure.
19731 * This function is the failover routine as a last resort to the FCF DEAD
19732 * event when driver failed to perform fast FCF failover.
19735 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
19737 uint32_t link_state;
19740 * Last resort as FCF DEAD event failover will treat this as
19741 * a link down, but save the link state because we don't want
19742 * it to be changed to Link Down unless it is already down.
19744 link_state = phba->link_state;
19745 lpfc_linkdown(phba);
19746 phba->link_state = link_state;
19748 /* Unregister FCF if no devices connected to it */
19749 lpfc_unregister_unused_fcf(phba);
19753 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
19754 * @phba: pointer to lpfc hba data structure.
19755 * @rgn23_data: pointer to configure region 23 data.
19757 * This function gets SLI3 port configuration region 23 data through the
19758 * memory dump mailbox command. When it successfully retrieves the data,
19759 * the size of the data is returned; otherwise, 0 is returned.
19762 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19764 LPFC_MBOXQ_t *pmb = NULL;
19766 uint32_t offset = 0;
19772 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19774 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19775 "2600 failed to allocate mailbox memory\n");
19781 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
19782 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
19784 if (rc != MBX_SUCCESS) {
19785 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19786 "2601 failed to read config "
19787 "region 23, rc 0x%x Status 0x%x\n",
19788 rc, mb->mbxStatus);
19789 mb->un.varDmp.word_cnt = 0;
19792 * dump mem may return a zero when finished or we got a
19793 * mailbox error, either way we are done.
19795 if (mb->un.varDmp.word_cnt == 0)
19798 i = mb->un.varDmp.word_cnt * sizeof(uint32_t);
19799 if (offset + i > DMP_RGN23_SIZE)
19800 i = DMP_RGN23_SIZE - offset;
19801 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
19802 rgn23_data + offset, i);
19804 } while (offset < DMP_RGN23_SIZE);
19806 mempool_free(pmb, phba->mbox_mem_pool);
19811 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
19812 * @phba: pointer to lpfc hba data structure.
19813 * @rgn23_data: pointer to configure region 23 data.
19815 * This function gets SLI4 port configuration region 23 data through the
19816 * memory dump mailbox command. When it successfully retrieves the data,
19817 * the size of the data is returned; otherwise, 0 is returned.
19820 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19822 LPFC_MBOXQ_t *mboxq = NULL;
19823 struct lpfc_dmabuf *mp = NULL;
19824 struct lpfc_mqe *mqe;
19825 uint32_t data_length = 0;
19831 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19833 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19834 "3105 failed to allocate mailbox memory\n");
19838 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
19840 mqe = &mboxq->u.mqe;
19841 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
19842 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19845 data_length = mqe->un.mb_words[5];
19846 if (data_length == 0)
19848 if (data_length > DMP_RGN23_SIZE) {
19852 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
19854 mempool_free(mboxq, phba->mbox_mem_pool);
19856 lpfc_mbuf_free(phba, mp->virt, mp->phys);
19859 return data_length;
19863 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
19864 * @phba: pointer to lpfc hba data structure.
19866 * This function reads region 23 and parses the TLVs for port status to
19867 * decide if the user disabled the port. If the TLV indicates the
19868 * port is disabled, the hba_flag is set accordingly.
19871 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
19873 uint8_t *rgn23_data = NULL;
19874 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
19875 uint32_t offset = 0;
19877 /* Get adapter Region 23 data */
19878 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
19882 if (phba->sli_rev < LPFC_SLI_REV4)
19883 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
19885 if_type = bf_get(lpfc_sli_intf_if_type,
19886 &phba->sli4_hba.sli_intf);
19887 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
19889 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
19895 /* Check the region signature first */
19896 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
19897 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19898 "2619 Config region 23 has bad signature\n");
19903 /* Check the data structure version */
19904 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
19905 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19906 "2620 Config region 23 has bad version\n");
19911 /* Parse TLV entries in the region */
19912 while (offset < data_size) {
19913 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
19916 * If the TLV is not a driver-specific TLV or the driver id is
19917 * not the Linux driver id, skip the record.
19919 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
19920 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
19921 (rgn23_data[offset + 3] != 0)) {
19922 offset += rgn23_data[offset + 1] * 4 + 4;
19926 /* Driver found a driver specific TLV in the config region */
19927 sub_tlv_len = rgn23_data[offset + 1] * 4;
19932 * Search for configured port state sub-TLV.
19934 while ((offset < data_size) &&
19935 (tlv_offset < sub_tlv_len)) {
19936 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
19941 if (rgn23_data[offset] != PORT_STE_TYPE) {
19942 offset += rgn23_data[offset + 1] * 4 + 4;
19943 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
19947 /* This HBA contains PORT_STE configured */
19948 if (!rgn23_data[offset + 2])
19949 phba->hba_flag |= LINK_DISABLED;
19961 * lpfc_wr_object - write an object to the firmware
19962 * @phba: HBA structure that indicates port to create a queue on.
19963 * @dmabuf_list: list of dmabufs to write to the port.
19964 * @size: the total byte value of the objects to write to the port.
19965 * @offset: the current offset to be used to start the transfer.
19967 * This routine will create a wr_object mailbox command to send to the port.
19968 * The mailbox command will be constructed using the dma buffers described
19969 * in @dmabuf_list to create a list of BDEs. This routine will fill in as
19970 * many BDEs as the embedded mailbox can support. The @offset variable will
19971 * be used to indicate the starting offset of the transfer and will also
19972 * return the offset after the write object mailbox has completed. @size is
19973 * used to determine the end of the object and whether the eof bit should be set.
19975 * Return 0 if successful; @offset will contain the new offset to use
19976 * for the next write.
19977 * Return negative value for error cases.
19980 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
19981 uint32_t size, uint32_t *offset)
19983 struct lpfc_mbx_wr_object *wr_object;
19984 LPFC_MBOXQ_t *mbox;
19986 uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
19988 struct lpfc_dmabuf *dmabuf;
19989 uint32_t written = 0;
19990 bool check_change_status = false;
19992 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19996 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
19997 LPFC_MBOX_OPCODE_WRITE_OBJECT,
19998 sizeof(struct lpfc_mbx_wr_object) -
19999 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
20001 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
20002 wr_object->u.request.write_offset = *offset;
20003 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
20004 wr_object->u.request.object_name[0] =
20005 cpu_to_le32(wr_object->u.request.object_name[0]);
20006 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
20007 list_for_each_entry(dmabuf, dmabuf_list, list) {
20008 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
20010 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
20011 wr_object->u.request.bde[i].addrHigh =
20012 putPaddrHigh(dmabuf->phys);
20013 if (written + SLI4_PAGE_SIZE >= size) {
20014 wr_object->u.request.bde[i].tus.f.bdeSize =
20016 written += (size - written);
20017 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
20018 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
20019 check_change_status = true;
20021 wr_object->u.request.bde[i].tus.f.bdeSize =
20023 written += SLI4_PAGE_SIZE;
20027 wr_object->u.request.bde_count = i;
20028 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
20029 if (!phba->sli4_hba.intr_enable)
20030 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
20032 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
20033 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
20035 /* The IOCTL status is embedded in the mailbox subheader. */
20036 shdr_status = bf_get(lpfc_mbox_hdr_status,
20037 &wr_object->header.cfg_shdr.response);
20038 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20039 &wr_object->header.cfg_shdr.response);
20040 if (check_change_status) {
20041 shdr_change_status = bf_get(lpfc_wr_object_change_status,
20042 &wr_object->u.response);
20044 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
20045 shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
20046 shdr_csf = bf_get(lpfc_wr_object_csf,
20047 &wr_object->u.response);
20049 shdr_change_status =
20050 LPFC_CHANGE_STATUS_PCI_RESET;
20053 switch (shdr_change_status) {
20054 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
20055 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20056 "3198 Firmware write complete: System "
20057 "reboot required to instantiate\n");
20059 case (LPFC_CHANGE_STATUS_FW_RESET):
20060 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20061 "3199 Firmware write complete: Firmware"
20062 " reset required to instantiate\n");
20064 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
20065 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20066 "3200 Firmware write complete: Port "
20067 "Migration or PCI Reset required to "
20070 case (LPFC_CHANGE_STATUS_PCI_RESET):
20071 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20072 "3201 Firmware write complete: PCI "
20073 "Reset required to instantiate\n");
20079 if (rc != MBX_TIMEOUT)
20080 mempool_free(mbox, phba->mbox_mem_pool);
20081 if (shdr_status || shdr_add_status || rc) {
20082 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20083 "3025 Write Object mailbox failed with "
20084 "status x%x add_status x%x, mbx status x%x\n",
20085 shdr_status, shdr_add_status, rc);
20086 rc = -ENXIO;
20087 *offset = shdr_add_status;
20088 } else
20089 *offset += wr_object->u.response.actual_write_length;
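/*
 * Editor's sketch (not driver code): the BDE fill loop of lpfc_wr_object()
 * above. The object is carved into page-sized pieces; the final piece gets
 * the exact residual size and the eof flag, everything else gets a full
 * page. demo_fill_chunks() and its parameters are hypothetical, with
 * 'page' standing in for SLI4_PAGE_SIZE.
 */
static unsigned int demo_fill_chunks(unsigned int size, unsigned int page,
				     unsigned int *sizes, int max_bdes,
				     int *eof)
{
	unsigned int written = 0;
	int i;

	*eof = 0;
	for (i = 0; i < max_bdes && written < size; i++) {
		if (written + page >= size) {	/* final (short) chunk */
			sizes[i] = size - written;
			written = size;
			*eof = 1;		/* mark end of the object */
		} else {
			sizes[i] = page;	/* full page chunk */
			written += page;
		}
	}
	return written;		/* may be < size if max_bdes ran out */
}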
20094 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
20095 * @vport: pointer to vport data structure.
20097 * This function iterates through the mailboxq and cleans up all REG_LOGIN
20098 * and REG_VPI mailbox commands associated with the vport. This function
20099 * is called when the driver wants to restart discovery of the vport due to
20100 * a Clear Virtual Link event.
20103 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
20105 struct lpfc_hba *phba = vport->phba;
20106 LPFC_MBOXQ_t *mb, *nextmb;
20107 struct lpfc_dmabuf *mp;
20108 struct lpfc_nodelist *ndlp;
20109 struct lpfc_nodelist *act_mbx_ndlp = NULL;
20110 LIST_HEAD(mbox_cmd_list);
20111 uint8_t restart_loop;
20113 /* Clean up internally queued mailbox commands with the vport */
20114 spin_lock_irq(&phba->hbalock);
20115 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
20116 if (mb->vport != vport)
20119 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
20120 (mb->u.mb.mbxCommand != MBX_REG_VPI))
20123 list_del(&mb->list);
20124 list_add_tail(&mb->list, &mbox_cmd_list);
20126 /* Clean up active mailbox command with the vport */
20127 mb = phba->sli.mbox_active;
20128 if (mb && (mb->vport == vport)) {
20129 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
20130 (mb->u.mb.mbxCommand == MBX_REG_VPI))
20131 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
20132 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20133 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20134 /* Put reference count for delayed processing */
20135 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
20136 /* Unregister the RPI when mailbox complete */
20137 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
20140 /* Cleanup any mailbox completions which are not yet processed */
20143 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
20145 * If this mailbox has already been processed or it is
20146 * for another vport, ignore it.
20148 if ((mb->vport != vport) ||
20149 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
20152 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
20153 (mb->u.mb.mbxCommand != MBX_REG_VPI))
20156 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
20157 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20158 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20159 /* Unregister the RPI when mailbox complete */
20160 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
20162 spin_unlock_irq(&phba->hbalock);
20163 spin_lock(&ndlp->lock);
20164 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20165 spin_unlock(&ndlp->lock);
20166 spin_lock_irq(&phba->hbalock);
20170 } while (restart_loop);
20172 spin_unlock_irq(&phba->hbalock);
20174 /* Release the cleaned-up mailbox commands */
20175 while (!list_empty(&mbox_cmd_list)) {
20176 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
20177 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20178 mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
20180 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
20183 mb->ctx_buf = NULL;
20184 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20185 mb->ctx_ndlp = NULL;
20187 spin_lock(&ndlp->lock);
20188 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20189 spin_unlock(&ndlp->lock);
20190 lpfc_nlp_put(ndlp);
20193 mempool_free(mb, phba->mbox_mem_pool);
20196 /* Release the ndlp with the cleaned-up active mailbox command */
20197 if (act_mbx_ndlp) {
20198 spin_lock(&act_mbx_ndlp->lock);
20199 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20200 spin_unlock(&act_mbx_ndlp->lock);
20201 lpfc_nlp_put(act_mbx_ndlp);
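/*
 * Editor's sketch (not driver code): the restart-scan pattern used by
 * lpfc_cleanup_pending_mbox() above. When handling an entry requires
 * dropping the list lock mid-iteration, the iterator can no longer be
 * trusted, so the scan restarts from the head until one full pass
 * completes without dropping the lock. demo_* names are hypothetical;
 * visit() returns nonzero when it had to drop the lock.
 */
static void demo_scan_with_restart(int count, int (*visit)(int idx))
{
	int restart, i;

	do {
		restart = 0;
		for (i = 0; i < count; i++) {
			if (visit(i)) {
				restart = 1;	/* lock was dropped ... */
				break;		/* ... iterator is invalid */
			}
		}
	} while (restart);
}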
20206 * lpfc_drain_txq - Drain the txq
20207 * @phba: Pointer to HBA context object.
20209 * This function attempts to submit IOCBs on the txq
20210 * to the adapter. For SLI4 adapters, the txq contains
20211 * ELS IOCBs that have been deferred because there
20212 * are no SGLs available. This congestion can occur with large
20213 * vport counts during node discovery.
20217 lpfc_drain_txq(struct lpfc_hba *phba)
20219 LIST_HEAD(completions);
20220 struct lpfc_sli_ring *pring;
20221 struct lpfc_iocbq *piocbq = NULL;
20222 unsigned long iflags = 0;
20223 char *fail_msg = NULL;
20224 struct lpfc_sglq *sglq;
20225 union lpfc_wqe128 wqe;
20226 uint32_t txq_cnt = 0;
20227 struct lpfc_queue *wq;
20229 if (phba->link_flag & LS_MDS_LOOPBACK) {
20230 /* MDS WQEs are posted only to the first WQ */
20231 wq = phba->sli4_hba.hdwq[0].io_wq;
20236 wq = phba->sli4_hba.els_wq;
20239 pring = lpfc_phba_elsring(phba);
20242 if (unlikely(!pring) || list_empty(&pring->txq))
20245 spin_lock_irqsave(&pring->ring_lock, iflags);
20246 list_for_each_entry(piocbq, &pring->txq, list) {
20250 if (txq_cnt > pring->txq_max)
20251 pring->txq_max = txq_cnt;
20253 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20255 while (!list_empty(&pring->txq)) {
20256 spin_lock_irqsave(&pring->ring_lock, iflags);
20258 piocbq = lpfc_sli_ringtx_get(phba, pring);
20260 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20261 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20262 "2823 txq empty and txq_cnt is %d\n ",
20266 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
20268 __lpfc_sli_ringtx_put(phba, pring, piocbq);
20269 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20274 /* The xri and iocb resources are secured,
20275 * attempt to issue the request
20277 piocbq->sli4_lxritag = sglq->sli4_lxritag;
20278 piocbq->sli4_xritag = sglq->sli4_xritag;
20279 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
20280 fail_msg = "to convert bpl to sgl";
20281 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
20282 fail_msg = "to convert iocb to wqe";
20283 else if (lpfc_sli4_wq_put(wq, &wqe))
20284 fail_msg = " - Wq is full";
20286 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
20289 /* Failed means we can't issue and need to cancel */
20290 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20291 "2822 IOCB failed %s iotag 0x%x "
20294 piocbq->iotag, piocbq->sli4_xritag);
20295 list_add_tail(&piocbq->list, &completions);
20297 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20300 /* Cancel all the IOCBs that cannot be issued */
20301 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
20302 IOERR_SLI_ABORTED);
20308 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
20309 * @phba: Pointer to HBA context object.
20310 * @pwqeq: Pointer to command WQE.
20311 * @sglq: Pointer to the scatter gather queue object.
20313 * This routine converts the bpl or bde that is in the WQE
20314 * to a sgl list for the sli4 hardware. The physical address
20315 * of the bpl/bde is converted back to a virtual address.
20316 * If the WQE contains a BPL then the list of BDEs is
20317 * converted to sli4_sge's. If the WQE contains a single
20318 * BDE then it is converted to a single sli4_sge.
20319 * The WQE is still in cpu endianness so the contents of
20320 * the bpl can be used without byte swapping.
20322 * Returns valid XRI = Success, NO_XRI = Failure.
20325 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
20326 struct lpfc_sglq *sglq)
20328 uint16_t xritag = NO_XRI;
20329 struct ulp_bde64 *bpl = NULL;
20330 struct ulp_bde64 bde;
20331 struct sli4_sge *sgl = NULL;
20332 struct lpfc_dmabuf *dmabuf;
20333 union lpfc_wqe128 *wqe;
20336 uint32_t offset = 0; /* accumulated offset in the sg request list */
20337 int inbound = 0; /* number of sg reply entries inbound from firmware */
20340 if (!pwqeq || !sglq)
20343 sgl = (struct sli4_sge *)sglq->sgl;
20345 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
20347 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
20348 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
20349 return sglq->sli4_xritag;
20350 numBdes = pwqeq->rsvd2;
20352 /* The addrHigh and addrLow fields within the WQE
20353 * have not been byteswapped yet so there is no
20354 * need to swap them back.
20356 if (pwqeq->context3)
20357 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
20361 bpl = (struct ulp_bde64 *)dmabuf->virt;
20365 for (i = 0; i < numBdes; i++) {
20366 /* Should already be byte swapped. */
20367 sgl->addr_hi = bpl->addrHigh;
20368 sgl->addr_lo = bpl->addrLow;
20370 sgl->word2 = le32_to_cpu(sgl->word2);
20371 if ((i+1) == numBdes)
20372 bf_set(lpfc_sli4_sge_last, sgl, 1);
20374 bf_set(lpfc_sli4_sge_last, sgl, 0);
20375 /* swap the size field back to the cpu so we
20376 * can assign it to the sgl.
20378 bde.tus.w = le32_to_cpu(bpl->tus.w);
20379 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
20380 /* The offsets in the sgl need to be accumulated
20381 * separately for the request and reply lists.
20382 * The request is always first, the reply follows.
20385 case CMD_GEN_REQUEST64_WQE:
20386 /* add up the reply sg entries */
20387 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
20389 /* first inbound? reset the offset */
20392 bf_set(lpfc_sli4_sge_offset, sgl, offset);
20393 bf_set(lpfc_sli4_sge_type, sgl,
20394 LPFC_SGE_TYPE_DATA);
20395 offset += bde.tus.f.bdeSize;
20397 case CMD_FCP_TRSP64_WQE:
20398 bf_set(lpfc_sli4_sge_offset, sgl, 0);
20399 bf_set(lpfc_sli4_sge_type, sgl,
20400 LPFC_SGE_TYPE_DATA);
20402 case CMD_FCP_TSEND64_WQE:
20403 case CMD_FCP_TRECEIVE64_WQE:
20404 bf_set(lpfc_sli4_sge_type, sgl,
20405 bpl->tus.f.bdeFlags);
20409 offset += bde.tus.f.bdeSize;
20410 bf_set(lpfc_sli4_sge_offset, sgl, offset);
20413 sgl->word2 = cpu_to_le32(sgl->word2);
20417 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
20418 /* The addrHigh and addrLow fields of the BDE have not
20419 * been byteswapped yet so they need to be swapped
20420 * before putting them in the sgl.
20422 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
20423 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
20424 sgl->word2 = le32_to_cpu(sgl->word2);
20425 bf_set(lpfc_sli4_sge_last, sgl, 1);
20426 sgl->word2 = cpu_to_le32(sgl->word2);
20427 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
20429 return sglq->sli4_xritag;
20433 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
20434 * @phba: Pointer to HBA context object.
20435 * @qp: Pointer to HDW queue.
20436 * @pwqe: Pointer to command WQE.
20439 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20440 struct lpfc_iocbq *pwqe)
20442 union lpfc_wqe128 *wqe = &pwqe->wqe;
20443 struct lpfc_async_xchg_ctx *ctxp;
20444 struct lpfc_queue *wq;
20445 struct lpfc_sglq *sglq;
20446 struct lpfc_sli_ring *pring;
20447 unsigned long iflags;
20450 /* NVME_LS and NVME_LS ABTS requests. */
20451 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
20452 pring = phba->sli4_hba.nvmels_wq->pring;
20453 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20455 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
20457 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20460 pwqe->sli4_lxritag = sglq->sli4_lxritag;
20461 pwqe->sli4_xritag = sglq->sli4_xritag;
20462 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
20463 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20466 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
20467 pwqe->sli4_xritag);
20468 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
20470 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20474 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20475 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20477 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20481 /* NVME_FCREQ and NVME_ABTS requests */
20482 if (pwqe->iocb_flag & LPFC_IO_NVME ||
20483 pwqe->iocb_flag & LPFC_IO_FCP) {
20484 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
20488 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
20490 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20492 ret = lpfc_sli4_wq_put(wq, wqe);
20494 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20497 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20498 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20500 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20504 /* NVMET requests */
20505 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
20506 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
20510 ctxp = pwqe->context2;
20511 sglq = ctxp->ctxbuf->sglq;
20512 if (pwqe->sli4_xritag == NO_XRI) {
20513 pwqe->sli4_lxritag = sglq->sli4_lxritag;
20514 pwqe->sli4_xritag = sglq->sli4_xritag;
20516 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
20517 pwqe->sli4_xritag);
20518 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
20520 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20522 ret = lpfc_sli4_wq_put(wq, wqe);
20524 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20527 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20528 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20530 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20537 * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
20538 * @phba: Pointer to HBA context object.
20539 * @cmdiocb: Pointer to driver command iocb object.
20540 * @cmpl: completion function.
20542 * Fill the appropriate fields for the abort WQE and call
20543 * internal routine lpfc_sli4_issue_wqe to send the WQE
20544 * This function is called with hbalock held and no ring_lock held.
20546 * RETURNS 0 - SUCCESS
20550 lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
20553 struct lpfc_vport *vport = cmdiocb->vport;
20554 struct lpfc_iocbq *abtsiocb = NULL;
20555 union lpfc_wqe128 *abtswqe;
20556 struct lpfc_io_buf *lpfc_cmd;
20557 int retval = IOCB_ERROR;
20558 u16 xritag = cmdiocb->sli4_xritag;
20561 * The SCSI command cannot be in the txq; it is in flight, because
20562 * pCmd is still pointing at the SCSI command we have to abort. There
20563 * is no need to search the txcmplq. Just send an abort to the FW.
20566 abtsiocb = __lpfc_sli_get_iocbq(phba);
20568 return WQE_NORESOURCE;
20570 /* Indicate the IO is being aborted by the driver. */
20571 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
20573 abtswqe = &abtsiocb->wqe;
20574 memset(abtswqe, 0, sizeof(*abtswqe));
20576 if (lpfc_is_link_up(phba))
20577 bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
20579 bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 0);
20580 bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
20581 abtswqe->abort_cmd.rsrvd5 = 0;
20582 abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
20583 bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
20584 bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
20585 bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
20586 bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
20587 bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
20588 bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
20590 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
20591 abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
20592 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
20593 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
20594 abtsiocb->iocb_flag |= LPFC_IO_FCP;
20595 if (cmdiocb->iocb_flag & LPFC_IO_NVME)
20596 abtsiocb->iocb_flag |= LPFC_IO_NVME;
20597 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
20598 abtsiocb->iocb_flag |= LPFC_IO_FOF;
20599 abtsiocb->vport = vport;
20600 abtsiocb->wqe_cmpl = cmpl;
20602 lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
20603 retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
20605 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
20606 "0359 Abort xri x%x, original iotag x%x, "
20607 "abort cmd iotag x%x retval x%x\n",
20608 xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
20611 cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
20612 __lpfc_sli_release_iocbq(phba, abtsiocb);
20618 #ifdef LPFC_MXP_STAT
20620 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
20621 * @phba: pointer to lpfc hba data structure.
20622 * @hwqid: index of the HWQ to snapshot.
20624 * The purpose of this routine is to take a snapshot of pbl, pvt and busy
20625 * count 15 seconds after a test case starts running.
20627 * The user should call lpfc_debugfs_multixripools_write before running a
20628 * test case to clear stat_snapshot_taken. Then the user starts a test case.
20629 * While the test case is running, stat_snapshot_taken is incremented by 1
20630 * each time this routine is called from the heartbeat timer. When
20631 * stat_snapshot_taken reaches LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
20633 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
20635 struct lpfc_sli4_hdw_queue *qp;
20636 struct lpfc_multixri_pool *multixri_pool;
20637 struct lpfc_pvt_pool *pvt_pool;
20638 struct lpfc_pbl_pool *pbl_pool;
20641 qp = &phba->sli4_hba.hdwq[hwqid];
20642 multixri_pool = qp->p_multixri_pool;
20643 if (!multixri_pool)
20646 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
20647 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20648 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20649 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20651 multixri_pool->stat_pbl_count = pbl_pool->count;
20652 multixri_pool->stat_pvt_count = pvt_pool->count;
20653 multixri_pool->stat_busy_count = txcmplq_cnt;
20656 multixri_pool->stat_snapshot_taken++;
20661 * lpfc_adjust_pvt_pool_count - Adjust private pool count
20662 * @phba: pointer to lpfc hba data structure.
20663 * @hwqid: index of the HWQ to adjust.
20665 * This routine moves some XRIs from the private to the public pool when
20666 * the private pool is not busy.
20667 */
20668 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
20670 struct lpfc_multixri_pool *multixri_pool;
20672 u32 prev_io_req_count;
20674 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20675 if (!multixri_pool)
20677 io_req_count = multixri_pool->io_req_count;
20678 prev_io_req_count = multixri_pool->prev_io_req_count;
20680 if (prev_io_req_count != io_req_count) {
20681 /* Private pool is busy */
20682 multixri_pool->prev_io_req_count = io_req_count;
20684 /* Private pool is not busy.
20685 * Move XRIs from private to public pool.
20687 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
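/*
 * Editor's sketch (not driver code): the idleness test used by
 * lpfc_adjust_pvt_pool_count() above. The pool counts as busy if the
 * request counter moved since the previous timer tick; only an idle pool
 * donates its XRIs back to the public pool. demo_pool_idle() is a
 * hypothetical helper.
 */
static int demo_pool_idle(unsigned int *prev_count, unsigned int now_count)
{
	if (*prev_count != now_count) {
		*prev_count = now_count; /* busy: remember and keep XRIs */
		return 0;
	}
	return 1;			 /* idle: safe to donate XRIs */
}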
20692 * lpfc_adjust_high_watermark - Adjust high watermark
20693 * @phba: pointer to lpfc hba data structure.
20694 * @hwqid: index of the HWQ to adjust.
20696 * This routine sets the high watermark to the number of outstanding XRIs,
20697 * clamped so that the new value stays between xri_limit/2 and xri_limit.
20699 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
20707 struct lpfc_multixri_pool *multixri_pool;
20708 struct lpfc_sli4_hdw_queue *qp;
20710 qp = &phba->sli4_hba.hdwq[hwqid];
20711 multixri_pool = qp->p_multixri_pool;
20712 if (!multixri_pool)
20714 xri_limit = multixri_pool->xri_limit;
20716 watermark_max = xri_limit;
20717 watermark_min = xri_limit / 2;
20719 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20720 abts_io_bufs = qp->abts_scsi_io_bufs;
20721 abts_io_bufs += qp->abts_nvme_io_bufs;
20723 new_watermark = txcmplq_cnt + abts_io_bufs;
20724 new_watermark = min(watermark_max, new_watermark);
20725 new_watermark = max(watermark_min, new_watermark);
20726 multixri_pool->pvt_pool.high_watermark = new_watermark;
20728 #ifdef LPFC_MXP_STAT
20729 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
20735 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
20736 * @phba: pointer to lpfc hba data structure.
20737 * @hwqid: index of the HWQ whose pools are adjusted.
20739 * This routine is called from the heartbeat timer when pvt_pool is idle.
20740 * All free XRIs are moved from the private to the public pool on hwqid in 2 steps.
20741 * The first step moves (all - low_watermark) XRIs.
20742 * The second step moves the rest of XRIs.
20744 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
20746 struct lpfc_pbl_pool *pbl_pool;
20747 struct lpfc_pvt_pool *pvt_pool;
20748 struct lpfc_sli4_hdw_queue *qp;
20749 struct lpfc_io_buf *lpfc_ncmd;
20750 struct lpfc_io_buf *lpfc_ncmd_next;
20751 unsigned long iflag;
20752 struct list_head tmp_list;
20755 qp = &phba->sli4_hba.hdwq[hwqid];
20756 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20757 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20760 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
20761 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
20763 if (pvt_pool->count > pvt_pool->low_watermark) {
20764 /* Step 1: move (all - low_watermark) from pvt_pool
20768 /* Move low watermark of bufs from pvt_pool to tmp_list */
20769 INIT_LIST_HEAD(&tmp_list);
20770 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20771 &pvt_pool->list, list) {
20772 list_move_tail(&lpfc_ncmd->list, &tmp_list);
20774 if (tmp_count >= pvt_pool->low_watermark)
20778 /* Move all bufs from pvt_pool to pbl_pool */
20779 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20781 /* Move all bufs from tmp_list to pvt_pool */
20782 list_splice(&tmp_list, &pvt_pool->list);
20784 pbl_pool->count += (pvt_pool->count - tmp_count);
20785 pvt_pool->count = tmp_count;
20787 /* Step 2: move the rest from pvt_pool to pbl_pool */
20788 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20789 pbl_pool->count += pvt_pool->count;
20790 pvt_pool->count = 0;
20793 spin_unlock(&pvt_pool->lock);
20794 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20798 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20799 * @phba: pointer to lpfc hba data structure
20800 * @qp: pointer to HDW queue
20801 * @pbl_pool: specified public free XRI pool
20802 * @pvt_pool: specified private free XRI pool
20803 * @count: number of XRIs to move
20805 * This routine tries to move some free common bufs from the specified pbl_pool
20806 * to the specified pvt_pool. It might move fewer than @count XRIs if
20807 * there are not enough in the public pool.
20810 * true - if XRIs are successfully moved from the specified pbl_pool to the
20811 * specified pvt_pool
20812 * false - if the specified pbl_pool is empty or locked by someone else
20815 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20816 struct lpfc_pbl_pool *pbl_pool,
20817 struct lpfc_pvt_pool *pvt_pool, u32 count)
20819 struct lpfc_io_buf *lpfc_ncmd;
20820 struct lpfc_io_buf *lpfc_ncmd_next;
20821 unsigned long iflag;
20824 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
20826 if (pbl_pool->count) {
20827 /* Move a batch of XRIs from public to private pool */
20828 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
20829 list_for_each_entry_safe(lpfc_ncmd,
20833 list_move_tail(&lpfc_ncmd->list,
20842 spin_unlock(&pvt_pool->lock);
20843 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20846 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20853 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20854 * @phba: pointer to lpfc hba data structure.
20855 * @hwqid: index of the HWQ to replenish.
20856 * @count: number of XRIs to move
20858 * This routine tries to find some free common bufs in one of the public
20859 * pools with a round-robin method. The search starts at the local hwqid, then
20860 * continues from the HWQ remembered last time (rrb_next_hwqid). Once a public
20861 * pool is found, a batch of free common bufs is moved to the private pool on
20862 * hwqid. It might move fewer than @count XRIs if the public pool runs short.
void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
{
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_multixri_pool *next_multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_sli4_hdw_queue *qp;
	u32 next_hwqid;
	u32 hwq_count;
	int ret;

	qp = &phba->sli4_hba.hdwq[hwqid];
	multixri_pool = qp->p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;
	pbl_pool = &multixri_pool->pbl_pool;

	/* Check if local pbl_pool is available */
	ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
	if (ret) {
#ifdef LPFC_MXP_STAT
		multixri_pool->local_pbl_hit_count++;
#endif
		return;
	}

	hwq_count = phba->cfg_hdw_queue;

	/* Get the next hwqid which was found last time */
	next_hwqid = multixri_pool->rrb_next_hwqid;

	do {
		/* Go to next hwq */
		next_hwqid = (next_hwqid + 1) % hwq_count;

		next_multixri_pool =
			phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
		pbl_pool = &next_multixri_pool->pbl_pool;

		/* Check if the public free xri pool is available */
		ret = _lpfc_move_xri_pbl_to_pvt(
			phba, qp, pbl_pool, pvt_pool, count);

		/* Exit while-loop if success or all hwqid are checked */
	} while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);

	/* Starting point for the next time */
	multixri_pool->rrb_next_hwqid = next_hwqid;

	if (!ret) {
		/* stats: all public pools are empty */
		multixri_pool->pbl_empty_count++;
	}

#ifdef LPFC_MXP_STAT
	if (ret) {
		if (next_hwqid == hwqid)
			multixri_pool->local_pbl_hit_count++;
		else
			multixri_pool->other_pbl_hit_count++;
	}
#endif
}
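
/*
 * The do/while above is a single round-robin sweep: indices advance as
 * (i + 1) % hwq_count starting just past rrb_next_hwqid, so every other
 * HWQ is probed at most once and the sweep stops on the first successful
 * donation; rrb_next_hwqid then records where the next sweep resumes.
 * The same walk in miniature (illustrative sketch, hypothetical names):
 *
 *	u32 i = start;
 *	do {
 *		i = (i + 1) % n;
 *		if (try_take(i))
 *			break;
 *	} while (i != start);
 */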
/**
 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine gets a batch of XRIs from pbl_pool if pvt_pool is less than
 * low watermark.
 **/
void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;

	multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;

	if (pvt_pool->count < pvt_pool->low_watermark)
		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
}
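
/*
 * Because the check above is a bare comparison, calling this routine from a
 * periodic context is cheap: no lock is taken unless the private pool has
 * actually dipped below low_watermark, and a refill pulls at most XRI_BATCH
 * XRIs per invocation.
 */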
/**
 * lpfc_release_io_buf - Return one IO buf back to free pool
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_ncmd: IO buf to be returned.
 * @qp: belong to which HWQ.
 *
 * This routine returns one IO buf back to free pool. If this is an urgent IO,
 * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1,
 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
 * lpfc_io_buf_list_put.
 **/
void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
			 struct lpfc_sli4_hdw_queue *qp)
{
	unsigned long iflag;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_epd_pool *epd_pool;
	u32 txcmplq_cnt;
	u32 xri_owned;
	u32 xri_limit;
	u32 abts_io_bufs;

	/* MUST zero fields if buffer is reused by another protocol */
	lpfc_ncmd->nvmeCmd = NULL;
	lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
	lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;

	if (phba->cfg_xpsgl && !phba->nvmet_support &&
	    !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
		lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);

	if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
		lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);

	if (phba->cfg_xri_rebalancing) {
		if (lpfc_ncmd->expedite) {
			/* Return to expedite pool */
			epd_pool = &phba->epd_pool;
			spin_lock_irqsave(&epd_pool->lock, iflag);
			list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
			epd_pool->count++;
			spin_unlock_irqrestore(&epd_pool->lock, iflag);
			return;
		}

		/* Avoid invalid access if an IO sneaks in and is being rejected
		 * just _after_ xri pools are destroyed in lpfc_offline.
		 * Nothing much can be done at this point.
		 */
		if (!qp->p_multixri_pool)
			return;

		pbl_pool = &qp->p_multixri_pool->pbl_pool;
		pvt_pool = &qp->p_multixri_pool->pvt_pool;

		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
		abts_io_bufs = qp->abts_scsi_io_bufs;
		abts_io_bufs += qp->abts_nvme_io_bufs;

		xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
		xri_limit = qp->p_multixri_pool->xri_limit;

#ifdef LPFC_MXP_STAT
		if (xri_owned <= xri_limit)
			qp->p_multixri_pool->below_limit_count++;
		else
			qp->p_multixri_pool->above_limit_count++;
#endif

		/* XRI goes to either public or private free xri pool
		 * based on watermark and xri_limit
		 */
		if ((pvt_pool->count < pvt_pool->low_watermark) ||
		    (xri_owned < xri_limit &&
		     pvt_pool->count < pvt_pool->high_watermark)) {
			lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
						  qp, free_pvt_pool);
			list_add_tail(&lpfc_ncmd->list,
				      &pvt_pool->list);
			pvt_pool->count++;
			spin_unlock_irqrestore(&pvt_pool->lock, iflag);
		} else {
			lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
						  qp, free_pub_pool);
			list_add_tail(&lpfc_ncmd->list,
				      &pbl_pool->list);
			pbl_pool->count++;
			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
		}
	} else {
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
					  qp, free_xri);
		list_add_tail(&lpfc_ncmd->list,
			      &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs++;
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
				       iflag);
	}
}
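
/*
 * Worked example of the placement test above, assuming low_watermark == 10,
 * high_watermark == 40 and xri_limit == 100: with pvt_pool->count == 25 and
 * xri_owned == 80 the buf returns to pvt_pool (80 < 100 && 25 < 40); once
 * xri_owned reaches 100, or pvt_pool->count reaches 40, the same buf is
 * parked in pbl_pool instead, where other HWQs can claim it.
 */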
/**
 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
 * @phba: pointer to lpfc hba data structure.
 * @qp: pointer to HDW queue
 * @pvt_pool: pointer to private pool data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 *
 * This routine tries to get one free IO buf from private pool.
 *
 * Return:
 *   pointer to one free IO buf - if private pool is not empty
 *   NULL - if private pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
				  struct lpfc_sli4_hdw_queue *qp,
				  struct lpfc_pvt_pool *pvt_pool,
				  struct lpfc_nodelist *ndlp)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;

	lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &pvt_pool->list, list) {
		if (lpfc_test_rrq_active(
			phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
			continue;
		list_del(&lpfc_ncmd->list);
		pvt_pool->count--;
		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
		return lpfc_ncmd;
	}
	spin_unlock_irqrestore(&pvt_pool->lock, iflag);

	return NULL;
}
/**
 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine tries to get one free IO buf from expedite pool.
 *
 * Return:
 *   pointer to one free IO buf - if expedite pool is not empty
 *   NULL - if expedite pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct lpfc_epd_pool *epd_pool;

	epd_pool = &phba->epd_pool;
	lpfc_ncmd = NULL;

	spin_lock_irqsave(&epd_pool->lock, iflag);
	if (epd_pool->count > 0) {
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &epd_pool->list, list) {
			list_del(&lpfc_ncmd->list);
			epd_pool->count--;
			break;
		}
	}
	spin_unlock_irqrestore(&epd_pool->lock, iflag);

	return lpfc_ncmd;
}
/**
 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf from multiXRI pools
 * @phba: pointer to lpfc hba data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: belong to which HWQ
 * @expedite: 1 means this request is urgent.
 *
 * This routine will do the following actions and then return a pointer to
 * one free IO buf:
 *
 * 1. If private free xri count is empty, move some XRIs from public to
 *    private pool.
 * 2. Get one XRI from private free xri pool.
 * 3. If we fail to get one from pvt_pool and this is an expedite request,
 *    get one free xri from expedite pool.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
 *
 * Return:
 *   pointer to one free IO buf - if private pool is not empty
 *   NULL - if private pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    int hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_io_buf *lpfc_ncmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_ncmd = NULL;
	multixri_pool = qp->p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;
	multixri_pool->io_req_count++;

	/* If pvt_pool is empty, move some XRIs from public to private pool */
	if (pvt_pool->count == 0)
		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);

	/* Get one XRI from private free xri pool */
	lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);

	if (lpfc_ncmd) {
		lpfc_ncmd->hdwq = qp;
		lpfc_ncmd->hdwq_no = hwqid;
	} else if (expedite) {
		/* If we fail to get one from pvt_pool and this is an expedite
		 * request, get one free xri from expedite pool.
		 */
		lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
	}

	return lpfc_ncmd;
}
static inline struct lpfc_io_buf *
lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;

	qp = &phba->sli4_hba.hdwq[idx];
	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
				 &qp->lpfc_io_buf_list_get, list) {
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_lxritag))
			continue;

		if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
			continue;

		list_del_init(&lpfc_cmd->list);
		qp->get_io_bufs--;
		lpfc_cmd->hdwq = qp;
		lpfc_cmd->hdwq_no = idx;
		return lpfc_cmd;
	}
	return NULL;
}
/**
 * lpfc_get_io_buf - Get one IO buffer from free pool
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: belong to which HWQ
 * @expedite: 1 means this request is urgent.
 *
 * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1,
 * removes an IO buffer from multiXRI pools. If cfg_xri_rebalancing==0, removes
 * an IO buffer from the head of the @hwqid io_buf_list and returns it to the
 * caller.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    u32 hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag;
	struct lpfc_io_buf *lpfc_cmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_cmd = NULL;

	if (phba->cfg_xri_rebalancing)
		lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
			phba, ndlp, hwqid, expedite);
	else {
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
					  qp, alloc_xri_get);
		if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
			lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		if (!lpfc_cmd) {
			lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
					  qp, alloc_xri_put);
			list_splice(&qp->lpfc_io_buf_list_put,
				    &qp->lpfc_io_buf_list_get);
			qp->get_io_bufs += qp->put_io_bufs;
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
			qp->put_io_bufs = 0;
			spin_unlock(&qp->io_buf_list_put_lock);
			if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
			    expedite)
				lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		}
		spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
	}

	return lpfc_cmd;
}
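
/*
 * Sketch of a typical caller pairing (hypothetical call site, illustration
 * only); per the notes above, SCSI passes its ndlp for RRQ screening while
 * NVME passes NULL, with expedite set for urgent commands:
 *
 *	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, hwqid, 0);
 *	if (!lpfc_cmd)
 *		return SCSI_MLQUEUE_HOST_BUSY;
 *	...
 *	lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
 */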
/**
 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to append the SGL chunk
 *
 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
 * and will allocate an SGL chunk if the pool is empty.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to sli4_hybrid_sgl - Success
 **/
struct sli4_hybrid_sgl *
lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	struct sli4_hybrid_sgl *allocated_sgl = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->sgl_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the sgl_list */
		list_for_each_entry_safe(list_entry, tmp,
					 buf_list, list_node) {
			list_move_tail(&list_entry->list_node,
				       &lpfc_buf->dma_sgl_xtra_list);
			break;
		}
	} else {
		/* allocate more */
		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
				   cpu_to_node(hdwq->io_wq->chann));
		if (!tmp) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8353 error kmalloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			return NULL;
		}

		tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
					      GFP_ATOMIC, &tmp->dma_phys_sgl);
		if (!tmp->dma_sgl) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8354 error pool_alloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			kfree(tmp);
			return NULL;
		}

		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
		list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
	}

	allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
					struct sli4_hybrid_sgl,
					list_node);

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

	return allocated_sgl;
}
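
/*
 * Lock choreography worth noting above: hdwq_lock is dropped before the
 * kmalloc_node()/dma_pool_alloc() calls and re-taken only to link the new
 * chunk onto dma_sgl_xtra_list, so the pool lock is never held across an
 * allocator call. lpfc_get_cmd_rsp_buf_per_hdwq() below follows the same
 * shape.
 */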
/**
 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the SGL chunk
 *
 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
 *
 * Return codes:
 *   0 - Success
 *   -EINVAL - Error
 **/
int
lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
	int rc = 0;
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->sgl_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
		list_for_each_entry_safe(list_entry, tmp,
					 &lpfc_buf->dma_sgl_xtra_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       buf_list);
		}
	} else {
		rc = -EINVAL;
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
	return rc;
}
/**
 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to cleanup sgl buff resources on
 *
 * This routine frees all SGL chunks of hdwq SGL chunk pool.
 *
 * Return codes:
 *   None
 **/
void
lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
		       struct lpfc_sli4_hdw_queue *hdwq)
{
	struct list_head *buf_list = &hdwq->sgl_list;
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	/* Free sgl pool */
	list_for_each_entry_safe(list_entry, tmp,
				 buf_list, list_node) {
		dma_pool_free(phba->lpfc_sg_dma_buf_pool,
			      list_entry->dma_sgl,
			      list_entry->dma_phys_sgl);
		list_del(&list_entry->list_node);
		kfree(list_entry);
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}
/**
 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
 *
 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
 * and will allocate a CMD/RSP buffer if the pool is empty.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to fcp_cmd_rsp_buf - Success
 **/
struct fcp_cmd_rsp_buf *
lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
{
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	struct fcp_cmd_rsp_buf *allocated_buf = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the list */
		list_for_each_entry_safe(list_entry, tmp,
					 buf_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       &lpfc_buf->dma_cmd_rsp_list);
			break;
		}
	} else {
		/* allocate more */
		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
				   cpu_to_node(hdwq->io_wq->chann));
		if (!tmp) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8355 error kmalloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			return NULL;
		}

		tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
					       GFP_ATOMIC,
					       &tmp->fcp_cmd_rsp_dma_handle);

		if (!tmp->fcp_cmnd) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8356 error pool_alloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			kfree(tmp);
			return NULL;
		}

		tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
						  sizeof(struct fcp_cmnd));

		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
		list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
	}

	allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
					struct fcp_cmd_rsp_buf,
					list_node);

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

	return allocated_buf;
}
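
/*
 * The pointer math above packs both frames into one DMA allocation: fcp_cmnd
 * sits at offset 0 and fcp_rsp immediately follows it, e.g.:
 *
 *	tmp->fcp_cmnd  ->  pool buffer + 0
 *	tmp->fcp_rsp   ->  pool buffer + sizeof(struct fcp_cmnd)
 *
 * so the single fcp_cmd_rsp_dma_handle covers both frames.
 */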
/**
 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the CMD/RSP buf
 *
 * This routine puts one CMD/RSP buffer into hdwq's CMD/RSP buf pool.
 *
 * Return codes:
 *   0 - Success
 *   -EINVAL - Error
 **/
int
lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
{
	int rc = 0;
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
		list_for_each_entry_safe(list_entry, tmp,
					 &lpfc_buf->dma_cmd_rsp_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       buf_list);
		}
	} else {
		rc = -EINVAL;
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
	return rc;
}
/**
 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to cleanup cmd rsp buff resources on
 *
 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
 *
 * Return codes:
 *   None
 **/
void
lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			       struct lpfc_sli4_hdw_queue *hdwq)
{
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	/* Free cmd_rsp buf pool */
	list_for_each_entry_safe(list_entry, tmp,
				 buf_list,
				 list_node) {
		dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
			      list_entry->fcp_cmnd,
			      list_entry->fcp_cmd_rsp_dma_handle);
		list_del(&list_entry->list_node);
		kfree(list_entry);
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}