/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}
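
		/*
		 * Hand the GPL license key (byte-swapped to big-endian
		 * above) to the adapter in the reserved words of the
		 * READ_NVPARM mailbox command.
		 */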
		lpfc_read_nv(phba, pmb);
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
		       sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
		       sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set in lpfc_get_cfgparam()
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
		       sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for driver's configuring asynchronous event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set internal async event support flag to 1; otherwise, it will
 * set internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contain option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
		       sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, then set the Param changes flag
	 * to unreg the login
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		   sizeof(struct lpfc_name)))
		vport->vport_flag |= FAWWPN_PARAM_CHG;
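
	/*
	 * Refresh the port name from the service parameters when it is
	 * unset, a soft WWPN is configured, or a fabric-assigned WWPN is
	 * in use; otherwise push the current port name back into the
	 * service parameters.
	 */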
	if (vport->fc_portname.u.wwn[0] == 0 ||
	    vport->phba->cfg_soft_wwpn ||
	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
	    vport->vport_flag & FAWWPN_SET) {
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
		vport->vport_flag &= ~FAWWPN_SET;
		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
			vport->vport_flag |= FAWWPN_SET;
	} else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
		       sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
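		/*
		 * Encode the low 6 bytes of the WWNN as 12 characters,
		 * mapping each nibble to '0'-'9' (0x30 + j) or 'a'-'f'
		 * (0x61 + j - 10).
		 */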
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	i = (mb->un.varRdConfig.max_xri + 1);
	if (phba->cfg_hba_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, i);
		phba->cfg_hba_queue_depth = i;
	}

	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
	i = (mb->un.varRdConfig.max_xri >> 3);
	if (phba->pport->cfg_lun_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3360 LUN queue depth changed from %d to %d\n",
				phba->pport->cfg_lun_queue_depth, i);
		phba->pport->cfg_lun_queue_depth = i;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;
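
	/*
	 * A user-requested fixed link speed is honored only when the
	 * adapter's link-speed mask (lmt, reported by READ_CONFIG)
	 * supports it; anything else falls back to auto-negotiation.
	 */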
	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
	     !(phba->lmt & LMT_64Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 * rspiocb which got deferred
 *
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or DOA
			 * Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_els_sgl_list and */
					/* scsl_buf_list */
	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
			    &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);

	spin_unlock(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
	 */
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_scsi_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_scsi_buf_list,
				 &aborts);

		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_scsi_buf_list_lock);

		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			spin_lock(&qp->abts_nvme_buf_list_lock);
			list_splice_init(&qp->lpfc_abts_nvme_buf_list,
					 &nvme_aborts);
			list_for_each_entry_safe(psb, psb_next, &nvme_aborts,
						 list) {
				psb->pCmd = NULL;
				psb->status = IOSTAT_SUCCESS;
				cnt++;
			}
			spin_lock(&qp->io_buf_list_put_lock);
			qp->put_io_bufs += qp->abts_nvme_io_bufs;
			qp->abts_nvme_io_bufs = 0;
			list_splice_init(&nvme_aborts,
					 &qp->lpfc_io_buf_list_put);
			spin_unlock(&qp->io_buf_list_put_lock);
			spin_unlock(&qp->abts_nvme_buf_list_lock);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: pointer to the timer_list embedded in the lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: pointer to the timer_list embedded in the lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
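	/* Flag RRQ processing as active only while the driver is loaded;
	 * the worker thread services the active RRQ list once woken.
	 */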
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
	    !(phba->link_state == LPFC_HBA_ERROR) &&
	    !(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
}

static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba, eq_delay_work);
	struct lpfc_eq_intr_info *eqi, *eqi_new;
	struct lpfc_queue *eq, *eq_next;
	unsigned char *eqcnt = NULL;
	uint32_t usdelay;
	int i;

	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	eqcnt = kcalloc(num_possible_cpus(), sizeof(unsigned char),
			GFP_KERNEL);
	if (!eqcnt)
		goto requeue;

	/* Loop thru all IRQ vectors */
	for (i = 0; i < phba->cfg_irq_chann; i++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
		if (eq && eqcnt[eq->last_cpu] < 2)
			eqcnt[eq->last_cpu]++;
	}

	for_each_present_cpu(i) {
		if (phba->cfg_irq_chann > 1 && eqcnt[i] < 2)
			continue;

		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
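
		/* Scale the interrupt count accumulated on this CPU into
		 * an EQ coalescing delay in microseconds, capped at
		 * LPFC_MAX_AUTO_EQ_DELAY.
		 */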
		usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) *
			   LPFC_EQ_DELAY_STEP;
		if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
			usdelay = LPFC_MAX_AUTO_EQ_DELAY;

		eqi->icnt = 0;

		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
			if (eq->last_cpu != i) {
				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
						      eq->last_cpu);
				list_move_tail(&eq->cpu_list, &eqi_new->list);
				continue;
			}
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}

	kfree(eqcnt);

requeue:
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}

/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 * @phba: pointer to lpfc hba data structure.
 *
 * For each heartbeat, this routine does some heuristic methods to adjust
 * XRI distribution. The goal is to fully utilize free XRIs.
 **/
static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;

	hwq_count = phba->cfg_hdw_queue;
	for (i = 0; i < hwq_count; i++) {
		/* Adjust XRIs in private pool */
		lpfc_adjust_pvt_pool_count(phba, i);

		/* Adjust high watermark */
		lpfc_adjust_high_watermark(phba, i);

#ifdef LPFC_MXP_STAT
		/* Snapshot pbl, pvt and busy count */
		lpfc_snapshot_mxp(phba, i);
#endif
	}
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * to offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if (phba->cfg_xri_rebalancing) {
		/* Multi-XRI pools handler */
		lpfc_hb_mxp_handler(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_num_disc_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time +
		       msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
		       jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
		else
			mod_timer(&phba->hb_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
	    (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
					 struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
			    (list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						  jiffies +
						  msecs_to_jiffies(1000 *
						  LPFC_HB_MBOX_INTERVAL));
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
							     MBX_NOWAIT);

				if (retval != MBX_BUSY &&
				    retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
						     phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						  jiffies +
						  msecs_to_jiffies(1000 *
						  LPFC_HB_MBOX_INTERVAL));
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
						  phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
						"2857 Last completion time not "
						"updated in %d ms\n",
						jiffies_to_msecs(jiffies
						- phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still out"
					"standing:last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
					- phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		}
	} else {
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by HBA by setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs,
			phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * the first write to the host attention register clears
	 * the host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
}

/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: enable the "Reset Needed" log message.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for port
 * status register (ERR, RDY, RN) bits before proceeding with function reset.
 * During this process, interrupt vectors are freed and later requested
 * for handling possible port resource change.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		/*
		 * On error status condition, driver need to wait for port
		 * ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (rc)
			return rc;
	}

	/* need reset: attempt for port recovery */
	if (en_rn_msg)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");
	lpfc_offline_prep(phba, mbx_action);
	lpfc_offline(phba);

	/* release interrupt for possible resource change */
	lpfc_sli4_disable_intr(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6309 Failed to restart board\n");
		return rc;
	}
	/* request and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3175 Failed to enable interrupt\n");
		return -EIO;
	}
	phba->intr_mode = intr_mode;
	rc = lpfc_online(phba);
	if (rc == 0)
		lpfc_unblock_mgmt_io(phba);

	return rc;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
	bool en_rn_msg = true;
	struct temp_event temp_event_data;
	struct lpfc_register portsmphr_reg;
	int rc, i;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3166 pci channel is offline\n");
		lpfc_sli4_offline_eratt(phba);
		return;
	}

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"7623 Checking UE recoverable");

		for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
			if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				       &portsmphr_reg.word0))
				continue;

			smphr_port_status = bf_get(lpfc_port_smphr_port_status,
						   &portsmphr_reg);
			if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
			    LPFC_PORT_SEM_UE_RECOVERABLE)
				break;
			/* Sleep for 1Sec, before checking SEMAPHORE */
			msleep(1000);
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"4827 smphr_port_status x%x : Waited %dSec",
				smphr_port_status, i);

		/* Recoverable UE, reset the HBA device */
		if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
		    LPFC_PORT_SEM_UE_RECOVERABLE) {
			for (i = 0; i < 20; i++) {
				msleep(1000);
				if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
						&portsmphr_reg.word0) &&
				    (LPFC_POST_STAGE_PORT_READY ==
				     bf_get(lpfc_port_smphr_port_status,
					    &portsmphr_reg))) {
					rc = lpfc_sli4_port_sta_fn_reset(phba,
						LPFC_MBX_NO_WAIT, en_rn_msg);
					if (rc == 0)
						return;
					lpfc_printf_log(phba, KERN_ERR,
						LOG_INIT,
						"4215 Failed to recover UE");
					break;
				}
			}
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"7624 Firmware not ready: Failing UE recovery,"
				" waited %dSec", i);
		lpfc_sli4_offline_eratt(phba);
		break;

	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2889 Port Overtemperature event, "
					"taking port offline Data: x%x x%x\n",
					reg_err1, reg_err2);

			phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
			temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
			temp_event_data.event_code = LPFC_CRIT_TEMP;
			temp_event_data.data = 0xFFFFFFFF;

			shost = lpfc_shost_from_vport(phba->pport);
			fc_host_post_vendor_event(shost, fc_get_event_number(),
						  sizeof(temp_event_data),
						  (char *)&temp_event_data,
						  SCSI_NL_VID_TYPE_PCI
						  | PCI_VENDOR_ID_EMULEX);

			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
1960 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1961 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
1962 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1963 "3143 Port Down: Firmware Update "
1966 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1967 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1968 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1969 "3144 Port Down: Debug Dump\n");
1970 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1971 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
1972 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1973 "3145 Port Down: Provisioning\n");
1975 /* If resets are disabled then leave the HBA alone and return */
1976 if (!phba->cfg_enable_hba_reset)
1979 /* Check port status register for function reset */
1980 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
1983 /* don't report event on forced debug dump */
1984 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1985 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1990 /* fall through when not able to recover */
1991 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1992 "3152 Unrecoverable error, bring the port "
1994 lpfc_sli4_offline_eratt(phba);
1996 case LPFC_SLI_INTF_IF_TYPE_1:
2000 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2001 "3123 Report dump event to upper layer\n");
2002 /* Send an internal error event to mgmt application */
2003 lpfc_board_errevt_to_mgmt(phba);
2005 event_data = FC_REG_DUMP_EVENT;
2006 shost = lpfc_shost_from_vport(vport);
2007 fc_host_post_vendor_event(shost, fc_get_event_number(),
2008 sizeof(event_data), (char *) &event_data,
2009 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
2013 * lpfc_handle_eratt - Wrapper function for handling HBA error attention
2014 * @phba: pointer to lpfc HBA data structure.
2016 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
2017 * routine, invoked via the API jump table function pointer in the lpfc_hba struct.
2021 * Any other value - error.
2024 lpfc_handle_eratt(struct lpfc_hba *phba)
2026 (*phba->lpfc_handle_eratt)(phba);
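/*
 * A minimal sketch of the jump-table dispatch used above; the handler
 * names here are hypothetical. At setup time the driver installs either
 * the SLI3 or the SLI4 handler, so callers stay revision-agnostic.
 */
#if 0	/* illustrative sketch, not built */
static void example_handle_eratt_s3(struct lpfc_hba *phba) { /* SLI3 path */ }
static void example_handle_eratt_s4(struct lpfc_hba *phba) { /* SLI4 path */ }

static void example_setup_eratt_handler(struct lpfc_hba *phba)
{
	phba->lpfc_handle_eratt = (phba->sli_rev == LPFC_SLI_REV4) ?
				  example_handle_eratt_s4 :
				  example_handle_eratt_s3;
}
#endif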
2030 * lpfc_handle_latt - The HBA link event handler
2031 * @phba: pointer to lpfc hba data structure.
2033 * This routine is invoked from the worker thread to handle an HBA host
2034 * attention link event. SLI3 only.
2037 lpfc_handle_latt(struct lpfc_hba *phba)
2039 struct lpfc_vport *vport = phba->pport;
2040 struct lpfc_sli *psli = &phba->sli;
2042 volatile uint32_t control;
2043 struct lpfc_dmabuf *mp;
2046 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2049 goto lpfc_handle_latt_err_exit;
2052 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2055 goto lpfc_handle_latt_free_pmb;
2058 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2061 goto lpfc_handle_latt_free_mp;
2064 /* Cleanup any outstanding ELS commands */
2065 lpfc_els_flush_all_cmd(phba);
2067 psli->slistat.link_event++;
2068 lpfc_read_topology(phba, pmb, mp);
2069 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2071 /* Block ELS IOCBs until we have processed this mbox command */
2072 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2073 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
2074 if (rc == MBX_NOT_FINISHED) {
2076 goto lpfc_handle_latt_free_mbuf;
2079 /* Clear Link Attention in HA REG */
2080 spin_lock_irq(&phba->hbalock);
2081 writel(HA_LATT, phba->HAregaddr);
2082 readl(phba->HAregaddr); /* flush */
2083 spin_unlock_irq(&phba->hbalock);
2087 lpfc_handle_latt_free_mbuf:
2088 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2089 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2090 lpfc_handle_latt_free_mp:
2092 lpfc_handle_latt_free_pmb:
2093 mempool_free(pmb, phba->mbox_mem_pool);
2094 lpfc_handle_latt_err_exit:
2095 /* Enable Link attention interrupts */
2096 spin_lock_irq(&phba->hbalock);
2097 psli->sli_flag |= LPFC_PROCESS_LA;
2098 control = readl(phba->HCregaddr);
2099 control |= HC_LAINT_ENA;
2100 writel(control, phba->HCregaddr);
2101 readl(phba->HCregaddr); /* flush */
2103 /* Clear Link Attention in HA REG */
2104 writel(HA_LATT, phba->HAregaddr);
2105 readl(phba->HAregaddr); /* flush */
2106 spin_unlock_irq(&phba->hbalock);
2107 lpfc_linkdown(phba);
2108 phba->link_state = LPFC_HBA_ERROR;
2110 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
2111 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
2117 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
2118 * @phba: pointer to lpfc hba data structure.
2119 * @vpd: pointer to the vital product data.
2120 * @len: length of the vital product data in bytes.
2122 * This routine parses the Vital Product Data (VPD). The VPD is treated as
2123 * an array of characters. In this routine, the ModelName, ProgramType,
2124 * ModelDesc, and related fields of the phba data structure are populated.
2127 * 0 - pointer to the VPD passed in is NULL
2131 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2133 uint8_t lenlo, lenhi;
2143 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2144 "0455 Vital Product Data: x%x x%x x%x x%x\n",
2145 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2147 while (!finished && (index < (len - 4))) {
2148 switch (vpd[index]) {
2156 i = ((((unsigned short)lenhi) << 8) + lenlo);
2165 Length = ((((unsigned short)lenhi) << 8) + lenlo);
2166 if (Length > len - index)
2167 Length = len - index;
2168 while (Length > 0) {
2169 /* Look for Serial Number */
2170 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2177 phba->SerialNumber[j++] = vpd[index++];
2181 phba->SerialNumber[j] = 0;
2184 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2185 phba->vpd_flag |= VPD_MODEL_DESC;
2192 phba->ModelDesc[j++] = vpd[index++];
2196 phba->ModelDesc[j] = 0;
2199 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2200 phba->vpd_flag |= VPD_MODEL_NAME;
2207 phba->ModelName[j++] = vpd[index++];
2211 phba->ModelName[j] = 0;
2214 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2215 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2222 phba->ProgramType[j++] = vpd[index++];
2226 phba->ProgramType[j] = 0;
2229 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2230 phba->vpd_flag |= VPD_PORT;
2237 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2238 (phba->sli4_hba.pport_name_sta ==
2239 LPFC_SLI4_PPNAME_GET)) {
2243 phba->Port[j++] = vpd[index++];
2247 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2248 (phba->sli4_hba.pport_name_sta ==
2249 LPFC_SLI4_PPNAME_NON))
2276 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2277 * @phba: pointer to lpfc hba data structure.
2278 * @mdp: pointer to the data structure to hold the derived model name.
2279 * @descp: pointer to the data structure to hold the derived description.
2281 * This routine retrieves the HBA's description based on its registered PCI
2282 * device ID. The @descp passed into this function points to an array of 256
2283 * chars; on return it holds the model name, maximum speed, and host bus type.
2284 * The @mdp passed into this function points to an array of 80 chars. When the
2285 * function returns, the @mdp will be filled with the model name.
2288 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2291 uint16_t dev_id = phba->pcidev->device;
2294 int oneConnect = 0; /* default is not a oneConnect */
2299 } m = {"<Unknown>", "", ""};
2301 if (mdp && mdp[0] != '\0'
2302 && descp && descp[0] != '\0')
2305 if (phba->lmt & LMT_64Gb)
2307 else if (phba->lmt & LMT_32Gb)
2309 else if (phba->lmt & LMT_16Gb)
2311 else if (phba->lmt & LMT_10Gb)
2313 else if (phba->lmt & LMT_8Gb)
2315 else if (phba->lmt & LMT_4Gb)
2317 else if (phba->lmt & LMT_2Gb)
2319 else if (phba->lmt & LMT_1Gb)
2327 case PCI_DEVICE_ID_FIREFLY:
2328 m = (typeof(m)){"LP6000", "PCI",
2329 "Obsolete, Unsupported Fibre Channel Adapter"};
2331 case PCI_DEVICE_ID_SUPERFLY:
2332 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2333 m = (typeof(m)){"LP7000", "PCI", ""};
2335 m = (typeof(m)){"LP7000E", "PCI", ""};
2336 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2338 case PCI_DEVICE_ID_DRAGONFLY:
2339 m = (typeof(m)){"LP8000", "PCI",
2340 "Obsolete, Unsupported Fibre Channel Adapter"};
2342 case PCI_DEVICE_ID_CENTAUR:
2343 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2344 m = (typeof(m)){"LP9002", "PCI", ""};
2346 m = (typeof(m)){"LP9000", "PCI", ""};
2347 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2349 case PCI_DEVICE_ID_RFLY:
2350 m = (typeof(m)){"LP952", "PCI",
2351 "Obsolete, Unsupported Fibre Channel Adapter"};
2353 case PCI_DEVICE_ID_PEGASUS:
2354 m = (typeof(m)){"LP9802", "PCI-X",
2355 "Obsolete, Unsupported Fibre Channel Adapter"};
2357 case PCI_DEVICE_ID_THOR:
2358 m = (typeof(m)){"LP10000", "PCI-X",
2359 "Obsolete, Unsupported Fibre Channel Adapter"};
2361 case PCI_DEVICE_ID_VIPER:
2362 m = (typeof(m)){"LPX1000", "PCI-X",
2363 "Obsolete, Unsupported Fibre Channel Adapter"};
2365 case PCI_DEVICE_ID_PFLY:
2366 m = (typeof(m)){"LP982", "PCI-X",
2367 "Obsolete, Unsupported Fibre Channel Adapter"};
2369 case PCI_DEVICE_ID_TFLY:
2370 m = (typeof(m)){"LP1050", "PCI-X",
2371 "Obsolete, Unsupported Fibre Channel Adapter"};
2373 case PCI_DEVICE_ID_HELIOS:
2374 m = (typeof(m)){"LP11000", "PCI-X2",
2375 "Obsolete, Unsupported Fibre Channel Adapter"};
2377 case PCI_DEVICE_ID_HELIOS_SCSP:
2378 m = (typeof(m)){"LP11000-SP", "PCI-X2",
2379 "Obsolete, Unsupported Fibre Channel Adapter"};
2381 case PCI_DEVICE_ID_HELIOS_DCSP:
2382 m = (typeof(m)){"LP11002-SP", "PCI-X2",
2383 "Obsolete, Unsupported Fibre Channel Adapter"};
2385 case PCI_DEVICE_ID_NEPTUNE:
2386 m = (typeof(m)){"LPe1000", "PCIe",
2387 "Obsolete, Unsupported Fibre Channel Adapter"};
2389 case PCI_DEVICE_ID_NEPTUNE_SCSP:
2390 m = (typeof(m)){"LPe1000-SP", "PCIe",
2391 "Obsolete, Unsupported Fibre Channel Adapter"};
2393 case PCI_DEVICE_ID_NEPTUNE_DCSP:
2394 m = (typeof(m)){"LPe1002-SP", "PCIe",
2395 "Obsolete, Unsupported Fibre Channel Adapter"};
2397 case PCI_DEVICE_ID_BMID:
2398 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2400 case PCI_DEVICE_ID_BSMB:
2401 m = (typeof(m)){"LP111", "PCI-X2",
2402 "Obsolete, Unsupported Fibre Channel Adapter"};
2404 case PCI_DEVICE_ID_ZEPHYR:
2405 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2407 case PCI_DEVICE_ID_ZEPHYR_SCSP:
2408 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2410 case PCI_DEVICE_ID_ZEPHYR_DCSP:
2411 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2414 case PCI_DEVICE_ID_ZMID:
2415 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2417 case PCI_DEVICE_ID_ZSMB:
2418 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2420 case PCI_DEVICE_ID_LP101:
2421 m = (typeof(m)){"LP101", "PCI-X",
2422 "Obsolete, Unsupported Fibre Channel Adapter"};
2424 case PCI_DEVICE_ID_LP10000S:
2425 m = (typeof(m)){"LP10000-S", "PCI",
2426 "Obsolete, Unsupported Fibre Channel Adapter"};
2428 case PCI_DEVICE_ID_LP11000S:
2429 m = (typeof(m)){"LP11000-S", "PCI-X2",
2430 "Obsolete, Unsupported Fibre Channel Adapter"};
2432 case PCI_DEVICE_ID_LPE11000S:
2433 m = (typeof(m)){"LPe11000-S", "PCIe",
2434 "Obsolete, Unsupported Fibre Channel Adapter"};
2436 case PCI_DEVICE_ID_SAT:
2437 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2439 case PCI_DEVICE_ID_SAT_MID:
2440 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2442 case PCI_DEVICE_ID_SAT_SMB:
2443 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2445 case PCI_DEVICE_ID_SAT_DCSP:
2446 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2448 case PCI_DEVICE_ID_SAT_SCSP:
2449 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2451 case PCI_DEVICE_ID_SAT_S:
2452 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2454 case PCI_DEVICE_ID_HORNET:
2455 m = (typeof(m)){"LP21000", "PCIe",
2456 "Obsolete, Unsupported FCoE Adapter"};
2459 case PCI_DEVICE_ID_PROTEUS_VF:
2460 m = (typeof(m)){"LPev12000", "PCIe IOV",
2461 "Obsolete, Unsupported Fibre Channel Adapter"};
2463 case PCI_DEVICE_ID_PROTEUS_PF:
2464 m = (typeof(m)){"LPev12000", "PCIe IOV",
2465 "Obsolete, Unsupported Fibre Channel Adapter"};
2467 case PCI_DEVICE_ID_PROTEUS_S:
2468 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2469 "Obsolete, Unsupported Fibre Channel Adapter"};
2471 case PCI_DEVICE_ID_TIGERSHARK:
2473 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2475 case PCI_DEVICE_ID_TOMCAT:
2477 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2479 case PCI_DEVICE_ID_FALCON:
2480 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2481 "EmulexSecure Fibre"};
2483 case PCI_DEVICE_ID_BALIUS:
2484 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2485 "Obsolete, Unsupported Fibre Channel Adapter"};
2487 case PCI_DEVICE_ID_LANCER_FC:
2488 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2490 case PCI_DEVICE_ID_LANCER_FC_VF:
2491 m = (typeof(m)){"LPe16000", "PCIe",
2492 "Obsolete, Unsupported Fibre Channel Adapter"};
2494 case PCI_DEVICE_ID_LANCER_FCOE:
2496 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2498 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2500 m = (typeof(m)){"OCe15100", "PCIe",
2501 "Obsolete, Unsupported FCoE"};
2503 case PCI_DEVICE_ID_LANCER_G6_FC:
2504 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2506 case PCI_DEVICE_ID_LANCER_G7_FC:
2507 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2509 case PCI_DEVICE_ID_SKYHAWK:
2510 case PCI_DEVICE_ID_SKYHAWK_VF:
2512 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2515 m = (typeof(m)){"Unknown", "", ""};
2519 if (mdp && mdp[0] == '\0')
2520 snprintf(mdp, 79, "%s", m.name);
2522 * OneConnect HBAs require special processing; they are all initiators
2523 * and we put the port number on the end
2525 if (descp && descp[0] == '\0') {
2527 snprintf(descp, 255,
2528 "Emulex OneConnect %s, %s Initiator %s",
2531 else if (max_speed == 0)
2532 snprintf(descp, 255,
2534 m.name, m.bus, m.function);
2536 snprintf(descp, 255,
2537 "Emulex %s %d%s %s %s",
2538 m.name, max_speed, (GE) ? "GE" : "Gb",
2544 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
2545 * @phba: pointer to lpfc hba data structure.
2546 * @pring: pointer to an IOCB ring.
2547 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2549 * This routine posts a given number of IOCBs with the associated DMA buffer
2550 * descriptors specified by the cnt argument to the given IOCB ring.
2553 * The number of IOCBs NOT able to be posted to the IOCB ring.
2556 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2559 struct lpfc_iocbq *iocb;
2560 struct lpfc_dmabuf *mp1, *mp2;
2562 cnt += pring->missbufcnt;
2564 /* While there are buffers to post */
2566 /* Allocate buffer for command iocb */
2567 iocb = lpfc_sli_get_iocbq(phba);
2569 pring->missbufcnt = cnt;
2574 /* 2 buffers can be posted per command */
2575 /* Allocate buffer to post */
2576 mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2578 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2579 if (!mp1 || !mp1->virt) {
2581 lpfc_sli_release_iocbq(phba, iocb);
2582 pring->missbufcnt = cnt;
2586 INIT_LIST_HEAD(&mp1->list);
2587 /* Allocate buffer to post */
2589 mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2591 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2593 if (!mp2 || !mp2->virt) {
2595 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2597 lpfc_sli_release_iocbq(phba, iocb);
2598 pring->missbufcnt = cnt;
2602 INIT_LIST_HEAD(&mp2->list);
2607 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2608 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2609 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2610 icmd->ulpBdeCount = 1;
2613 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2614 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2615 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2617 icmd->ulpBdeCount = 2;
2620 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2623 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2625 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2629 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2633 lpfc_sli_release_iocbq(phba, iocb);
2634 pring->missbufcnt = cnt;
2637 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2639 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2641 pring->missbufcnt = 0;
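	/*
	 * Each CMD_QUE_RING_BUF64_CN IOCB built above carries up to two
	 * 64-bit buffer descriptors (un.cont64[0] and un.cont64[1]), so
	 * every posted command replenishes the ELS/CT ring with two
	 * receive buffers.
	 */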
2646 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2647 * @phba: pointer to lpfc hba data structure.
2649 * This routine posts initial receive IOCB buffers to the ELS ring. The
2650 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2651 * set to 64 IOCBs. SLI3 only.
2654 * 0 - success (currently always success)
2657 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2659 struct lpfc_sli *psli = &phba->sli;
2661 /* Ring 0, ELS / CT buffers */
2662 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2663 /* Ring 2 - FCP no buffers needed */
2668 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
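/*
 * S(N, V) is a 32-bit rotate-left, the only bit primitive SHA-1 needs.
 * Worked example: S(5, 0x80000001) == (0x00000020 | 0x00000010) == 0x30,
 * i.e. the two set bits wrap around by five positions.
 */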
2671 * lpfc_sha_init - Set up initial array of hash table entries
2672 * @HashResultPointer: pointer to an array used as the hash table.
2674 * This routine sets up the initial values in the array of hash table entries
2678 lpfc_sha_init(uint32_t * HashResultPointer)
2680 HashResultPointer[0] = 0x67452301;
2681 HashResultPointer[1] = 0xEFCDAB89;
2682 HashResultPointer[2] = 0x98BADCFE;
2683 HashResultPointer[3] = 0x10325476;
2684 HashResultPointer[4] = 0xC3D2E1F0;
2688 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2689 * @HashResultPointer: pointer to an initial/result hash table.
2690 * @HashWorkingPointer: pointer to a working hash table.
2692 * This routine iterates an initial hash table pointed to by @HashResultPointer
2693 * with the values from the working hash table pointed to by @HashWorkingPointer.
2694 * The results are put back into the initial hash table, returned through
2695 * @HashResultPointer as the result hash table.
2698 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2702 uint32_t A, B, C, D, E;
2705 HashWorkingPointer[t] =
2707 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2709 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2710 } while (++t <= 79);
2712 A = HashResultPointer[0];
2713 B = HashResultPointer[1];
2714 C = HashResultPointer[2];
2715 D = HashResultPointer[3];
2716 E = HashResultPointer[4];
2720 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2721 } else if (t < 40) {
2722 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2723 } else if (t < 60) {
2724 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2726 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2728 TEMP += S(5, A) + E + HashWorkingPointer[t];
2734 } while (++t <= 79);
2736 HashResultPointer[0] += A;
2737 HashResultPointer[1] += B;
2738 HashResultPointer[2] += C;
2739 HashResultPointer[3] += D;
2740 HashResultPointer[4] += E;
2745 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2746 * @RandomChallenge: pointer to the entry of host challenge random number array.
2747 * @HashWorking: pointer to the entry of the working hash array.
2749 * This routine calculates the working hash array referred to by @HashWorking
2750 * from the challenge random numbers associated with the host, referred to by
2751 * @RandomChallenge. The result is put into the entry of the working hash
2752 * array and returned by reference through @HashWorking.
2755 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2757 *HashWorking = (*RandomChallenge ^ *HashWorking);
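/*
 * The challenge key is a word-wise XOR whitening step. For example,
 * with *RandomChallenge == 0xA5A5A5A5 and *HashWorking == 0x0F0F0F0F,
 * the value stored back through @HashWorking is 0xAAAAAAAA.
 */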
2761 * lpfc_hba_init - Perform special handling for LC HBA initialization
2762 * @phba: pointer to lpfc hba data structure.
2763 * @hbainit: pointer to an array of unsigned 32-bit integers.
2765 * This routine performs the special handling for LC HBA initialization.
2768 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2771 uint32_t *HashWorking;
2772 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2774 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2778 HashWorking[0] = HashWorking[78] = *pwwnn++;
2779 HashWorking[1] = HashWorking[79] = *pwwnn;
2781 for (t = 0; t < 7; t++)
2782 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2784 lpfc_sha_init(hbainit);
2785 lpfc_sha_iterate(hbainit, HashWorking);
2790 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2791 * @vport: pointer to a virtual N_Port data structure.
2793 * This routine performs the necessary cleanups before deleting the @vport.
2794 * It invokes the discovery state machine to perform necessary state
2795 * transitions and to release the ndlps associated with the @vport. Note,
2796 * the physical port is treated as @vport 0.
2799 lpfc_cleanup(struct lpfc_vport *vport)
2801 struct lpfc_hba *phba = vport->phba;
2802 struct lpfc_nodelist *ndlp, *next_ndlp;
2805 if (phba->link_state > LPFC_LINK_DOWN)
2806 lpfc_port_link_failure(vport);
2808 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2809 if (!NLP_CHK_NODE_ACT(ndlp)) {
2810 ndlp = lpfc_enable_node(vport, ndlp,
2811 NLP_STE_UNUSED_NODE);
2814 spin_lock_irq(&phba->ndlp_lock);
2815 NLP_SET_FREE_REQ(ndlp);
2816 spin_unlock_irq(&phba->ndlp_lock);
2817 /* Trigger the release of the ndlp memory */
2821 spin_lock_irq(&phba->ndlp_lock);
2822 if (NLP_CHK_FREE_REQ(ndlp)) {
2823 /* The ndlp should not be in memory free mode already */
2824 spin_unlock_irq(&phba->ndlp_lock);
2827 /* Indicate request for freeing ndlp memory */
2828 NLP_SET_FREE_REQ(ndlp);
2829 spin_unlock_irq(&phba->ndlp_lock);
2831 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2832 ndlp->nlp_DID == Fabric_DID) {
2833 /* Just free up ndlp with Fabric_DID for vports */
2838 /* take care of nodes in unused state before the state
2839 * machine takes action.
2841 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2846 if (ndlp->nlp_type & NLP_FABRIC)
2847 lpfc_disc_state_machine(vport, ndlp, NULL,
2848 NLP_EVT_DEVICE_RECOVERY);
2850 lpfc_disc_state_machine(vport, ndlp, NULL,
2854 /* At this point, ALL ndlp's should be gone
2855 * because of the previous NLP_EVT_DEVICE_RM.
2856 * Let's wait for this to happen, if needed.
2858 while (!list_empty(&vport->fc_nodes)) {
2860 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2861 "0233 Nodelist not empty\n");
2862 list_for_each_entry_safe(ndlp, next_ndlp,
2863 &vport->fc_nodes, nlp_listp) {
2864 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2866 "0282 did:x%x ndlp:x%p "
2867 "usgmap:x%x refcnt:%d\n",
2868 ndlp->nlp_DID, (void *)ndlp,
2870 kref_read(&ndlp->kref));
2875 /* Wait for any activity on ndlps to settle */
2878 lpfc_cleanup_vports_rrqs(vport, NULL);
2882 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2883 * @vport: pointer to a virtual N_Port data structure.
2885 * This routine stops all the timers associated with a @vport. This function
2886 * is invoked before disabling or deleting a @vport. Note that the physical
2887 * port is treated as @vport 0.
2890 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2892 del_timer_sync(&vport->els_tmofunc);
2893 del_timer_sync(&vport->delayed_disc_tmo);
2894 lpfc_can_disctmo(vport);
2899 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2900 * @phba: pointer to lpfc hba data structure.
2902 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2903 * caller of this routine should already hold the hbalock.
2906 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2908 /* Clear pending FCF rediscovery wait flag */
2909 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2911 /* Now, try to stop the timer */
2912 del_timer(&phba->fcf.redisc_wait);
2916 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2917 * @phba: pointer to lpfc hba data structure.
2919 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2920 * checks whether the FCF rediscovery wait timer is pending with the host
2921 * lock held before proceeding with disabling the timer and clearing the
2922 * wait timer pending flag.
2925 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2927 spin_lock_irq(&phba->hbalock);
2928 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2929 /* FCF rediscovery timer already fired or stopped */
2930 spin_unlock_irq(&phba->hbalock);
2933 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2934 /* Clear failover in progress flags */
2935 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2936 spin_unlock_irq(&phba->hbalock);
2940 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2941 * @phba: pointer to lpfc hba data structure.
2943 * This routine stops all the timers associated with an HBA. This function is
2944 * invoked before either putting an HBA offline or unloading the driver.
2947 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2950 lpfc_stop_vport_timers(phba->pport);
2951 cancel_delayed_work_sync(&phba->eq_delay_work);
2952 del_timer_sync(&phba->sli.mbox_tmo);
2953 del_timer_sync(&phba->fabric_block_timer);
2954 del_timer_sync(&phba->eratt_poll);
2955 del_timer_sync(&phba->hb_tmofunc);
2956 if (phba->sli_rev == LPFC_SLI_REV4) {
2957 del_timer_sync(&phba->rrq_tmr);
2958 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2960 phba->hb_outstanding = 0;
2962 switch (phba->pci_dev_grp) {
2963 case LPFC_PCI_DEV_LP:
2964 /* Stop any LightPulse device specific driver timers */
2965 del_timer_sync(&phba->fcp_poll_timer);
2967 case LPFC_PCI_DEV_OC:
2968 /* Stop any OneConnect device specific driver timers */
2969 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2972 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2973 "0297 Invalid device group (x%x)\n",
2981 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
2982 * @phba: pointer to lpfc hba data structure.
2984 * This routine marks an HBA's management interface as blocked. Once the HBA's
2985 * management interface is marked as blocked, all user space access to
2986 * the HBA, whether from the sysfs interface or the libdfc interface, will
2987 * be blocked. The HBA is set to block the management interface when the
2988 * driver prepares the HBA interface for online or offline.
2991 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
2993 unsigned long iflag;
2994 uint8_t actcmd = MBX_HEARTBEAT;
2995 unsigned long timeout;
2997 spin_lock_irqsave(&phba->hbalock, iflag);
2998 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2999 spin_unlock_irqrestore(&phba->hbalock, iflag);
3000 if (mbx_action == LPFC_MBX_NO_WAIT)
3002 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
3003 spin_lock_irqsave(&phba->hbalock, iflag);
3004 if (phba->sli.mbox_active) {
3005 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
3006 /* Determine how long we might wait for the active mailbox
3007 * command to be gracefully completed by firmware.
3009 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3010 phba->sli.mbox_active) * 1000) + jiffies;
3012 spin_unlock_irqrestore(&phba->hbalock, iflag);
3014 /* Wait for the outstanding mailbox command to complete */
3015 while (phba->sli.mbox_active) {
3016 /* Check active mailbox complete status every 2ms */
3018 if (time_after(jiffies, timeout)) {
3019 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3020 "2813 Mgmt IO is Blocked %x "
3021 "- mbox cmd %x still active\n",
3022 phba->sli.sli_flag, actcmd);
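/*
 * A self-contained sketch of the jiffies deadline-poll idiom used
 * above; example_wait_poll() and example_cond() are hypothetical names.
 */
#if 0	/* illustrative sketch, not built */
static int example_wait_poll(bool (*example_cond)(void), unsigned int tmo_ms)
{
	unsigned long timeout = msecs_to_jiffies(tmo_ms) + jiffies;

	while (example_cond()) {
		msleep(2);			/* re-check every 2ms */
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;	/* deadline exceeded */
	}
	return 0;
}
#endif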
3029 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
3030 * @phba: pointer to lpfc hba data structure.
3032 * Allocate RPIs for all active remote nodes. This is needed whenever
3033 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
3034 * is to fix up the temporary rpi assignments.
3037 lpfc_sli4_node_prep(struct lpfc_hba *phba)
3039 struct lpfc_nodelist *ndlp, *next_ndlp;
3040 struct lpfc_vport **vports;
3042 unsigned long flags;
3044 if (phba->sli_rev != LPFC_SLI_REV4)
3047 vports = lpfc_create_vport_work_array(phba);
3051 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3052 if (vports[i]->load_flag & FC_UNLOADING)
3055 list_for_each_entry_safe(ndlp, next_ndlp,
3056 &vports[i]->fc_nodes,
3058 if (!NLP_CHK_NODE_ACT(ndlp))
3060 rpi = lpfc_sli4_alloc_rpi(phba);
3061 if (rpi == LPFC_RPI_ALLOC_ERROR) {
3062 spin_lock_irqsave(&phba->ndlp_lock, flags);
3063 NLP_CLR_NODE_ACT(ndlp);
3064 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
3067 ndlp->nlp_rpi = rpi;
3068 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
3069 "0009 rpi:%x DID:%x "
3070 "flg:%x map:%x %p\n", ndlp->nlp_rpi,
3071 ndlp->nlp_DID, ndlp->nlp_flag,
3072 ndlp->nlp_usg_map, ndlp);
3075 lpfc_destroy_vport_work_array(phba, vports);
3079 * lpfc_create_expedite_pool - create expedite pool
3080 * @phba: pointer to lpfc hba data structure.
3082 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
3083 * to the expedite pool and marks them as expedite.
3085 static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3087 struct lpfc_sli4_hdw_queue *qp;
3088 struct lpfc_io_buf *lpfc_ncmd;
3089 struct lpfc_io_buf *lpfc_ncmd_next;
3090 struct lpfc_epd_pool *epd_pool;
3091 unsigned long iflag;
3093 epd_pool = &phba->epd_pool;
3094 qp = &phba->sli4_hba.hdwq[0];
3096 spin_lock_init(&epd_pool->lock);
3097 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3098 spin_lock(&epd_pool->lock);
3099 INIT_LIST_HEAD(&epd_pool->list);
3100 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3101 &qp->lpfc_io_buf_list_put, list) {
3102 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3103 lpfc_ncmd->expedite = true;
3106 if (epd_pool->count >= XRI_BATCH)
3109 spin_unlock(&epd_pool->lock);
3110 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
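/*
 * Note the lock nesting above: the per-HWQ io_buf_list_put_lock is taken
 * irqsave first, then the pool lock. lpfc_destroy_expedite_pool() below
 * acquires the same pair in the same order, which rules out an ABBA
 * deadlock between the create and destroy paths.
 */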
3114 * lpfc_destroy_expedite_pool - destroy expedite pool
3115 * @phba: pointer to lpfc hba data structure.
3117 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
3118 * of HWQ 0 and clears the expedite mark.
3120 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3122 struct lpfc_sli4_hdw_queue *qp;
3123 struct lpfc_io_buf *lpfc_ncmd;
3124 struct lpfc_io_buf *lpfc_ncmd_next;
3125 struct lpfc_epd_pool *epd_pool;
3126 unsigned long iflag;
3128 epd_pool = &phba->epd_pool;
3129 qp = &phba->sli4_hba.hdwq[0];
3131 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3132 spin_lock(&epd_pool->lock);
3133 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3134 &epd_pool->list, list) {
3135 list_move_tail(&lpfc_ncmd->list,
3136 &qp->lpfc_io_buf_list_put);
3137 lpfc_ncmd->expedite = false;
3141 spin_unlock(&epd_pool->lock);
3142 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3146 * lpfc_create_multixri_pools - create multi-XRI pools
3147 * @phba: pointer to lpfc hba data structure.
3149 * This routine initializes the public and private pools per HWQ, then moves XRIs
3150 * from lpfc_io_buf_list_put to the public pool. High and low watermarks are also set.
3153 void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3158 struct lpfc_io_buf *lpfc_ncmd;
3159 struct lpfc_io_buf *lpfc_ncmd_next;
3160 unsigned long iflag;
3161 struct lpfc_sli4_hdw_queue *qp;
3162 struct lpfc_multixri_pool *multixri_pool;
3163 struct lpfc_pbl_pool *pbl_pool;
3164 struct lpfc_pvt_pool *pvt_pool;
3166 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3167 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3168 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3169 phba->sli4_hba.io_xri_cnt);
3171 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3172 lpfc_create_expedite_pool(phba);
3174 hwq_count = phba->cfg_hdw_queue;
3175 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3177 for (i = 0; i < hwq_count; i++) {
3178 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3180 if (!multixri_pool) {
3181 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3182 "1238 Failed to allocate memory for "
3185 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3186 lpfc_destroy_expedite_pool(phba);
3190 qp = &phba->sli4_hba.hdwq[j];
3191 kfree(qp->p_multixri_pool);
3194 phba->cfg_xri_rebalancing = 0;
3198 qp = &phba->sli4_hba.hdwq[i];
3199 qp->p_multixri_pool = multixri_pool;
3201 multixri_pool->xri_limit = count_per_hwq;
3202 multixri_pool->rrb_next_hwqid = i;
3204 /* Deal with public free xri pool */
3205 pbl_pool = &multixri_pool->pbl_pool;
3206 spin_lock_init(&pbl_pool->lock);
3207 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3208 spin_lock(&pbl_pool->lock);
3209 INIT_LIST_HEAD(&pbl_pool->list);
3210 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3211 &qp->lpfc_io_buf_list_put, list) {
3212 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3216 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3217 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3218 pbl_pool->count, i);
3219 spin_unlock(&pbl_pool->lock);
3220 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3222 /* Deal with private free xri pool */
3223 pvt_pool = &multixri_pool->pvt_pool;
3224 pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3225 pvt_pool->low_watermark = XRI_BATCH;
3226 spin_lock_init(&pvt_pool->lock);
3227 spin_lock_irqsave(&pvt_pool->lock, iflag);
3228 INIT_LIST_HEAD(&pvt_pool->list);
3229 pvt_pool->count = 0;
3230 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
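	/*
	 * Worked sizing example (illustrative numbers only): with
	 * io_xri_cnt == 2048 and cfg_hdw_queue == 8, each HWQ gets
	 * xri_limit == 256, so pvt_pool->high_watermark == 128 while
	 * pvt_pool->low_watermark stays at the fixed XRI_BATCH size.
	 */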
3235 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3236 * @phba: pointer to lpfc hba data structure.
3238 * This routine returns XRIs from public/private to lpfc_io_buf_list_put.
3240 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3244 struct lpfc_io_buf *lpfc_ncmd;
3245 struct lpfc_io_buf *lpfc_ncmd_next;
3246 unsigned long iflag;
3247 struct lpfc_sli4_hdw_queue *qp;
3248 struct lpfc_multixri_pool *multixri_pool;
3249 struct lpfc_pbl_pool *pbl_pool;
3250 struct lpfc_pvt_pool *pvt_pool;
3252 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3253 lpfc_destroy_expedite_pool(phba);
3255 if (!(phba->pport->load_flag & FC_UNLOADING)) {
3256 lpfc_sli_flush_fcp_rings(phba);
3258 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3259 lpfc_sli_flush_nvme_rings(phba);
3262 hwq_count = phba->cfg_hdw_queue;
3264 for (i = 0; i < hwq_count; i++) {
3265 qp = &phba->sli4_hba.hdwq[i];
3266 multixri_pool = qp->p_multixri_pool;
3270 qp->p_multixri_pool = NULL;
3272 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3274 /* Deal with public free xri pool */
3275 pbl_pool = &multixri_pool->pbl_pool;
3276 spin_lock(&pbl_pool->lock);
3278 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3279 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3280 pbl_pool->count, i);
3282 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3283 &pbl_pool->list, list) {
3284 list_move_tail(&lpfc_ncmd->list,
3285 &qp->lpfc_io_buf_list_put);
3290 INIT_LIST_HEAD(&pbl_pool->list);
3291 pbl_pool->count = 0;
3293 spin_unlock(&pbl_pool->lock);
3295 /* Deal with private free xri pool */
3296 pvt_pool = &multixri_pool->pvt_pool;
3297 spin_lock(&pvt_pool->lock);
3299 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3300 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3301 pvt_pool->count, i);
3303 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3304 &pvt_pool->list, list) {
3305 list_move_tail(&lpfc_ncmd->list,
3306 &qp->lpfc_io_buf_list_put);
3311 INIT_LIST_HEAD(&pvt_pool->list);
3312 pvt_pool->count = 0;
3314 spin_unlock(&pvt_pool->lock);
3315 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3317 kfree(multixri_pool);
3322 * lpfc_online - Initialize and bring a HBA online
3323 * @phba: pointer to lpfc hba data structure.
3325 * This routine initializes the HBA and brings it online. During this
3326 * process, the management interface is blocked to prevent user space access
3327 * to the HBA interfering with the driver initialization.
3334 lpfc_online(struct lpfc_hba *phba)
3336 struct lpfc_vport *vport;
3337 struct lpfc_vport **vports;
3339 bool vpis_cleared = false;
3343 vport = phba->pport;
3345 if (!(vport->fc_flag & FC_OFFLINE_MODE))
3348 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3349 "0458 Bring Adapter online\n");
3351 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3353 if (phba->sli_rev == LPFC_SLI_REV4) {
3354 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3355 lpfc_unblock_mgmt_io(phba);
3358 spin_lock_irq(&phba->hbalock);
3359 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3360 vpis_cleared = true;
3361 spin_unlock_irq(&phba->hbalock);
3363 /* Reestablish the local initiator port.
3364 * The offline process destroyed the previous lport.
3366 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3367 !phba->nvmet_support) {
3368 error = lpfc_nvme_create_localport(phba->pport);
3370 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3371 "6132 NVME restore reg failed "
3372 "on nvmei error x%x\n", error);
3375 lpfc_sli_queue_init(phba);
3376 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
3377 lpfc_unblock_mgmt_io(phba);
3382 vports = lpfc_create_vport_work_array(phba);
3383 if (vports != NULL) {
3384 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3385 struct Scsi_Host *shost;
3386 shost = lpfc_shost_from_vport(vports[i]);
3387 spin_lock_irq(shost->host_lock);
3388 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3389 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3390 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3391 if (phba->sli_rev == LPFC_SLI_REV4) {
3392 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3393 if ((vpis_cleared) &&
3394 (vports[i]->port_type !=
3395 LPFC_PHYSICAL_PORT))
3398 spin_unlock_irq(shost->host_lock);
3401 lpfc_destroy_vport_work_array(phba, vports);
3403 if (phba->cfg_xri_rebalancing)
3404 lpfc_create_multixri_pools(phba);
3406 lpfc_unblock_mgmt_io(phba);
3411 * lpfc_unblock_mgmt_io - Mark an HBA's management interface as not blocked
3412 * @phba: pointer to lpfc hba data structure.
3414 * This routine marks an HBA's management interface as not blocked. Once the
3415 * HBA's management interface is marked as not blocked, all user space
3416 * access to the HBA, whether from the sysfs interface or the libdfc
3417 * interface, will be allowed. The HBA is set to block the management interface
3418 * when the driver prepares the HBA interface for online or offline and is then
3419 * set to unblock the management interface afterwards.
3422 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
3424 unsigned long iflag;
3426 spin_lock_irqsave(&phba->hbalock, iflag);
3427 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3428 spin_unlock_irqrestore(&phba->hbalock, iflag);
3432 * lpfc_offline_prep - Prepare an HBA to be brought offline
3433 * @phba: pointer to lpfc hba data structure.
3435 * This routine is invoked to prepare an HBA to be brought offline. It performs
3436 * unregistration login to all the nodes on all vports and flushes the mailbox
3437 * queue to make it ready to be brought offline.
3440 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3442 struct lpfc_vport *vport = phba->pport;
3443 struct lpfc_nodelist *ndlp, *next_ndlp;
3444 struct lpfc_vport **vports;
3445 struct Scsi_Host *shost;
3448 if (vport->fc_flag & FC_OFFLINE_MODE)
3451 lpfc_block_mgmt_io(phba, mbx_action);
3453 lpfc_linkdown(phba);
3455 /* Issue an unreg_login to all nodes on all vports */
3456 vports = lpfc_create_vport_work_array(phba);
3457 if (vports != NULL) {
3458 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3459 if (vports[i]->load_flag & FC_UNLOADING)
3461 shost = lpfc_shost_from_vport(vports[i]);
3462 spin_lock_irq(shost->host_lock);
3463 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3464 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3465 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3466 spin_unlock_irq(shost->host_lock);
3468 shost = lpfc_shost_from_vport(vports[i]);
3469 list_for_each_entry_safe(ndlp, next_ndlp,
3470 &vports[i]->fc_nodes,
3472 if (!NLP_CHK_NODE_ACT(ndlp))
3474 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
3476 if (ndlp->nlp_type & NLP_FABRIC) {
3477 lpfc_disc_state_machine(vports[i], ndlp,
3478 NULL, NLP_EVT_DEVICE_RECOVERY);
3479 lpfc_disc_state_machine(vports[i], ndlp,
3480 NULL, NLP_EVT_DEVICE_RM);
3482 spin_lock_irq(shost->host_lock);
3483 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3484 spin_unlock_irq(shost->host_lock);
3486 * Whenever an SLI4 port goes offline, free the
3487 * RPI. Get a new RPI when the adapter port
3488 * comes back online.
3490 if (phba->sli_rev == LPFC_SLI_REV4) {
3491 lpfc_printf_vlog(ndlp->vport,
3492 KERN_INFO, LOG_NODE,
3493 "0011 lpfc_offline: "
3495 "usgmap:x%x rpi:%x\n",
3496 ndlp, ndlp->nlp_DID,
3500 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3502 lpfc_unreg_rpi(vports[i], ndlp);
3506 lpfc_destroy_vport_work_array(phba, vports);
3508 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3511 flush_workqueue(phba->wq);
3515 * lpfc_offline - Bring an HBA offline
3516 * @phba: pointer to lpfc hba data structure.
3518 * This routine actually brings an HBA offline. It stops all the timers
3519 * associated with the HBA, brings down the SLI layer, and eventually
3520 * marks the HBA as in offline state for the upper layer protocol.
3523 lpfc_offline(struct lpfc_hba *phba)
3525 struct Scsi_Host *shost;
3526 struct lpfc_vport **vports;
3529 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3532 /* stop port and all timers associated with this hba */
3533 lpfc_stop_port(phba);
3535 /* Tear down the local and target port registrations. The
3536 * nvme transports need to clean up.
3538 lpfc_nvmet_destroy_targetport(phba);
3539 lpfc_nvme_destroy_localport(phba->pport);
3541 vports = lpfc_create_vport_work_array(phba);
3543 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3544 lpfc_stop_vport_timers(vports[i]);
3545 lpfc_destroy_vport_work_array(phba, vports);
3546 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3547 "0460 Bring Adapter offline\n");
3548 /* Bring down the SLI Layer and clean up. The HBA is offline
3550 lpfc_sli_hba_down(phba);
3551 spin_lock_irq(&phba->hbalock);
3553 spin_unlock_irq(&phba->hbalock);
3554 vports = lpfc_create_vport_work_array(phba);
3556 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3557 shost = lpfc_shost_from_vport(vports[i]);
3558 spin_lock_irq(shost->host_lock);
3559 vports[i]->work_port_events = 0;
3560 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3561 spin_unlock_irq(shost->host_lock);
3563 lpfc_destroy_vport_work_array(phba, vports);
3565 if (phba->cfg_xri_rebalancing)
3566 lpfc_destroy_multixri_pools(phba);
3570 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3571 * @phba: pointer to lpfc hba data structure.
3573 * This routine is to free all the SCSI buffers and IOCBs from the driver
3574 * list back to the kernel. It is called from lpfc_pci_remove_one to free
3575 * the internal resources before the device is removed from the system.
3578 lpfc_scsi_free(struct lpfc_hba *phba)
3580 struct lpfc_io_buf *sb, *sb_next;
3582 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3585 spin_lock_irq(&phba->hbalock);
3587 /* Release all the lpfc_scsi_bufs maintained by this host. */
3589 spin_lock(&phba->scsi_buf_list_put_lock);
3590 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3592 list_del(&sb->list);
3593 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3596 phba->total_scsi_bufs--;
3598 spin_unlock(&phba->scsi_buf_list_put_lock);
3600 spin_lock(&phba->scsi_buf_list_get_lock);
3601 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3603 list_del(&sb->list);
3604 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3607 phba->total_scsi_bufs--;
3609 spin_unlock(&phba->scsi_buf_list_get_lock);
3610 spin_unlock_irq(&phba->hbalock);
3614 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
3615 * @phba: pointer to lpfc hba data structure.
3617 * This routine is to free all the IO buffers and IOCBs from the driver
3618 * list back to the kernel. It is called from lpfc_pci_remove_one to free
3619 * the internal resources before the device is removed from the system.
3622 lpfc_io_free(struct lpfc_hba *phba)
3624 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
3625 struct lpfc_sli4_hdw_queue *qp;
3628 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3629 qp = &phba->sli4_hba.hdwq[idx];
3630 /* Release all the lpfc_nvme_bufs maintained by this host. */
3631 spin_lock(&qp->io_buf_list_put_lock);
3632 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3633 &qp->lpfc_io_buf_list_put,
3635 list_del(&lpfc_ncmd->list);
3637 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3638 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3640 qp->total_io_bufs--;
3642 spin_unlock(&qp->io_buf_list_put_lock);
3644 spin_lock(&qp->io_buf_list_get_lock);
3645 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3646 &qp->lpfc_io_buf_list_get,
3648 list_del(&lpfc_ncmd->list);
3650 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3651 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3653 qp->total_io_bufs--;
3655 spin_unlock(&qp->io_buf_list_get_lock);
3660 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
3661 * @phba: pointer to lpfc hba data structure.
3663 * This routine first calculates the sizes of the current els and allocated
3664 * scsi sgl lists, and then goes through all sgls to update the physical
3665 * XRIs assigned due to port function reset. During port initialization, the
3666 * current els and allocated scsi sgl lists are empty.
3669 * 0 - successful (for now, it always returns 0)
3672 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
3674 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3675 uint16_t i, lxri, xri_cnt, els_xri_cnt;
3676 LIST_HEAD(els_sgl_list);
3680 * update on pci function's els xri-sgl list
3682 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3684 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3685 /* els xri-sgl expanded */
3686 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3687 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3688 "3157 ELS xri-sgl count increased from "
3689 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3691 /* allocate the additional els sgls */
3692 for (i = 0; i < xri_cnt; i++) {
3693 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3695 if (sglq_entry == NULL) {
3696 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3697 "2562 Failure to allocate an "
3698 "ELS sgl entry:%d\n", i);
3702 sglq_entry->buff_type = GEN_BUFF_TYPE;
3703 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3705 if (sglq_entry->virt == NULL) {
3707 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3708 "2563 Failure to allocate an "
3709 "ELS mbuf:%d\n", i);
3713 sglq_entry->sgl = sglq_entry->virt;
3714 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3715 sglq_entry->state = SGL_FREED;
3716 list_add_tail(&sglq_entry->list, &els_sgl_list);
3718 spin_lock_irq(&phba->hbalock);
3719 spin_lock(&phba->sli4_hba.sgl_list_lock);
3720 list_splice_init(&els_sgl_list,
3721 &phba->sli4_hba.lpfc_els_sgl_list);
3722 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3723 spin_unlock_irq(&phba->hbalock);
3724 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3725 /* els xri-sgl shrunk */
3726 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3727 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3728 "3158 ELS xri-sgl count decreased from "
3729 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3731 spin_lock_irq(&phba->hbalock);
3732 spin_lock(&phba->sli4_hba.sgl_list_lock);
3733 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3735 /* release extra els sgls from list */
3736 for (i = 0; i < xri_cnt; i++) {
3737 list_remove_head(&els_sgl_list,
3738 sglq_entry, struct lpfc_sglq, list);
3740 __lpfc_mbuf_free(phba, sglq_entry->virt,
3745 list_splice_init(&els_sgl_list,
3746 &phba->sli4_hba.lpfc_els_sgl_list);
3747 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3748 spin_unlock_irq(&phba->hbalock);
3750 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3751 "3163 ELS xri-sgl count unchanged: %d\n",
3753 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3755 /* update xris to els sgls on the list */
3757 sglq_entry_next = NULL;
3758 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3759 &phba->sli4_hba.lpfc_els_sgl_list, list) {
3760 lxri = lpfc_sli4_next_xritag(phba);
3761 if (lxri == NO_XRI) {
3762 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3763 "2400 Failed to allocate xri for "
3768 sglq_entry->sli4_lxritag = lxri;
3769 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3774 lpfc_free_els_sgl_list(phba);
3779 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
3780 * @phba: pointer to lpfc hba data structure.
3782 * This routine first calculates the sizes of the current els and allocated
3783 * scsi sgl lists, and then goes through all sgls to update the physical
3784 * XRIs assigned due to port function reset. During port initialization, the
3785 * current els and allocated scsi sgl lists are empty.
3788 * 0 - successful (for now, it always returns 0)
3791 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
3793 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3794 uint16_t i, lxri, xri_cnt, els_xri_cnt;
3795 uint16_t nvmet_xri_cnt;
3796 LIST_HEAD(nvmet_sgl_list);
3800 * update on pci function's nvmet xri-sgl list
3802 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3804 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
3805 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3806 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
3807 /* nvmet xri-sgl expanded */
3808 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
3809 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3810 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
3811 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
3812 /* allocate the additional nvmet sgls */
3813 for (i = 0; i < xri_cnt; i++) {
3814 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3816 if (sglq_entry == NULL) {
3817 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3818 "6303 Failure to allocate an "
3819 "NVMET sgl entry:%d\n", i);
3823 sglq_entry->buff_type = NVMET_BUFF_TYPE;
3824 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
3826 if (sglq_entry->virt == NULL) {
3828 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3829 "6304 Failure to allocate an "
3830 "NVMET buf:%d\n", i);
3834 sglq_entry->sgl = sglq_entry->virt;
3835 memset(sglq_entry->sgl, 0,
3836 phba->cfg_sg_dma_buf_size);
3837 sglq_entry->state = SGL_FREED;
3838 list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
3840 spin_lock_irq(&phba->hbalock);
3841 spin_lock(&phba->sli4_hba.sgl_list_lock);
3842 list_splice_init(&nvmet_sgl_list,
3843 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3844 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3845 spin_unlock_irq(&phba->hbalock);
3846 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
3847 /* nvmet xri-sgl shrunk */
3848 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
3849 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3850 "6305 NVMET xri-sgl count decreased from "
3851 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
3853 spin_lock_irq(&phba->hbalock);
3854 spin_lock(&phba->sli4_hba.sgl_list_lock);
3855 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
3857 /* release extra nvmet sgls from list */
3858 for (i = 0; i < xri_cnt; i++) {
3859 list_remove_head(&nvmet_sgl_list,
3860 sglq_entry, struct lpfc_sglq, list);
3862 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
3867 list_splice_init(&nvmet_sgl_list,
3868 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3869 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3870 spin_unlock_irq(&phba->hbalock);
3872 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3873 "6306 NVMET xri-sgl count unchanged: %d\n",
3875 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
3877 /* update xris to nvmet sgls on the list */
3879 sglq_entry_next = NULL;
3880 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3881 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
3882 lxri = lpfc_sli4_next_xritag(phba);
3883 if (lxri == NO_XRI) {
3884 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3885 "6307 Failed to allocate xri for "
3890 sglq_entry->sli4_lxritag = lxri;
3891 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3896 lpfc_free_nvmet_sgl_list(phba);
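/**
 * lpfc_io_buf_flush - Drain all per-HWQ IO buffer lists into one sorted list
 * @phba: pointer to lpfc hba data structure.
 * @cbuf: list head that receives the buffers, kept in ascending XRI order.
 *
 * Collects every buffer from each hardware queue's get and put lists onto
 * @cbuf, sorted by XRI so that later SGL block posts can use sequential
 * XRI ranges. Returns the number of buffers collected.
 */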
3901 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
3904 struct lpfc_sli4_hdw_queue *qp;
3905 struct lpfc_io_buf *lpfc_cmd;
3906 struct lpfc_io_buf *iobufp, *prev_iobufp;
3907 int idx, cnt, xri, inserted;
3910 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3911 qp = &phba->sli4_hba.hdwq[idx];
3912 spin_lock_irq(&qp->io_buf_list_get_lock);
3913 spin_lock(&qp->io_buf_list_put_lock);
3915 /* Take everything off the get and put lists */
3916 list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
3917 list_splice(&qp->lpfc_io_buf_list_put, &blist);
3918 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
3919 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
3920 cnt += qp->get_io_bufs + qp->put_io_bufs;
3921 qp->get_io_bufs = 0;
3922 qp->put_io_bufs = 0;
3923 qp->total_io_bufs = 0;
3924 spin_unlock(&qp->io_buf_list_put_lock);
3925 spin_unlock_irq(&qp->io_buf_list_get_lock);
3929 * Take IO buffers off blist and put on cbuf sorted by XRI.
3930 * This is because POST_SGL takes a sequential range of XRIs
3931 * to post to the firmware.
3933 for (idx = 0; idx < cnt; idx++) {
3934 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
3938 list_add_tail(&lpfc_cmd->list, cbuf);
3941 xri = lpfc_cmd->cur_iocbq.sli4_xritag;
3944 list_for_each_entry(iobufp, cbuf, list) {
3945 if (xri < iobufp->cur_iocbq.sli4_xritag) {
3947 list_add(&lpfc_cmd->list,
3948 &prev_iobufp->list);
3950 list_add(&lpfc_cmd->list, cbuf);
3954 prev_iobufp = iobufp;
3957 list_add_tail(&lpfc_cmd->list, cbuf);
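/*
 * A minimal model of the ordered insert above, assuming a hypothetical
 * entry type with an int key; the driver keeps @cbuf ascending by XRI
 * for the same reason.
 */
#if 0	/* illustrative sketch, not built */
struct example_ent {
	struct list_head list;
	int key;
};

static void example_sorted_add(struct example_ent *e, struct list_head *head)
{
	struct example_ent *cur;

	list_for_each_entry(cur, head, list) {
		if (e->key < cur->key) {
			/* link e immediately before the first larger key */
			list_add_tail(&e->list, &cur->list);
			return;
		}
	}
	list_add_tail(&e->list, head);	/* largest so far: append */
}
#endif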
3963 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
3965 struct lpfc_sli4_hdw_queue *qp;
3966 struct lpfc_io_buf *lpfc_cmd;
3969 qp = phba->sli4_hba.hdwq;
3971 while (!list_empty(cbuf)) {
3972 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3973 list_remove_head(cbuf, lpfc_cmd,
3974 struct lpfc_io_buf, list);
3978 qp = &phba->sli4_hba.hdwq[idx];
3979 lpfc_cmd->hdwq_no = idx;
3980 lpfc_cmd->hdwq = qp;
3981 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
3982 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
3983 spin_lock(&qp->io_buf_list_put_lock);
3984 list_add_tail(&lpfc_cmd->list,
3985 &qp->lpfc_io_buf_list_put);
3987 qp->total_io_bufs++;
3988 spin_unlock(&qp->io_buf_list_put_lock);
3995 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
3996 * @phba: pointer to lpfc hba data structure.
3998 * This routine first calculates the sizes of the current els and allocated
3999 * scsi sgl lists, and then goes through all sgls to update the physical
4000 * XRIs assigned due to port function reset. During port initialization, the
4001 * current els and allocated scsi sgl lists are empty.
4004 * 0 - successful (for now, it always returns 0)
4007 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4009 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4010 uint16_t i, lxri, els_xri_cnt;
4011 uint16_t io_xri_cnt, io_xri_max;
4012 LIST_HEAD(io_sgl_list);
4016 * update on pci function's allocated nvme xri-sgl list
4019 /* maximum number of xris available for nvme buffers */
4020 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4021 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4022 phba->sli4_hba.io_xri_max = io_xri_max;
4024 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4025 "6074 Current allocated XRI sgl count:%d, "
4026 "maximum XRI count:%d\n",
4027 phba->sli4_hba.io_xri_cnt,
4028 phba->sli4_hba.io_xri_max);
4030 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4032 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4033 /* max nvme xri shrunk below the allocated nvme buffers */
4034 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4035 phba->sli4_hba.io_xri_max;
4036 /* release the extra allocated nvme buffers */
4037 for (i = 0; i < io_xri_cnt; i++) {
4038 list_remove_head(&io_sgl_list, lpfc_ncmd,
4039 struct lpfc_io_buf, list);
4041 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4043 lpfc_ncmd->dma_handle);
4047 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4050 /* update xris associated with remaining allocated nvme buffers */
4052 lpfc_ncmd_next = NULL;
4053 phba->sli4_hba.io_xri_cnt = cnt;
4054 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4055 &io_sgl_list, list) {
4056 lxri = lpfc_sli4_next_xritag(phba);
4057 if (lxri == NO_XRI) {
4058 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4059 "6075 Failed to allocate xri for "
4064 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4065 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4067 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4076 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4077 * @phba: pointer to lpfc hba data structure.
4078 * @num_to_alloc: The requested number of buffers to allocate.
4080 * This routine allocates nvme buffers for a device with the SLI-4 interface
4081 * spec; each nvme buffer contains all the information needed to initiate
4082 * an I/O. After allocating up to @num_to_alloc IO buffers and putting
4083 * them on a list, it posts them to the port using SGL block post.
4086 * int - number of IO buffers that were allocated and posted.
4087 * 0 = failure, less than num_to_alloc is a partial failure.
4090 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4092 struct lpfc_io_buf *lpfc_ncmd;
4093 struct lpfc_iocbq *pwqeq;
4094 uint16_t iotag, lxri = 0;
4095 int bcnt, num_posted;
4096 LIST_HEAD(prep_nblist);
4097 LIST_HEAD(post_nblist);
4098 LIST_HEAD(nvme_nblist);
4100 /* Sanity check to ensure our sizing is right for both SCSI and NVME */
4101 if (sizeof(struct lpfc_io_buf) > LPFC_COMMON_IO_BUF_SZ) {
4102 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
4103 "6426 Common buffer size %zd exceeds %d\n",
4104 sizeof(struct lpfc_io_buf),
4105 LPFC_COMMON_IO_BUF_SZ);
4109 phba->sli4_hba.io_xri_cnt = 0;
4110 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4111 lpfc_ncmd = kzalloc(LPFC_COMMON_IO_BUF_SZ, GFP_KERNEL);
4115 * Get memory from the pci pool to map the virt space to
4116 * pci bus space for an I/O. The DMA buffer includes the
4117 * number of SGEs necessary to support the sg_tablesize.
4119 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4121 &lpfc_ncmd->dma_handle);
4122 if (!lpfc_ncmd->data) {
4128 * 4K page alignment is CRITICAL to BlockGuard; double check
4131 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4132 (((unsigned long)(lpfc_ncmd->data) &
4133 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4134 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
4135 "3369 Memory alignment err: addr=%lx\n",
4136 (unsigned long)lpfc_ncmd->data);
4137 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4138 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4143 lxri = lpfc_sli4_next_xritag(phba);
4144 if (lxri == NO_XRI) {
4145 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4146 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4150 pwqeq = &lpfc_ncmd->cur_iocbq;
4152 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
4153 iotag = lpfc_sli_next_iotag(phba, pwqeq);
4155 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4156 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4158 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
4159 "6121 Failed to allocate IOTAG for"
4160 " XRI:0x%x\n", lxri);
4161 lpfc_sli4_free_xri(phba, lxri);
4164 pwqeq->sli4_lxritag = lxri;
4165 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4166 pwqeq->context1 = lpfc_ncmd;
4168 /* Initialize local short-hand pointers. */
4169 lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4170 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4171 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
4172 spin_lock_init(&lpfc_ncmd->buf_lock);
4174 /* add the nvme buffer to a post list */
4175 list_add_tail(&lpfc_ncmd->list, &post_nblist);
4176 phba->sli4_hba.io_xri_cnt++;
4178 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4179 "6114 Allocate %d out of %d requested new NVME "
4180 "buffers\n", bcnt, num_to_alloc);
4182 /* post the list of nvme buffer sgls to port if available */
4183 if (!list_empty(&post_nblist))
4184 num_posted = lpfc_sli4_post_io_sgl_list(
4185 phba, &post_nblist, bcnt);
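/*
 * The allocation loop above pairs dma_pool_zalloc() with an explicit
 * alignment check and releases the buffer on any later failure. A minimal
 * sketch of that allocate-check-unwind shape; demo_alloc_aligned and the
 * 4K DEMO_ALIGN constant are hypothetical:
 *
 *	#define DEMO_ALIGN 4096
 *
 *	static void *demo_alloc_aligned(struct dma_pool *pool,
 *					dma_addr_t *handle)
 *	{
 *		void *buf = dma_pool_zalloc(pool, GFP_KERNEL, handle);
 *
 *		if (!buf)
 *			return NULL;
 *		if ((unsigned long)buf & (DEMO_ALIGN - 1)) {
 *			// misaligned: free rather than risk DMA errors
 *			dma_pool_free(pool, buf, *handle);
 *			return NULL;
 *		}
 *		return buf;
 *	}
 */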
4193 lpfc_get_wwpn(struct lpfc_hba *phba)
4197 LPFC_MBOXQ_t *mboxq;
4200 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4203 return (uint64_t)-1;
4205 /* First get WWN of HBA instance */
4206 lpfc_read_nv(phba, mboxq);
4207 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4208 if (rc != MBX_SUCCESS) {
4209 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4210 "6019 Mailbox failed , mbxCmd x%x "
4211 "READ_NV, mbxStatus x%x\n",
4212 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4213 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4214 mempool_free(mboxq, phba->mbox_mem_pool);
4215 return (uint64_t) -1;
4218 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4219 /* wwn is WWPN of HBA instance */
4220 mempool_free(mboxq, phba->mbox_mem_pool);
4221 if (phba->sli_rev == LPFC_SLI_REV4)
4222 return be64_to_cpu(wwn);
4224 return rol64(wwn, 32);
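/*
 * Byte-order note on the two return paths above: SLI4 ports report the
 * WWPN big-endian, so be64_to_cpu() yields the canonical value, while
 * earlier revisions deliver the two 32-bit words swapped, which
 * rol64(wwn, 32) undoes. A minimal sketch (demo name and flag are
 * hypothetical):
 *
 *	static u64 demo_wwn_to_cpu(u64 raw, bool sli4)
 *	{
 *		// SLI4: plain big-endian; older: swap the 32-bit halves
 *		return sli4 ? be64_to_cpu(raw) : rol64(raw, 32);
 *	}
 */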
4228 * lpfc_create_port - Create an FC port
4229 * @phba: pointer to lpfc hba data structure.
4230 * @instance: a unique integer ID to this FC port.
4231 * @dev: pointer to the device data structure.
4233 * This routine creates an FC port for the upper layer protocol. The FC port
4234 * can be created on top of either a physical port or a virtual port provided
4235 * by the HBA. This routine also allocates a SCSI host data structure (shost)
4236 * and associates it with the FC port before adding the shost to the SCSI
4237 * layer.
4240 * @vport - pointer to the virtual N_Port data structure.
4241 * NULL - port create failed.
4244 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4246 struct lpfc_vport *vport;
4247 struct Scsi_Host *shost = NULL;
4251 bool use_no_reset_hba = false;
4254 if (lpfc_no_hba_reset_cnt) {
4255 if (phba->sli_rev < LPFC_SLI_REV4 &&
4256 dev == &phba->pcidev->dev) {
4257 /* Reset the port first */
4258 lpfc_sli_brdrestart(phba);
4259 rc = lpfc_sli_chipset_init(phba);
4263 wwn = lpfc_get_wwpn(phba);
4266 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4267 if (wwn == lpfc_no_hba_reset[i]) {
4268 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4269 "6020 Setting use_no_reset port=%llx\n",
4271 use_no_reset_hba = true;
4276 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4277 if (dev != &phba->pcidev->dev) {
4278 shost = scsi_host_alloc(&lpfc_vport_template,
4279 sizeof(struct lpfc_vport));
4281 if (!use_no_reset_hba)
4282 shost = scsi_host_alloc(&lpfc_template,
4283 sizeof(struct lpfc_vport));
4285 shost = scsi_host_alloc(&lpfc_template_no_hr,
4286 sizeof(struct lpfc_vport));
4288 } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
4289 shost = scsi_host_alloc(&lpfc_template_nvme,
4290 sizeof(struct lpfc_vport));
4295 vport = (struct lpfc_vport *) shost->hostdata;
4297 vport->load_flag |= FC_LOADING;
4298 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4299 vport->fc_rscn_flush = 0;
4300 lpfc_get_vport_cfgparam(vport);
4302 /* Adjust value in vport */
4303 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4305 shost->unique_id = instance;
4306 shost->max_id = LPFC_MAX_TARGET;
4307 shost->max_lun = vport->cfg_max_luns;
4308 shost->this_id = -1;
4309 shost->max_cmd_len = 16;
4311 if (phba->sli_rev == LPFC_SLI_REV4) {
4312 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
4313 shost->nr_hw_queues = phba->cfg_hdw_queue;
4315 shost->nr_hw_queues = phba->sli4_hba.num_present_cpu;
4317 shost->dma_boundary =
4318 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4319 shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
4321 /* SLI-3 has a limited number of hardware queues (3),
4322 * thus there is only one for FCP processing.
4324 shost->nr_hw_queues = 1;
4327 * Set initial can_queue value since 0 is no longer supported and
4328 * scsi_add_host will fail. This will be adjusted later based on the
4329 * max xri value determined in hba setup.
4331 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4332 if (dev != &phba->pcidev->dev) {
4333 shost->transportt = lpfc_vport_transport_template;
4334 vport->port_type = LPFC_NPIV_PORT;
4336 shost->transportt = lpfc_transport_template;
4337 vport->port_type = LPFC_PHYSICAL_PORT;
4340 /* Initialize all internally managed lists. */
4341 INIT_LIST_HEAD(&vport->fc_nodes);
4342 INIT_LIST_HEAD(&vport->rcv_buffer_list);
4343 spin_lock_init(&vport->work_port_lock);
4345 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4347 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4349 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4351 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4352 lpfc_setup_bg(phba, shost);
4354 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4358 spin_lock_irq(&phba->port_list_lock);
4359 list_add_tail(&vport->listentry, &phba->port_list);
4360 spin_unlock_irq(&phba->port_list_lock);
4364 scsi_host_put(shost);
4370 * destroy_port - destroy an FC port
4371 * @vport: pointer to an lpfc virtual N_Port data structure.
4373 * This routine destroys an FC port from the upper layer protocol. All the
4374 * resources associated with the port are released.
4377 destroy_port(struct lpfc_vport *vport)
4379 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4380 struct lpfc_hba *phba = vport->phba;
4382 lpfc_debugfs_terminate(vport);
4383 fc_remove_host(shost);
4384 scsi_remove_host(shost);
4386 spin_lock_irq(&phba->port_list_lock);
4387 list_del_init(&vport->listentry);
4388 spin_unlock_irq(&phba->port_list_lock);
4390 lpfc_cleanup(vport);
4395 * lpfc_get_instance - Get a unique integer ID
4397 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
4398 * uses the kernel idr facility to perform the task.
4401 * instance - a unique integer ID allocated as the new instance.
4402 * -1 - lpfc get instance failed.
4405 lpfc_get_instance(void)
4409 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4410 return ret < 0 ? -1 : ret;
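/*
 * lpfc_get_instance() leans on the kernel idr allocator: with start and
 * end both 0, idr_alloc() treats the range as unbounded and returns the
 * lowest free ID. A minimal sketch of the matching alloc/free pair,
 * using a hypothetical demo_idr:
 *
 *	static DEFINE_IDR(demo_idr);
 *
 *	static int demo_get_instance(void)
 *	{
 *		int ret = idr_alloc(&demo_idr, NULL, 0, 0, GFP_KERNEL);
 *
 *		return ret < 0 ? -1 : ret;
 *	}
 *
 *	static void demo_put_instance(int id)
 *	{
 *		idr_remove(&demo_idr, id);	// return the ID to the pool
 *	}
 */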
4414 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
4415 * @shost: pointer to SCSI host data structure.
4416 * @time: elapsed time of the scan in jiffies.
4418 * This routine is called by the SCSI layer with a SCSI host to determine
4419 * whether the scan host is finished.
4421 * Note: there is no scan_start function as adapter initialization will have
4422 * asynchronously kicked off the link initialization.
4425 * 0 - SCSI host scan is not over yet.
4426 * 1 - SCSI host scan is over.
4428 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4430 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4431 struct lpfc_hba *phba = vport->phba;
4434 spin_lock_irq(shost->host_lock);
4436 if (vport->load_flag & FC_UNLOADING) {
4440 if (time >= msecs_to_jiffies(30 * 1000)) {
4441 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4442 "0461 Scanning longer than 30 "
4443 "seconds. Continuing initialization\n");
4447 if (time >= msecs_to_jiffies(15 * 1000) &&
4448 phba->link_state <= LPFC_LINK_DOWN) {
4449 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4450 "0465 Link down longer than 15 "
4451 "seconds. Continuing initialization\n");
4456 if (vport->port_state != LPFC_VPORT_READY)
4458 if (vport->num_disc_nodes || vport->fc_prli_sent)
4460 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
4462 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4468 spin_unlock_irq(shost->host_lock);
4472 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4474 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4475 struct lpfc_hba *phba = vport->phba;
4477 fc_host_supported_speeds(shost) = 0;
4478 if (phba->lmt & LMT_128Gb)
4479 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4480 if (phba->lmt & LMT_64Gb)
4481 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4482 if (phba->lmt & LMT_32Gb)
4483 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4484 if (phba->lmt & LMT_16Gb)
4485 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4486 if (phba->lmt & LMT_10Gb)
4487 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4488 if (phba->lmt & LMT_8Gb)
4489 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4490 if (phba->lmt & LMT_4Gb)
4491 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4492 if (phba->lmt & LMT_2Gb)
4493 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4494 if (phba->lmt & LMT_1Gb)
4495 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4499 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
4500 * @shost: pointer to SCSI host data structure.
4502 * This routine initializes a given SCSI host attributes on a FC port. The
4503 * SCSI host can be either on top of a physical port or a virtual port.
4505 void lpfc_host_attrib_init(struct Scsi_Host *shost)
4507 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4508 struct lpfc_hba *phba = vport->phba;
4510 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
4513 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4514 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4515 fc_host_supported_classes(shost) = FC_COS_CLASS3;
4517 memset(fc_host_supported_fc4s(shost), 0,
4518 sizeof(fc_host_supported_fc4s(shost)));
4519 fc_host_supported_fc4s(shost)[2] = 1;
4520 fc_host_supported_fc4s(shost)[7] = 1;
4522 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
4523 sizeof(fc_host_symbolic_name(shost)));
4525 lpfc_host_supported_speeds_set(shost);
4527 fc_host_maxframe_size(shost) =
4528 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
4529 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
4531 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
4533 /* This value is also unchanging */
4534 memset(fc_host_active_fc4s(shost), 0,
4535 sizeof(fc_host_active_fc4s(shost)));
4536 fc_host_active_fc4s(shost)[2] = 1;
4537 fc_host_active_fc4s(shost)[7] = 1;
4539 fc_host_max_npiv_vports(shost) = phba->max_vpi;
4540 spin_lock_irq(shost->host_lock);
4541 vport->load_flag &= ~FC_LOADING;
4542 spin_unlock_irq(shost->host_lock);
4546 * lpfc_stop_port_s3 - Stop SLI3 device port
4547 * @phba: pointer to lpfc hba data structure.
4549 * This routine is invoked to stop an SLI3 device port, it stops the device
4550 * from generating interrupts and stops the device driver's timers for the
4554 lpfc_stop_port_s3(struct lpfc_hba *phba)
4556 /* Clear all interrupt enable conditions */
4557 writel(0, phba->HCregaddr);
4558 readl(phba->HCregaddr); /* flush */
4559 /* Clear all pending interrupts */
4560 writel(0xffffffff, phba->HAregaddr);
4561 readl(phba->HAregaddr); /* flush */
4563 /* Reset some HBA SLI setup states */
4564 lpfc_stop_hba_timers(phba);
4565 phba->pport->work_port_events = 0;
4569 * lpfc_stop_port_s4 - Stop SLI4 device port
4570 * @phba: pointer to lpfc hba data structure.
4572 * This routine is invoked to stop an SLI4 device port, it stops the device
4573 * from generating interrupts and stops the device driver's timers for the
4577 lpfc_stop_port_s4(struct lpfc_hba *phba)
4579 /* Reset some HBA SLI4 setup states */
4580 lpfc_stop_hba_timers(phba);
4582 phba->pport->work_port_events = 0;
4583 phba->sli4_hba.intr_enable = 0;
4587 * lpfc_stop_port - Wrapper function for stopping hba port
4588 * @phba: Pointer to HBA context object.
4590 * This routine wraps the actual SLI3 or SLI4 hba stop port routine, invoked
4591 * through the API jump table function pointer in the lpfc_hba struct.
4594 lpfc_stop_port(struct lpfc_hba *phba)
4596 phba->lpfc_stop_port(phba);
4599 flush_workqueue(phba->wq);
4603 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
4604 * @phba: Pointer to hba for which this call is being executed.
4606 * This routine starts the timer waiting for the FCF rediscovery to complete.
4609 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
4611 unsigned long fcf_redisc_wait_tmo =
4612 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
4613 /* Start fcf rediscovery wait period timer */
4614 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
4615 spin_lock_irq(&phba->hbalock);
4616 /* Allow action to new fcf asynchronous event */
4617 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
4618 /* Mark the FCF rediscovery pending state */
4619 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
4620 spin_unlock_irq(&phba->hbalock);
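/*
 * The FCF rediscovery timer follows the standard timer_list pattern:
 * timer_setup() binds the callback at init time, mod_timer() (re)arms it
 * with an absolute jiffies deadline, and the callback recovers its
 * enclosing structure with from_timer(). A minimal sketch with a
 * hypothetical demo_hba:
 *
 *	struct demo_hba { struct timer_list redisc_wait; };
 *
 *	static void demo_redisc_tmo(struct timer_list *t)
 *	{
 *		struct demo_hba *hba = from_timer(hba, t, redisc_wait);
 *		// act on the expiry via hba
 *	}
 *
 *	// init: timer_setup(&hba->redisc_wait, demo_redisc_tmo, 0);
 *	// arm:  mod_timer(&hba->redisc_wait,
 *	//		   jiffies + msecs_to_jiffies(2000));
 */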
4624 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
4625 * @ptr: Map to lpfc_hba data structure pointer.
4627 * This routine is invoked when the wait for FCF table rediscovery times
4628 * out. If any new FCF records have been discovered during the wait
4629 * period, a new FCF event is added to the FCOE async event list and the
4630 * worker thread is woken up to process it from the worker thread
4631 * context.
4634 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
4636 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
4638 /* Don't send FCF rediscovery event if timer cancelled */
4639 spin_lock_irq(&phba->hbalock);
4640 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
4641 spin_unlock_irq(&phba->hbalock);
4644 /* Clear FCF rediscovery timer pending flag */
4645 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
4646 /* FCF rediscovery event to worker thread */
4647 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
4648 spin_unlock_irq(&phba->hbalock);
4649 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
4650 "2776 FCF rediscover quiescent timer expired\n");
4651 /* wake up worker thread */
4652 lpfc_worker_wake_up(phba);
4656 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
4657 * @phba: pointer to lpfc hba data structure.
4658 * @acqe_link: pointer to the async link completion queue entry.
4660 * This routine is to parse the SLI4 link-attention link fault code.
4663 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
4664 struct lpfc_acqe_link *acqe_link)
4666 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
4667 case LPFC_ASYNC_LINK_FAULT_NONE:
4668 case LPFC_ASYNC_LINK_FAULT_LOCAL:
4669 case LPFC_ASYNC_LINK_FAULT_REMOTE:
4670 case LPFC_ASYNC_LINK_FAULT_LR_LRR:
4673 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4674 "0398 Unknown link fault code: x%x\n",
4675 bf_get(lpfc_acqe_link_fault, acqe_link));
4681 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
4682 * @phba: pointer to lpfc hba data structure.
4683 * @acqe_link: pointer to the async link completion queue entry.
4685 * This routine is to parse the SLI4 link attention type and translate it
4686 * into the base driver's link attention type coding.
4688 * Return: Link attention type in terms of base driver's coding.
4691 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
4692 struct lpfc_acqe_link *acqe_link)
4696 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
4697 case LPFC_ASYNC_LINK_STATUS_DOWN:
4698 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
4699 att_type = LPFC_ATT_LINK_DOWN;
4701 case LPFC_ASYNC_LINK_STATUS_UP:
4702 /* Ignore physical link up events - wait for logical link up */
4703 att_type = LPFC_ATT_RESERVED;
4705 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
4706 att_type = LPFC_ATT_LINK_UP;
4709 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4710 "0399 Invalid link attention type: x%x\n",
4711 bf_get(lpfc_acqe_link_status, acqe_link));
4712 att_type = LPFC_ATT_RESERVED;
4719 * lpfc_sli_port_speed_get - Convert sli3 link speed code to link speed
4720 * @phba: pointer to lpfc hba data structure.
4722 * This routine is to get an SLI3 FC port's link speed in Mbps.
4724 * Return: link speed in terms of Mbps.
4727 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
4729 uint32_t link_speed;
4731 if (!lpfc_is_link_up(phba))
4734 if (phba->sli_rev <= LPFC_SLI_REV3) {
4735 switch (phba->fc_linkspeed) {
4736 case LPFC_LINK_SPEED_1GHZ:
4739 case LPFC_LINK_SPEED_2GHZ:
4742 case LPFC_LINK_SPEED_4GHZ:
4745 case LPFC_LINK_SPEED_8GHZ:
4748 case LPFC_LINK_SPEED_10GHZ:
4751 case LPFC_LINK_SPEED_16GHZ:
4758 if (phba->sli4_hba.link_state.logical_speed)
4760 phba->sli4_hba.link_state.logical_speed;
4762 link_speed = phba->sli4_hba.link_state.speed;
4768 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
4769 * @phba: pointer to lpfc hba data structure.
4770 * @evt_code: asynchronous event code.
4771 * @speed_code: asynchronous event link speed code.
4773 * This routine parses the given SLI4 async event link speed code into
4774 * a link speed value in Mbps.
4776 * Return: link speed in terms of Mbps.
4779 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
4782 uint32_t port_speed;
4785 case LPFC_TRAILER_CODE_LINK:
4786 switch (speed_code) {
4787 case LPFC_ASYNC_LINK_SPEED_ZERO:
4790 case LPFC_ASYNC_LINK_SPEED_10MBPS:
4793 case LPFC_ASYNC_LINK_SPEED_100MBPS:
4796 case LPFC_ASYNC_LINK_SPEED_1GBPS:
4799 case LPFC_ASYNC_LINK_SPEED_10GBPS:
4802 case LPFC_ASYNC_LINK_SPEED_20GBPS:
4805 case LPFC_ASYNC_LINK_SPEED_25GBPS:
4808 case LPFC_ASYNC_LINK_SPEED_40GBPS:
4815 case LPFC_TRAILER_CODE_FC:
4816 switch (speed_code) {
4817 case LPFC_FC_LA_SPEED_UNKNOWN:
4820 case LPFC_FC_LA_SPEED_1G:
4823 case LPFC_FC_LA_SPEED_2G:
4826 case LPFC_FC_LA_SPEED_4G:
4829 case LPFC_FC_LA_SPEED_8G:
4832 case LPFC_FC_LA_SPEED_10G:
4835 case LPFC_FC_LA_SPEED_16G:
4838 case LPFC_FC_LA_SPEED_32G:
4841 case LPFC_FC_LA_SPEED_64G:
4844 case LPFC_FC_LA_SPEED_128G:
4845 port_speed = 128000;
4858 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
4859 * @phba: pointer to lpfc hba data structure.
4860 * @acqe_link: pointer to the async link completion queue entry.
4862 * This routine is to handle the SLI4 asynchronous FCoE link event.
4865 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
4866 struct lpfc_acqe_link *acqe_link)
4868 struct lpfc_dmabuf *mp;
4871 struct lpfc_mbx_read_top *la;
4875 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
4876 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
4878 phba->fcoe_eventtag = acqe_link->event_tag;
4879 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4881 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4882 "0395 The mboxq allocation failed\n");
4885 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4887 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4888 "0396 The lpfc_dmabuf allocation failed\n");
4891 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
4893 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4894 "0397 The mbuf allocation failed\n");
4895 goto out_free_dmabuf;
4898 /* Cleanup any outstanding ELS commands */
4899 lpfc_els_flush_all_cmd(phba);
4901 /* Block ELS IOCBs until we have done process link event */
4902 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
4904 /* Update link event statistics */
4905 phba->sli.slistat.link_event++;
4907 /* Create lpfc_handle_latt mailbox command from link ACQE */
4908 lpfc_read_topology(phba, pmb, mp);
4909 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
4910 pmb->vport = phba->pport;
4912 /* Keep the link status for extra SLI4 state machine reference */
4913 phba->sli4_hba.link_state.speed =
4914 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
4915 bf_get(lpfc_acqe_link_speed, acqe_link));
4916 phba->sli4_hba.link_state.duplex =
4917 bf_get(lpfc_acqe_link_duplex, acqe_link);
4918 phba->sli4_hba.link_state.status =
4919 bf_get(lpfc_acqe_link_status, acqe_link);
4920 phba->sli4_hba.link_state.type =
4921 bf_get(lpfc_acqe_link_type, acqe_link);
4922 phba->sli4_hba.link_state.number =
4923 bf_get(lpfc_acqe_link_number, acqe_link);
4924 phba->sli4_hba.link_state.fault =
4925 bf_get(lpfc_acqe_link_fault, acqe_link);
4926 phba->sli4_hba.link_state.logical_speed =
4927 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
4929 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4930 "2900 Async FC/FCoE Link event - Speed:%dGBit "
4931 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
4932 "Logical speed:%dMbps Fault:%d\n",
4933 phba->sli4_hba.link_state.speed,
4934 phba->sli4_hba.link_state.topology,
4935 phba->sli4_hba.link_state.status,
4936 phba->sli4_hba.link_state.type,
4937 phba->sli4_hba.link_state.number,
4938 phba->sli4_hba.link_state.logical_speed,
4939 phba->sli4_hba.link_state.fault);
4941 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
4942 * topology info. Note: this is optional for non-FC-AL ports.
4944 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
4945 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4946 if (rc == MBX_NOT_FINISHED)
4947 goto out_free_dmabuf;
4951 * For FCoE Mode: fill in all the topology information we need and call
4952 * the READ_TOPOLOGY completion routine to continue without actually
4953 * sending the READ_TOPOLOGY mailbox command to the port.
4955 /* Initialize completion status */
4957 mb->mbxStatus = MBX_SUCCESS;
4959 /* Parse port fault information field */
4960 lpfc_sli4_parse_latt_fault(phba, acqe_link);
4962 /* Parse and translate link attention fields */
4963 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
4964 la->eventTag = acqe_link->event_tag;
4965 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
4966 bf_set(lpfc_mbx_read_top_link_spd, la,
4967 (bf_get(lpfc_acqe_link_speed, acqe_link)));
4969 /* Fake the following irrelevant fields */
4970 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
4971 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
4972 bf_set(lpfc_mbx_read_top_il, la, 0);
4973 bf_set(lpfc_mbx_read_top_pb, la, 0);
4974 bf_set(lpfc_mbx_read_top_fa, la, 0);
4975 bf_set(lpfc_mbx_read_top_mm, la, 0);
4977 /* Invoke the lpfc_handle_latt mailbox command callback function */
4978 lpfc_mbx_cmpl_read_topology(phba, pmb);
4985 mempool_free(pmb, phba->mbox_mem_pool);
4989 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to
4990 * read topology speed
4991 * @phba: pointer to lpfc hba data structure.
4992 * @evt_code: asynchronous event code.
4993 * @speed_code: asynchronous event link speed code.
4995 * This routine parses the given SLI4 async event link speed code into
4996 * the corresponding Read topology link speed value.
4998 * Return: link speed in terms of Read topology.
5001 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5005 switch (speed_code) {
5006 case LPFC_FC_LA_SPEED_1G:
5007 port_speed = LPFC_LINK_SPEED_1GHZ;
5009 case LPFC_FC_LA_SPEED_2G:
5010 port_speed = LPFC_LINK_SPEED_2GHZ;
5012 case LPFC_FC_LA_SPEED_4G:
5013 port_speed = LPFC_LINK_SPEED_4GHZ;
5015 case LPFC_FC_LA_SPEED_8G:
5016 port_speed = LPFC_LINK_SPEED_8GHZ;
5018 case LPFC_FC_LA_SPEED_16G:
5019 port_speed = LPFC_LINK_SPEED_16GHZ;
5021 case LPFC_FC_LA_SPEED_32G:
5022 port_speed = LPFC_LINK_SPEED_32GHZ;
5024 case LPFC_FC_LA_SPEED_64G:
5025 port_speed = LPFC_LINK_SPEED_64GHZ;
5027 case LPFC_FC_LA_SPEED_128G:
5028 port_speed = LPFC_LINK_SPEED_128GHZ;
5030 case LPFC_FC_LA_SPEED_256G:
5031 port_speed = LPFC_LINK_SPEED_256GHZ;
5041 #define trunk_link_status(__idx)\
5042 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5043 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
5044 "Link up" : "Link down") : "NA"
5045 /* Did port __idx report an error? */
5046 #define trunk_port_fault(__idx)\
5047 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5048 (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
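/*
 * Both macros above rely on ## token pasting so a single definition
 * covers all four trunk ports: trunk_link_status(2), for example,
 * expands to reference lpfc_acqe_fc_la_trunk_config_port2 and
 * trunk_link.link2 at compile time. A minimal sketch of the trick with
 * hypothetical names:
 *
 *	#define DEMO_PORT_STATE(__idx)	(demo.link##__idx.state)
 *	// DEMO_PORT_STATE(2) preprocesses to (demo.link2.state)
 */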
5051 lpfc_update_trunk_link_status(struct lpfc_hba *phba,
5052 struct lpfc_acqe_fc_la *acqe_fc)
5054 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
5055 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
5057 phba->sli4_hba.link_state.speed =
5058 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
5059 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5061 phba->sli4_hba.link_state.logical_speed =
5062 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
5063 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
5064 phba->fc_linkspeed =
5065 lpfc_async_link_speed_to_read_top(
5067 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5069 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
5070 phba->trunk_link.link0.state =
5071 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
5072 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5073 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
5075 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
5076 phba->trunk_link.link1.state =
5077 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
5078 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5079 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
5081 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
5082 phba->trunk_link.link2.state =
5083 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
5084 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5085 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
5087 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
5088 phba->trunk_link.link3.state =
5089 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
5090 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5091 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
5094 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5095 "2910 Async FC Trunking Event - Speed:%d\n"
5096 "\tLogical speed:%d "
5097 "port0: %s port1: %s port2: %s port3: %s\n",
5098 phba->sli4_hba.link_state.speed,
5099 phba->sli4_hba.link_state.logical_speed,
5100 trunk_link_status(0), trunk_link_status(1),
5101 trunk_link_status(2), trunk_link_status(3));
5104 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5105 "3202 trunk error:0x%x (%s) seen on port0:%s "
5107 * SLI-4: only error codes up to 0xA are
5108 * defined as of now; print an appropriate
5109 * message in case the driver needs updating.
5111 "port1:%s port2:%s port3:%s\n", err, err > 0xA ?
5112 "UNDEFINED. update driver." : trunk_errmsg[err],
5113 trunk_port_fault(0), trunk_port_fault(1),
5114 trunk_port_fault(2), trunk_port_fault(3));
5119 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
5120 * @phba: pointer to lpfc hba data structure.
5121 * @acqe_fc: pointer to the async fc completion queue entry.
5123 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
5124 * that the event was received and then issue a read_topology mailbox command so
5125 * that the rest of the driver will treat it the same as SLI3.
5128 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
5130 struct lpfc_dmabuf *mp;
5133 struct lpfc_mbx_read_top *la;
5136 if (bf_get(lpfc_trailer_type, acqe_fc) !=
5137 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
5138 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5139 "2895 Non FC link Event detected.(%d)\n",
5140 bf_get(lpfc_trailer_type, acqe_fc));
5144 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
5145 LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
5146 lpfc_update_trunk_link_status(phba, acqe_fc);
5150 /* Keep the link status for extra SLI4 state machine reference */
5151 phba->sli4_hba.link_state.speed =
5152 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
5153 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5154 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
5155 phba->sli4_hba.link_state.topology =
5156 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
5157 phba->sli4_hba.link_state.status =
5158 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
5159 phba->sli4_hba.link_state.type =
5160 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
5161 phba->sli4_hba.link_state.number =
5162 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
5163 phba->sli4_hba.link_state.fault =
5164 bf_get(lpfc_acqe_link_fault, acqe_fc);
5166 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
5167 LPFC_FC_LA_TYPE_LINK_DOWN)
5168 phba->sli4_hba.link_state.logical_speed = 0;
5169 else if (!phba->sli4_hba.conf_trunk)
5170 phba->sli4_hba.link_state.logical_speed =
5171 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
5173 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5174 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
5175 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
5176 "%dMbps Fault:%d\n",
5177 phba->sli4_hba.link_state.speed,
5178 phba->sli4_hba.link_state.topology,
5179 phba->sli4_hba.link_state.status,
5180 phba->sli4_hba.link_state.type,
5181 phba->sli4_hba.link_state.number,
5182 phba->sli4_hba.link_state.logical_speed,
5183 phba->sli4_hba.link_state.fault);
5184 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5186 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5187 "2897 The mboxq allocation failed\n");
5190 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5192 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5193 "2898 The lpfc_dmabuf allocation failed\n");
5196 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
5198 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5199 "2899 The mbuf allocation failed\n");
5200 goto out_free_dmabuf;
5203 /* Cleanup any outstanding ELS commands */
5204 lpfc_els_flush_all_cmd(phba);
5206 /* Block ELS IOCBs until we have done process link event */
5207 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5209 /* Update link event statistics */
5210 phba->sli.slistat.link_event++;
5212 /* Create lpfc_handle_latt mailbox command from link ACQE */
5213 lpfc_read_topology(phba, pmb, mp);
5214 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5215 pmb->vport = phba->pport;
5217 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
5218 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
5220 switch (phba->sli4_hba.link_state.status) {
5221 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
5222 phba->link_flag |= LS_MDS_LINK_DOWN;
5224 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
5225 phba->link_flag |= LS_MDS_LOOPBACK;
5231 /* Initialize completion status */
5233 mb->mbxStatus = MBX_SUCCESS;
5235 /* Parse port fault information field */
5236 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
5238 /* Parse and translate link attention fields */
5239 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
5240 la->eventTag = acqe_fc->event_tag;
5242 if (phba->sli4_hba.link_state.status ==
5243 LPFC_FC_LA_TYPE_UNEXP_WWPN) {
5244 bf_set(lpfc_mbx_read_top_att_type, la,
5245 LPFC_FC_LA_TYPE_UNEXP_WWPN);
5247 bf_set(lpfc_mbx_read_top_att_type, la,
5248 LPFC_FC_LA_TYPE_LINK_DOWN);
5250 /* Invoke the mailbox command callback function */
5251 lpfc_mbx_cmpl_read_topology(phba, pmb);
5256 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5257 if (rc == MBX_NOT_FINISHED)
5258 goto out_free_dmabuf;
5264 mempool_free(pmb, phba->mbox_mem_pool);
5268 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
5269 * @phba: pointer to lpfc hba data structure.
5270 * @acqe_fc: pointer to the async SLI completion queue entry.
5272 * This routine is to handle the SLI4 asynchronous SLI events.
5275 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
5281 uint8_t operational = 0;
5282 struct temp_event temp_event_data;
5283 struct lpfc_acqe_misconfigured_event *misconfigured;
5284 struct Scsi_Host *shost;
5285 struct lpfc_vport **vports;
5288 evt_type = bf_get(lpfc_trailer_type, acqe_sli);
5290 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5291 "2901 Async SLI event - Event Data1:x%08x Event Data2:"
5292 "x%08x SLI Event Type:%d\n",
5293 acqe_sli->event_data1, acqe_sli->event_data2,
5296 port_name = phba->Port[0];
5297 if (port_name == 0x00)
5298 port_name = '?'; /* default when port name is empty */
5301 case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
5302 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
5303 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
5304 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
5306 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5307 "3190 Over Temperature:%d Celsius- Port Name %c\n",
5308 acqe_sli->event_data1, port_name);
5310 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
5311 shost = lpfc_shost_from_vport(phba->pport);
5312 fc_host_post_vendor_event(shost, fc_get_event_number(),
5313 sizeof(temp_event_data),
5314 (char *)&temp_event_data,
5315 SCSI_NL_VID_TYPE_PCI
5316 | PCI_VENDOR_ID_EMULEX);
5318 case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
5319 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
5320 temp_event_data.event_code = LPFC_NORMAL_TEMP;
5321 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
5323 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5324 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
5325 acqe_sli->event_data1, port_name);
5327 shost = lpfc_shost_from_vport(phba->pport);
5328 fc_host_post_vendor_event(shost, fc_get_event_number(),
5329 sizeof(temp_event_data),
5330 (char *)&temp_event_data,
5331 SCSI_NL_VID_TYPE_PCI
5332 | PCI_VENDOR_ID_EMULEX);
5334 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
5335 misconfigured = (struct lpfc_acqe_misconfigured_event *)
5336 &acqe_sli->event_data1;
5338 /* fetch the status for this port */
5339 switch (phba->sli4_hba.lnk_info.lnk_no) {
5340 case LPFC_LINK_NUMBER_0:
5341 status = bf_get(lpfc_sli_misconfigured_port0_state,
5342 &misconfigured->theEvent);
5343 operational = bf_get(lpfc_sli_misconfigured_port0_op,
5344 &misconfigured->theEvent);
5346 case LPFC_LINK_NUMBER_1:
5347 status = bf_get(lpfc_sli_misconfigured_port1_state,
5348 &misconfigured->theEvent);
5349 operational = bf_get(lpfc_sli_misconfigured_port1_op,
5350 &misconfigured->theEvent);
5352 case LPFC_LINK_NUMBER_2:
5353 status = bf_get(lpfc_sli_misconfigured_port2_state,
5354 &misconfigured->theEvent);
5355 operational = bf_get(lpfc_sli_misconfigured_port2_op,
5356 &misconfigured->theEvent);
5358 case LPFC_LINK_NUMBER_3:
5359 status = bf_get(lpfc_sli_misconfigured_port3_state,
5360 &misconfigured->theEvent);
5361 operational = bf_get(lpfc_sli_misconfigured_port3_op,
5362 &misconfigured->theEvent);
5365 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5367 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
5368 "event: Invalid link %d",
5369 phba->sli4_hba.lnk_info.lnk_no);
5373 /* Skip if optic state unchanged */
5374 if (phba->sli4_hba.lnk_info.optic_state == status)
5378 case LPFC_SLI_EVENT_STATUS_VALID:
5379 sprintf(message, "Physical Link is functional");
5381 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
5382 sprintf(message, "Optics faulted/incorrectly "
5383 "installed/not installed - Reseat optics, "
5384 "if issue not resolved, replace.");
5386 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
5388 "Optics of two types installed - Remove one "
5389 "optic or install matching pair of optics.");
5391 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
5392 sprintf(message, "Incompatible optics - Replace with "
5393 "compatible optics for card to function.");
5395 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
5396 sprintf(message, "Unqualified optics - Replace with "
5397 "Avago optics for Warranty and Technical "
5398 "Support - Link is%s operational",
5399 (operational) ? " not" : "");
5401 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
5402 sprintf(message, "Uncertified optics - Replace with "
5403 "Avago-certified optics to enable link "
5404 "operation - Link is%s operational",
5405 (operational) ? " not" : "");
5408 /* firmware is reporting a status we don't know about */
5409 sprintf(message, "Unknown event status x%02x", status);
5413 /* Issue READ_CONFIG mbox command to refresh supported speeds */
5414 rc = lpfc_sli4_read_config(phba);
5417 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5418 "3194 Unable to retrieve supported "
5419 "speeds, rc = 0x%x\n", rc);
5421 vports = lpfc_create_vport_work_array(phba);
5422 if (vports != NULL) {
5423 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
5425 shost = lpfc_shost_from_vport(vports[i]);
5426 lpfc_host_supported_speeds_set(shost);
5429 lpfc_destroy_vport_work_array(phba, vports);
5431 phba->sli4_hba.lnk_info.optic_state = status;
5432 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5433 "3176 Port Name %c %s\n", port_name, message);
5435 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
5436 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5437 "3192 Remote DPort Test Initiated - "
5438 "Event Data1:x%08x Event Data2: x%08x\n",
5439 acqe_sli->event_data1, acqe_sli->event_data2);
5442 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5443 "3193 Async SLI event - Event Data1:x%08x Event Data2:"
5444 "x%08x SLI Event Type:%d\n",
5445 acqe_sli->event_data1, acqe_sli->event_data2,
5452 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
5453 * @vport: pointer to vport data structure.
5455 * This routine is to perform Clear Virtual Link (CVL) on a vport in
5456 * response to a CVL event.
5458 * Return the pointer to the ndlp with the vport if successful, otherwise
5461 static struct lpfc_nodelist *
5462 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
5464 struct lpfc_nodelist *ndlp;
5465 struct Scsi_Host *shost;
5466 struct lpfc_hba *phba;
5473 ndlp = lpfc_findnode_did(vport, Fabric_DID);
5475 /* Cannot find existing Fabric ndlp, so allocate a new one */
5476 ndlp = lpfc_nlp_init(vport, Fabric_DID);
5479 /* Set the node type */
5480 ndlp->nlp_type |= NLP_FABRIC;
5481 /* Put ndlp onto node list */
5482 lpfc_enqueue_node(vport, ndlp);
5483 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
5484 /* re-setup ndlp without removing from node list */
5485 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
5489 if ((phba->pport->port_state < LPFC_FLOGI) &&
5490 (phba->pport->port_state != LPFC_VPORT_FAILED))
5492 /* If virtual link is not yet instantiated ignore CVL */
5493 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
5494 && (vport->port_state != LPFC_VPORT_FAILED))
5496 shost = lpfc_shost_from_vport(vport);
5499 lpfc_linkdown_port(vport);
5500 lpfc_cleanup_pending_mbox(vport);
5501 spin_lock_irq(shost->host_lock);
5502 vport->fc_flag |= FC_VPORT_CVL_RCVD;
5503 spin_unlock_irq(shost->host_lock);
5509 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
5510 * @vport: pointer to lpfc hba data structure.
5512 * This routine is to perform Clear Virtual Link (CVL) on all vports in
5513 * response to a FCF dead event.
5516 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
5518 struct lpfc_vport **vports;
5521 vports = lpfc_create_vport_work_array(phba);
5523 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
5524 lpfc_sli4_perform_vport_cvl(vports[i]);
5525 lpfc_destroy_vport_work_array(phba, vports);
5529 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
5530 * @phba: pointer to lpfc hba data structure.
5531 * @acqe_link: pointer to the async fcoe completion queue entry.
5533 * This routine is to handle the SLI4 asynchronous fcoe event.
5536 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
5537 struct lpfc_acqe_fip *acqe_fip)
5539 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
5541 struct lpfc_vport *vport;
5542 struct lpfc_nodelist *ndlp;
5543 struct Scsi_Host *shost;
5544 int active_vlink_present;
5545 struct lpfc_vport **vports;
5548 phba->fc_eventTag = acqe_fip->event_tag;
5549 phba->fcoe_eventtag = acqe_fip->event_tag;
5550 switch (event_type) {
5551 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
5552 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
5553 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
5554 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5556 "2546 New FCF event, evt_tag:x%x, "
5558 acqe_fip->event_tag,
5561 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
5563 "2788 FCF param modified event, "
5564 "evt_tag:x%x, index:x%x\n",
5565 acqe_fip->event_tag,
5567 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
5569 * During period of FCF discovery, read the FCF
5570 * table record indexed by the event to update
5571 * FCF roundrobin failover eligible FCF bmask.
5573 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
5575 "2779 Read FCF (x%x) for updating "
5576 "roundrobin FCF failover bmask\n",
5578 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
5581 /* If the FCF discovery is in progress, do nothing. */
5582 spin_lock_irq(&phba->hbalock);
5583 if (phba->hba_flag & FCF_TS_INPROG) {
5584 spin_unlock_irq(&phba->hbalock);
5587 /* If fast FCF failover rescan event is pending, do nothing */
5588 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
5589 spin_unlock_irq(&phba->hbalock);
5593 /* If the FCF has been in discovered state, do nothing. */
5594 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
5595 spin_unlock_irq(&phba->hbalock);
5598 spin_unlock_irq(&phba->hbalock);
5600 /* Otherwise, scan the entire FCF table and re-discover SAN */
5601 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5602 "2770 Start FCF table scan per async FCF "
5603 "event, evt_tag:x%x, index:x%x\n",
5604 acqe_fip->event_tag, acqe_fip->index);
5605 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
5606 LPFC_FCOE_FCF_GET_FIRST);
5608 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5609 "2547 Issue FCF scan read FCF mailbox "
5610 "command failed (x%x)\n", rc);
5613 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
5614 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5615 "2548 FCF Table full count 0x%x tag 0x%x\n",
5616 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
5617 acqe_fip->event_tag);
5620 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
5621 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
5622 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5623 "2549 FCF (x%x) disconnected from network, "
5624 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
5626 * If we are in the middle of FCF failover process, clear
5627 * the corresponding FCF bit in the roundrobin bitmap.
5629 spin_lock_irq(&phba->hbalock);
5630 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
5631 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
5632 spin_unlock_irq(&phba->hbalock);
5633 /* Update FLOGI FCF failover eligible FCF bmask */
5634 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
5637 spin_unlock_irq(&phba->hbalock);
5639 /* If the event is not for currently used fcf do nothing */
5640 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
5644 * Otherwise, request the port to rediscover the entire FCF
5645 * table for a fast recovery from case that the current FCF
5646 * is no longer valid as we are not in the middle of FCF
5647 * failover process already.
5649 spin_lock_irq(&phba->hbalock);
5650 /* Mark the fast failover process in progress */
5651 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
5652 spin_unlock_irq(&phba->hbalock);
5654 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5655 "2771 Start FCF fast failover process due to "
5656 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
5657 "\n", acqe_fip->event_tag, acqe_fip->index);
5658 rc = lpfc_sli4_redisc_fcf_table(phba);
5660 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5662 "2772 Issue FCF rediscover mailbox "
5663 "command failed, fail through to FCF "
5665 spin_lock_irq(&phba->hbalock);
5666 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
5667 spin_unlock_irq(&phba->hbalock);
5669 * Last resort will fail over by treating this
5670 * as a link down to FCF registration.
5672 lpfc_sli4_fcf_dead_failthrough(phba);
5674 /* Reset FCF roundrobin bmask for new discovery */
5675 lpfc_sli4_clear_fcf_rr_bmask(phba);
5677 * Handling fast FCF failover for a DEAD FCF event is
5678 * considered equivalent to receiving CVL on all vports.
5680 lpfc_sli4_perform_all_vport_cvl(phba);
5683 case LPFC_FIP_EVENT_TYPE_CVL:
5684 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
5685 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5686 "2718 Clear Virtual Link Received for VPI 0x%x"
5687 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
5689 vport = lpfc_find_vport_by_vpid(phba,
5691 ndlp = lpfc_sli4_perform_vport_cvl(vport);
5694 active_vlink_present = 0;
5696 vports = lpfc_create_vport_work_array(phba);
5698 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
5700 if ((!(vports[i]->fc_flag &
5701 FC_VPORT_CVL_RCVD)) &&
5702 (vports[i]->port_state > LPFC_FDISC)) {
5703 active_vlink_present = 1;
5707 lpfc_destroy_vport_work_array(phba, vports);
5711 * Don't re-instantiate if vport is marked for deletion.
5712 * If we are here first then vport_delete is going to wait
5713 * for discovery to complete.
5715 if (!(vport->load_flag & FC_UNLOADING) &&
5716 active_vlink_present) {
5718 * If there are other active VLinks present,
5719 * re-instantiate the Vlink using FDISC.
5721 mod_timer(&ndlp->nlp_delayfunc,
5722 jiffies + msecs_to_jiffies(1000));
5723 shost = lpfc_shost_from_vport(vport);
5724 spin_lock_irq(shost->host_lock);
5725 ndlp->nlp_flag |= NLP_DELAY_TMO;
5726 spin_unlock_irq(shost->host_lock);
5727 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
5728 vport->port_state = LPFC_FDISC;
5731 * Otherwise, we request port to rediscover
5732 * the entire FCF table for a fast recovery
5733 * from possible case that the current FCF
5734 * is no longer valid if we are not already
5735 * in the FCF failover process.
5737 spin_lock_irq(&phba->hbalock);
5738 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
5739 spin_unlock_irq(&phba->hbalock);
5742 /* Mark the fast failover process in progress */
5743 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
5744 spin_unlock_irq(&phba->hbalock);
5745 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
5747 "2773 Start FCF failover per CVL, "
5748 "evt_tag:x%x\n", acqe_fip->event_tag);
5749 rc = lpfc_sli4_redisc_fcf_table(phba);
5751 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5753 "2774 Issue FCF rediscover "
5754 "mailbox command failed, "
5755 "through to CVL event\n");
5756 spin_lock_irq(&phba->hbalock);
5757 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
5758 spin_unlock_irq(&phba->hbalock);
5760 * Last resort will be to retry on the
5761 * currently registered FCF entry.
5763 lpfc_retry_pport_discovery(phba);
5766 * Reset FCF roundrobin bmask for new
5769 lpfc_sli4_clear_fcf_rr_bmask(phba);
5773 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5774 "0288 Unknown FCoE event type 0x%x event tag "
5775 "0x%x\n", event_type, acqe_fip->event_tag);
5781 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
5782 * @phba: pointer to lpfc hba data structure.
5783 * @acqe_link: pointer to the async dcbx completion queue entry.
5785 * This routine is to handle the SLI4 asynchronous dcbx event.
5788 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
5789 struct lpfc_acqe_dcbx *acqe_dcbx)
5791 phba->fc_eventTag = acqe_dcbx->event_tag;
5792 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5793 "0290 The SLI4 DCBX asynchronous event is not "
5798 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
5799 * @phba: pointer to lpfc hba data structure.
5800 * @acqe_link: pointer to the async grp5 completion queue entry.
5802 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
5803 * is an asynchronous notification of a logical link speed change. The Port
5804 * reports the logical link speed in units of 10Mbps.
5807 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
5808 struct lpfc_acqe_grp5 *acqe_grp5)
5810 uint16_t prev_ll_spd;
5812 phba->fc_eventTag = acqe_grp5->event_tag;
5813 phba->fcoe_eventtag = acqe_grp5->event_tag;
5814 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
5815 phba->sli4_hba.link_state.logical_speed =
5816 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
5817 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5818 "2789 GRP5 Async Event: Updating logical link speed "
5819 "from %dMbps to %dMbps\n", prev_ll_spd,
5820 phba->sli4_hba.link_state.logical_speed);
5824 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
5825 * @phba: pointer to lpfc hba data structure.
5827 * This routine is invoked by the worker thread to process all the pending
5828 * SLI4 asynchronous events.
5830 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
5832 struct lpfc_cq_event *cq_event;
5834 /* First, declare the async event has been handled */
5835 spin_lock_irq(&phba->hbalock);
5836 phba->hba_flag &= ~ASYNC_EVENT;
5837 spin_unlock_irq(&phba->hbalock);
5838 /* Now, handle all the async events */
5839 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
5840 /* Get the first event from the head of the event queue */
5841 spin_lock_irq(&phba->hbalock);
5842 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
5843 cq_event, struct lpfc_cq_event, list);
5844 spin_unlock_irq(&phba->hbalock);
5845 /* Process the asynchronous event */
5846 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
5847 case LPFC_TRAILER_CODE_LINK:
5848 lpfc_sli4_async_link_evt(phba,
5849 &cq_event->cqe.acqe_link);
5851 case LPFC_TRAILER_CODE_FCOE:
5852 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
5854 case LPFC_TRAILER_CODE_DCBX:
5855 lpfc_sli4_async_dcbx_evt(phba,
5856 &cq_event->cqe.acqe_dcbx);
5858 case LPFC_TRAILER_CODE_GRP5:
5859 lpfc_sli4_async_grp5_evt(phba,
5860 &cq_event->cqe.acqe_grp5);
5862 case LPFC_TRAILER_CODE_FC:
5863 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
5865 case LPFC_TRAILER_CODE_SLI:
5866 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
5869 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5870 "1804 Invalid asynchrous event code: "
5871 "x%x\n", bf_get(lpfc_trailer_code,
5872 &cq_event->cqe.mcqe_cmpl));
5875 /* Free the completion event processed to the free pool */
5876 lpfc_sli4_cq_event_release(phba, cq_event);
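/*
 * The loop above is the driver's standard event-drain shape: pop one
 * entry under the lock, then process and free it with the lock dropped
 * so the handler may block. A minimal sketch with hypothetical demo_*
 * names:
 *
 *	struct demo_event { struct list_head list; };
 *
 *	static void demo_drain(spinlock_t *lock, struct list_head *queue,
 *			       void (*handle)(struct demo_event *))
 *	{
 *		struct demo_event *evt;
 *
 *		while (!list_empty(queue)) {
 *			spin_lock_irq(lock);
 *			evt = list_first_entry(queue, struct demo_event,
 *					       list);
 *			list_del_init(&evt->list);	// pop under the lock
 *			spin_unlock_irq(lock);
 *			handle(evt);	// lock dropped; may sleep
 *		}
 *	}
 */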
5881 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
5882 * @phba: pointer to lpfc hba data structure.
5884 * This routine is invoked by the worker thread to process FCF table
5885 * rediscovery pending completion event.
5887 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
5891 spin_lock_irq(&phba->hbalock);
5892 /* Clear FCF rediscovery timeout event */
5893 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
5894 /* Clear driver fast failover FCF record flag */
5895 phba->fcf.failover_rec.flag = 0;
5896 /* Set state for FCF fast failover */
5897 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
5898 spin_unlock_irq(&phba->hbalock);
5900 /* Scan FCF table from the first entry to re-discover SAN */
5901 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5902 "2777 Start post-quiescent FCF table scan\n");
5903 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
5905 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5906 "2747 Issue FCF scan read FCF mailbox "
5907 "command failed 0x%x\n", rc);
5911 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
5912 * @phba: pointer to lpfc hba data structure.
5913 * @dev_grp: The HBA PCI-Device group number.
5915 * This routine is invoked to set up the per HBA PCI-Device group function
5916 * API jump table entries.
5918 * Return: 0 if success, otherwise -ENODEV
5921 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5925 /* Set up lpfc PCI-device group */
5926 phba->pci_dev_grp = dev_grp;
5928 /* The LPFC_PCI_DEV_OC uses SLI4 */
5929 if (dev_grp == LPFC_PCI_DEV_OC)
5930 phba->sli_rev = LPFC_SLI_REV4;
5932 /* Set up device INIT API function jump table */
5933 rc = lpfc_init_api_table_setup(phba, dev_grp);
5936 /* Set up SCSI API function jump table */
5937 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
5940 /* Set up SLI API function jump table */
5941 rc = lpfc_sli_api_table_setup(phba, dev_grp);
5944 /* Set up MBOX API function jump table */
5945 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
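/*
 * The setup above populates a per-device jump table so the rest of the
 * driver calls through phba function pointers without caring whether the
 * part is SLI-3 or SLI-4. A minimal sketch of the idea with a
 * hypothetical ops struct:
 *
 *	struct demo_ops {
 *		int (*start)(void *ctx);
 *		void (*stop)(void *ctx);
 *	};
 *
 *	static int demo_api_setup(struct demo_ops *ops, int dev_grp,
 *				  const struct demo_ops *sli3,
 *				  const struct demo_ops *sli4)
 *	{
 *		switch (dev_grp) {
 *		case 0: *ops = *sli3; return 0;
 *		case 1: *ops = *sli4; return 0;
 *		default: return -ENODEV;
 *		}
 *	}
 */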
5953 * lpfc_log_intr_mode - Log the active interrupt mode
5954 * @phba: pointer to lpfc hba data structure.
5955 * @intr_mode: active interrupt mode adopted.
5957 * This routine is invoked to log the active interrupt mode currently in use
5960 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
5962 switch (intr_mode) {
5964 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5965 "0470 Enable INTx interrupt mode.\n");
5968 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5969 "0481 Enabled MSI interrupt mode.\n");
5972 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5973 "0480 Enabled MSI-X interrupt mode.\n");
5976 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5977 "0482 Illegal interrupt mode.\n");
5984 * lpfc_enable_pci_dev - Enable a generic PCI device.
5985 * @phba: pointer to lpfc hba data structure.
5987 * This routine is invoked to enable the PCI device that is common to all
5992 * other values - error
5995 lpfc_enable_pci_dev(struct lpfc_hba *phba)
5997 struct pci_dev *pdev;
5999 /* Obtain PCI device reference */
6003 pdev = phba->pcidev;
6004 /* Enable PCI device */
6005 if (pci_enable_device_mem(pdev))
6007 /* Request PCI resource for the device */
6008 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
6009 goto out_disable_device;
6010 /* Set up device as PCI master and save state for EEH */
6011 pci_set_master(pdev);
6012 pci_try_set_mwi(pdev);
6013 pci_save_state(pdev);
6015 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
6016 if (pci_is_pcie(pdev))
6017 pdev->needs_freset = 1;
6022 pci_disable_device(pdev);
6024 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6025 "1401 Failed to enable pci device\n");
6030 * lpfc_disable_pci_dev - Disable a generic PCI device.
6031 * @phba: pointer to lpfc hba data structure.
6033 * This routine is invoked to disable the PCI device that is common to all
6037 lpfc_disable_pci_dev(struct lpfc_hba *phba)
6039 struct pci_dev *pdev;
6041 /* Obtain PCI device reference */
6045 pdev = phba->pcidev;
6046 /* Release PCI resource and disable PCI device */
6047 pci_release_mem_regions(pdev);
6048 pci_disable_device(pdev);
6054 * lpfc_reset_hba - Reset a hba
6055 * @phba: pointer to lpfc hba data structure.
6057 * This routine is invoked to reset a hba device. It brings the HBA
6058 * offline, performs a board restart, and then brings the board back
6059 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
6060 * any outstanding mailbox commands.
6063 lpfc_reset_hba(struct lpfc_hba *phba)
6065 /* If resets are disabled then set error state and return. */
6066 if (!phba->cfg_enable_hba_reset) {
6067 phba->link_state = LPFC_HBA_ERROR;
6070 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
6071 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
6073 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
6075 lpfc_sli_brdrestart(phba);
6077 lpfc_unblock_mgmt_io(phba);
6081 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
6082 * @phba: pointer to lpfc hba data structure.
6084 * This function reads the PCI SR-IOV extended capability of the physical
6085 * function to determine the maximum number of virtual functions the
6086 * device supports. It returns 0 if the device does not support SR-IOV.
6091 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
6093 struct pci_dev *pdev = phba->pcidev;
6097 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
6101 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
6106 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
6107 * @phba: pointer to lpfc hba data structure.
6108 * @nr_vfn: number of virtual functions to be enabled.
6110 * This function enables PCI SR-IOV virtual functions on a physical
6111 * function. It invokes the PCI SR-IOV API with the @nr_vfn provided to
6112 * enable that number of virtual functions on the physical function. As
6113 * not all devices support SR-IOV, the return code from pci_enable_sriov()
6114 * is not treated as an error condition for most devices.
6117 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
6119 struct pci_dev *pdev = phba->pcidev;
6120 uint16_t max_nr_vfn;
6123 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
6124 if (nr_vfn > max_nr_vfn) {
6125 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6126 "3057 Requested vfs (%d) greater than "
6127 "supported vfs (%d)", nr_vfn, max_nr_vfn);
6131 rc = pci_enable_sriov(pdev, nr_vfn);
6133 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6134 "2806 Failed to enable sriov on this device "
6135 "with vfn number nr_vf:%d, rc:%d\n",
6138 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6139 "2807 Successful enable sriov on this device "
6140 "with vfn number nr_vf:%d\n", nr_vfn);
6145 * lpfc_setup_driver_resource_phase1 - Phase-1 setup of driver internal resources.
6146 * @phba: pointer to lpfc hba data structure.
6148 * This routine is invoked to set up the driver internal resources before the
6149 * device specific resource setup to support the HBA device it is attached to.
6153 * other values - error
6156 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
6158 struct lpfc_sli *psli = &phba->sli;
6161 * Driver resources common to all SLI revisions
6163 atomic_set(&phba->fast_event_count, 0);
6164 spin_lock_init(&phba->hbalock);
6166 /* Initialize ndlp management spinlock */
6167 spin_lock_init(&phba->ndlp_lock);
6169 /* Initialize port_list spinlock */
6170 spin_lock_init(&phba->port_list_lock);
6171 INIT_LIST_HEAD(&phba->port_list);
6173 INIT_LIST_HEAD(&phba->work_list);
6174 init_waitqueue_head(&phba->wait_4_mlo_m_q);
6176 /* Initialize the wait queue head for the kernel thread */
6177 init_waitqueue_head(&phba->work_waitq);
6179 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6180 "1403 Protocols supported %s %s %s\n",
6181 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
6183 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
6185 (phba->nvmet_support ? "NVMET" : " "));
6187 /* Initialize the IO buffer list used by driver for SLI3 SCSI */
6188 spin_lock_init(&phba->scsi_buf_list_get_lock);
6189 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
6190 spin_lock_init(&phba->scsi_buf_list_put_lock);
6191 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
6193 /* Initialize the fabric iocb list */
6194 INIT_LIST_HEAD(&phba->fabric_iocb_list);
6196 /* Initialize list to save ELS buffers */
6197 INIT_LIST_HEAD(&phba->elsbuf);
6199 /* Initialize FCF connection rec list */
6200 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
6202 /* Initialize OAS configuration list */
6203 spin_lock_init(&phba->devicelock);
6204 INIT_LIST_HEAD(&phba->luns);
6206 /* MBOX heartbeat timer */
6207 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
6208 /* Fabric block timer */
6209 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
6210 /* EA polling mode timer */
6211 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
6212 /* Heartbeat timer */
6213 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
6215 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
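
/*
 * Illustrative sketch: the timer_setup()/from_timer() pattern behind the
 * timers initialized above. The callback receives only the timer_list
 * pointer and recovers its containing object; my_obj/my_timeout are
 * hypothetical names.
 */
struct my_obj {
	struct timer_list tmo;
};

static void my_timeout(struct timer_list *t)
{
	struct my_obj *obj = from_timer(obj, t, tmo);

	/* handle expiry for obj here */
}

/* at init time: timer_setup(&obj->tmo, my_timeout, 0); */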
6221 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
6222 * @phba: pointer to lpfc hba data structure.
6224 * This routine is invoked to set up the driver internal resources specific to
6225 * support the SLI-3 HBA device it is attached to.
6229 * other values - error
6232 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
6237 * Initialize timers used by driver
6240 /* FCP polling mode timer */
6241 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
6243 /* Host attention work mask setup */
6244 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
6245 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
6247 /* Get all the module params for configuring this host */
6248 lpfc_get_cfgparam(phba);
6249 /* Set up phase-1 common device driver resources */
6251 rc = lpfc_setup_driver_resource_phase1(phba);
6255 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
6256 phba->menlo_flag |= HBA_MENLO_SUPPORT;
6257 /* check for menlo minimum sg count */
6258 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
6259 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
6262 if (!phba->sli.sli3_ring)
6263 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
6264 sizeof(struct lpfc_sli_ring),
6266 if (!phba->sli.sli3_ring)
6270 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
6271 * used to create the sg_dma_buf_pool must be dynamically calculated.
6274 /* Initialize the host templates with the configured values. */
6275 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
6276 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
6277 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
6279 if (phba->sli_rev == LPFC_SLI_REV4)
6280 entry_sz = sizeof(struct sli4_sge);
6282 entry_sz = sizeof(struct ulp_bde64);
6284 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
6285 if (phba->cfg_enable_bg) {
6287 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
6288 * the FCP rsp, and a BDE for each. Since we have no control
6289 * over how many protection data segments the SCSI Layer
6290 * will hand us (i.e., there could be one for every block
6291 * in the IO), we just allocate enough BDEs to accommodate
6292 * our max amount and we need to limit lpfc_sg_seg_cnt to
6293 * minimize the risk of running out.
6295 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6296 sizeof(struct fcp_rsp) +
6297 (LPFC_MAX_SG_SEG_CNT * entry_sz);
6299 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
6300 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
6302 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
6303 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
6306 * The scsi_buf for a regular I/O will hold the FCP cmnd,
6307 * the FCP rsp, a BDE for each, and a BDE for up to
6308 * cfg_sg_seg_cnt data segments.
6310 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6311 sizeof(struct fcp_rsp) +
6312 ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
6314 /* Total BDEs in BPL for scsi_sg_list */
6315 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
6318 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
6319 "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
6320 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
6321 phba->cfg_total_seg_cnt);
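
	/*
	 * Worked example (illustrative numbers): with cfg_sg_seg_cnt = 64 on
	 * the non-BG path, the DMA buffer is sized as
	 *   sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + (64 + 2) * entry_sz
	 * i.e. one BDE per data segment plus the two reserved BDEs for the
	 * FCP cmnd and rsp, and cfg_total_seg_cnt becomes 66.
	 */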
6323 phba->max_vpi = LPFC_MAX_VPI;
6324 /* This will be set to correct value after config_port mbox */
6325 phba->max_vports = 0;
6328 * Initialize the SLI Layer to run with lpfc HBAs.
6330 lpfc_sli_setup(phba);
6331 lpfc_sli_queue_init(phba);
6333 /* Allocate device driver memory */
6334 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
6338 * Enable sr-iov virtual functions if supported and configured
6339 * through the module parameter.
6341 if (phba->cfg_sriov_nr_virtfn > 0) {
6342 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
6343 phba->cfg_sriov_nr_virtfn);
6345 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6346 "2808 Requested number of SR-IOV "
6347 "virtual functions (%d) is not "
6349 phba->cfg_sriov_nr_virtfn);
6350 phba->cfg_sriov_nr_virtfn = 0;
6358 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
6359 * @phba: pointer to lpfc hba data structure.
6361 * This routine is invoked to unset the driver internal resources set up
6362 * specific for supporting the SLI-3 HBA device it is attached to.
6365 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
6367 /* Free device driver memory allocated */
6368 lpfc_mem_free_all(phba);
6374 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
6375 * @phba: pointer to lpfc hba data structure.
6377 * This routine is invoked to set up the driver internal resources specific to
6378 * support the SLI-4 HBA device it is attached to.
6382 * other values - error
6385 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
6387 LPFC_MBOXQ_t *mboxq;
6389 int rc, i, max_buf_size;
6390 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
6391 struct lpfc_mqe *mqe;
6398 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
6399 phba->sli4_hba.num_possible_cpu = num_possible_cpus();
6400 phba->sli4_hba.curr_disp_cpu = 0;
6402 /* Get all the module params for configuring this host */
6403 lpfc_get_cfgparam(phba);
6405 /* Set up phase-1 common device driver resources */
6406 rc = lpfc_setup_driver_resource_phase1(phba);
6410 /* Before proceed, wait for POST done and device ready */
6411 rc = lpfc_sli4_post_status_check(phba);
6416 * Initialize timers used by driver
6419 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
6421 /* FCF rediscover timer */
6422 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
6425 * Control structure for handling external multi-buffer mailbox
6426 * command pass-through.
6428 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
6429 sizeof(struct lpfc_mbox_ext_buf_ctx));
6430 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
6432 phba->max_vpi = LPFC_MAX_VPI;
6434 /* This will be set to correct value after the read_config mbox */
6435 phba->max_vports = 0;
6437 /* Program the default value of vlan_id and fc_map */
6438 phba->valid_vlan = 0;
6439 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
6440 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
6441 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
6444 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
6445 * we will associate a new ring, for each EQ/CQ/WQ tuple.
6446 * The WQ create will allocate the ring.
6450 * 1 for cmd, 1 for rsp, NVME adds an extra one
6451 * for boundary conditions in its max_sgl_segment template.
6454 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
6458 * It doesn't matter what family our adapter is in, we are
6459 * limited to 2 Pages, 512 SGEs, for our SGL.
6460 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
6462 max_buf_size = (2 * SLI4_PAGE_SIZE);
6465 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
6466 * used to create the sg_dma_buf_pool must be calculated.
6468 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
6470 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
6471 * the FCP rsp, and an SGE. Since we have no control
6472 * over how many protection segments the SCSI Layer
6473 * will hand us (i.e., there could be one for every block
6474 * in the IO), just allocate enough SGEs to accommodate
6475 * our max amount and we need to limit lpfc_sg_seg_cnt
6476 * to minimize the risk of running out.
6478 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6479 sizeof(struct fcp_rsp) + max_buf_size;
6481 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
6482 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
6485 * If supporting DIF, reduce the seg count for scsi to
6486 * allow room for the DIF sges.
6488 if (phba->cfg_enable_bg &&
6489 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
6490 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
6492 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6496 * The scsi_buf for a regular I/O holds the FCP cmnd,
6497 * the FCP rsp, a SGE for each, and a SGE for up to
6498 * cfg_sg_seg_cnt data segments.
6500 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6501 sizeof(struct fcp_rsp) +
6502 ((phba->cfg_sg_seg_cnt + extra) *
6503 sizeof(struct sli4_sge));
6505 /* Total SGEs for scsi_sg_list */
6506 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
6507 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6510 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
6511 * need to post 1 page for the SGL.
6515 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
6516 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6517 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
6518 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
6519 "6300 Reducing NVME sg segment "
6521 LPFC_MAX_NVME_SEG_CNT);
6522 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
6524 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
6527 /* Initialize the host templates with the updated values. */
6528 lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
6529 lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
6530 lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
6532 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
6533 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
6535 phba->cfg_sg_dma_buf_size =
6536 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
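
	/*
	 * Illustrative note: SLI4_PAGE_ALIGN() rounds up to the next SLI4
	 * page boundary; e.g. a computed size of 2600 bytes becomes 4096
	 * when SLI4_PAGE_SIZE is 4096, keeping each pool element page
	 * aligned for the hardware.
	 */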
6538 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
6539 "9087 sg_seg_cnt:%d dmabuf_size:%d "
6540 "total:%d scsi:%d nvme:%d\n",
6541 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
6542 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
6543 phba->cfg_nvme_seg_cnt);
6545 /* Initialize buffer queue management fields */
6546 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
6547 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
6548 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
6551 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
6553 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
6554 /* Initialize the Abort scsi buffer list used by driver */
6555 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
6556 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
6559 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6560 /* Initialize the Abort nvme buffer list used by driver */
6561 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
6562 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
6563 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
6564 spin_lock_init(&phba->sli4_hba.t_active_list_lock);
6565 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
6568 /* This abort list used by worker thread */
6569 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
6570 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
6573 * Initialize driver internal slow-path work queues
6576 /* Driver internal slow-path CQ Event pool */
6577 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
6578 /* Response IOCB work queue list */
6579 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
6580 /* Asynchronous event CQ Event work queue list */
6581 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
6582 /* Fast-path XRI aborted CQ Event work queue list */
6583 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
6584 /* Slow-path XRI aborted CQ Event work queue list */
6585 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
6586 /* Receive queue CQ Event work queue list */
6587 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
6589 /* Initialize extent block lists. */
6590 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
6591 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
6592 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
6593 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
6595 /* Initialize mboxq lists. Even if the early init routines fail,
6596 * these lists must be correctly initialized for cleanup to be safe.
6598 INIT_LIST_HEAD(&phba->sli.mboxq);
6599 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
6601 /* initialize optic_state to 0xFF */
6602 phba->sli4_hba.lnk_info.optic_state = 0xff;
6604 /* Allocate device driver memory */
6605 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
6609 /* IF Type 2 ports get initialized now. */
6610 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
6611 LPFC_SLI_INTF_IF_TYPE_2) {
6612 rc = lpfc_pci_function_reset(phba);
6617 phba->temp_sensor_support = 1;
6620 /* Create the bootstrap mailbox command */
6621 rc = lpfc_create_bootstrap_mbox(phba);
6625 /* Set up the host's endian order with the device. */
6626 rc = lpfc_setup_endian_order(phba);
6628 goto out_free_bsmbx;
6630 /* Set up the hba's configuration parameters. */
6631 rc = lpfc_sli4_read_config(phba);
6633 goto out_free_bsmbx;
6634 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
6636 goto out_free_bsmbx;
6638 /* IF Type 0 ports get initialized now. */
6639 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6640 LPFC_SLI_INTF_IF_TYPE_0) {
6641 rc = lpfc_pci_function_reset(phba);
6643 goto out_free_bsmbx;
6646 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6650 goto out_free_bsmbx;
6653 /* Check for NVMET being configured */
6654 phba->nvmet_support = 0;
6655 if (lpfc_enable_nvmet_cnt) {
6657 /* First get WWN of HBA instance */
6658 lpfc_read_nv(phba, mboxq);
6659 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6660 if (rc != MBX_SUCCESS) {
6661 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6662 "6016 Mailbox failed , mbxCmd x%x "
6663 "READ_NV, mbxStatus x%x\n",
6664 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6665 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
6666 mempool_free(mboxq, phba->mbox_mem_pool);
6668 goto out_free_bsmbx;
6671 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
6673 wwn = cpu_to_be64(wwn);
6674 phba->sli4_hba.wwnn.u.name = wwn;
6675 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
6677 /* wwn is WWPN of HBA instance */
6678 wwn = cpu_to_be64(wwn);
6679 phba->sli4_hba.wwpn.u.name = wwn;
6681 /* Check to see if it matches any module parameter */
6682 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
6683 if (wwn == lpfc_enable_nvmet[i]) {
6684 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
6685 if (lpfc_nvmet_mem_alloc(phba))
6688 phba->nvmet_support = 1; /* a match */
6690 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6691 "6017 NVME Target %016llx\n",
6694 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6695 "6021 Can't enable NVME Target."
6696 " NVME_TARGET_FC infrastructure"
6697 " is not in kernel\n");
6699 /* Not supported for NVMET */
6700 phba->cfg_xri_rebalancing = 0;
6706 lpfc_nvme_mod_param_dep(phba);
6708 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
6709 lpfc_supported_pages(mboxq);
6710 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6712 mqe = &mboxq->u.mqe;
6713 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
6714 LPFC_MAX_SUPPORTED_PAGES);
6715 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
6716 switch (pn_page[i]) {
6717 case LPFC_SLI4_PARAMETERS:
6718 phba->sli4_hba.pc_sli4_params.supported = 1;
6724 /* Read the port's SLI4 Parameters capabilities if supported. */
6725 if (phba->sli4_hba.pc_sli4_params.supported)
6726 rc = lpfc_pc_sli4_params_get(phba, mboxq);
6728 mempool_free(mboxq, phba->mbox_mem_pool);
6730 goto out_free_bsmbx;
6735 * Get sli4 parameters that override parameters from Port capabilities.
6736 * If this call fails, it isn't critical unless the SLI4 parameters come
6739 rc = lpfc_get_sli4_parameters(phba, mboxq);
6741 if_type = bf_get(lpfc_sli_intf_if_type,
6742 &phba->sli4_hba.sli_intf);
6743 if_fam = bf_get(lpfc_sli_intf_sli_family,
6744 &phba->sli4_hba.sli_intf);
6745 if (phba->sli4_hba.extents_in_use &&
6746 phba->sli4_hba.rpi_hdrs_in_use) {
6747 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6748 "2999 Unsupported SLI4 Parameters "
6749 "Extents and RPI headers enabled.\n");
6750 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6751 if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
6752 mempool_free(mboxq, phba->mbox_mem_pool);
6754 goto out_free_bsmbx;
6757 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6758 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
6759 mempool_free(mboxq, phba->mbox_mem_pool);
6761 goto out_free_bsmbx;
6765 mempool_free(mboxq, phba->mbox_mem_pool);
6767 /* Verify OAS is supported */
6768 lpfc_sli4_oas_verify(phba);
6770 /* Verify RAS support on adapter */
6771 lpfc_sli4_ras_init(phba);
6773 /* Verify all the SLI4 queues */
6774 rc = lpfc_sli4_queue_verify(phba);
6776 goto out_free_bsmbx;
6778 /* Create driver internal CQE event pool */
6779 rc = lpfc_sli4_cq_event_pool_create(phba);
6781 goto out_free_bsmbx;
6783 /* Initialize sgl lists per host */
6784 lpfc_init_sgl_list(phba);
6786 /* Allocate and initialize active sgl array */
6787 rc = lpfc_init_active_sgl_array(phba);
6789 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6790 "1430 Failed to initialize sgl list.\n");
6791 goto out_destroy_cq_event_pool;
6793 rc = lpfc_sli4_init_rpi_hdrs(phba);
6795 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6796 "1432 Failed to initialize rpi headers.\n");
6797 goto out_free_active_sgl;
6800 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
6801 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
6802 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
6804 if (!phba->fcf.fcf_rr_bmask) {
6805 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6806 "2759 Failed allocate memory for FCF round "
6807 "robin failover bmask\n");
6809 goto out_remove_rpi_hdrs;
6812 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
6813 sizeof(struct lpfc_hba_eq_hdl),
6815 if (!phba->sli4_hba.hba_eq_hdl) {
6816 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6817 "2572 Failed allocate memory for "
6818 "fast-path per-EQ handle array\n");
6820 goto out_free_fcf_rr_bmask;
6823 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
6824 sizeof(struct lpfc_vector_map_info),
6826 if (!phba->sli4_hba.cpu_map) {
6827 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6828 "3327 Failed allocate memory for msi-x "
6829 "interrupt vector mapping\n");
6831 goto out_free_hba_eq_hdl;
6834 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
6835 if (!phba->sli4_hba.eq_info) {
6836 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6837 "3321 Failed allocation for per_cpu stats\n");
6839 goto out_free_hba_cpu_map;
6842 * Enable sr-iov virtual functions if supported and configured
6843 * through the module parameter.
6845 if (phba->cfg_sriov_nr_virtfn > 0) {
6846 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
6847 phba->cfg_sriov_nr_virtfn);
6849 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6850 "3020 Requested number of SR-IOV "
6851 "virtual functions (%d) is not "
6853 phba->cfg_sriov_nr_virtfn);
6854 phba->cfg_sriov_nr_virtfn = 0;
6860 out_free_hba_cpu_map:
6861 kfree(phba->sli4_hba.cpu_map);
6862 out_free_hba_eq_hdl:
6863 kfree(phba->sli4_hba.hba_eq_hdl);
6864 out_free_fcf_rr_bmask:
6865 kfree(phba->fcf.fcf_rr_bmask);
6866 out_remove_rpi_hdrs:
6867 lpfc_sli4_remove_rpi_hdrs(phba);
6868 out_free_active_sgl:
6869 lpfc_free_active_sgl(phba);
6870 out_destroy_cq_event_pool:
6871 lpfc_sli4_cq_event_pool_destroy(phba);
6873 lpfc_destroy_bootstrap_mbox(phba);
6875 lpfc_mem_free(phba);
6880 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
6881 * @phba: pointer to lpfc hba data structure.
6883 * This routine is invoked to unset the driver internal resources set up
6884 * specific for supporting the SLI-4 HBA device it is attached to.
6887 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
6889 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
6891 free_percpu(phba->sli4_hba.eq_info);
6893 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
6894 kfree(phba->sli4_hba.cpu_map);
6895 phba->sli4_hba.num_possible_cpu = 0;
6896 phba->sli4_hba.num_present_cpu = 0;
6897 phba->sli4_hba.curr_disp_cpu = 0;
6899 /* Free memory allocated for fast-path work queue handles */
6900 kfree(phba->sli4_hba.hba_eq_hdl);
6902 /* Free the allocated rpi headers. */
6903 lpfc_sli4_remove_rpi_hdrs(phba);
6904 lpfc_sli4_remove_rpis(phba);
6906 /* Free eligible FCF index bmask */
6907 kfree(phba->fcf.fcf_rr_bmask);
6909 /* Free the ELS sgl list */
6910 lpfc_free_active_sgl(phba);
6911 lpfc_free_els_sgl_list(phba);
6912 lpfc_free_nvmet_sgl_list(phba);
6914 /* Free the completion queue EQ event pool */
6915 lpfc_sli4_cq_event_release_all(phba);
6916 lpfc_sli4_cq_event_pool_destroy(phba);
6918 /* Release resource identifiers. */
6919 lpfc_sli4_dealloc_resource_identifiers(phba);
6921 /* Free the bsmbx region. */
6922 lpfc_destroy_bootstrap_mbox(phba);
6924 /* Free the SLI Layer memory with SLI4 HBAs */
6925 lpfc_mem_free_all(phba);
6927 /* Free the current connect table */
6928 list_for_each_entry_safe(conn_entry, next_conn_entry,
6929 &phba->fcf_conn_rec_list, list) {
6930 list_del_init(&conn_entry->list);
6938 * lpfc_init_api_table_setup - Set up init api function jump table
6939 * @phba: The hba struct for which this call is being executed.
6940 * @dev_grp: The HBA PCI-Device group number.
6942 * This routine sets up the device INIT interface API function jump table
6945 * Returns: 0 - success, -ENODEV - failure.
6948 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
6950 phba->lpfc_hba_init_link = lpfc_hba_init_link;
6951 phba->lpfc_hba_down_link = lpfc_hba_down_link;
6952 phba->lpfc_selective_reset = lpfc_selective_reset;
6954 case LPFC_PCI_DEV_LP:
6955 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
6956 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
6957 phba->lpfc_stop_port = lpfc_stop_port_s3;
6959 case LPFC_PCI_DEV_OC:
6960 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
6961 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
6962 phba->lpfc_stop_port = lpfc_stop_port_s4;
6965 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6966 "1431 Invalid HBA PCI-device group: 0x%x\n",
6975 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
6976 * @phba: pointer to lpfc hba data structure.
6978 * This routine is invoked to set up the driver internal resources after the
6979 * device specific resource setup to support the HBA device it is attached to.
6983 * other values - error
6986 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
6990 /* Startup the kernel thread for this host adapter. */
6991 phba->worker_thread = kthread_run(lpfc_do_work, phba,
6992 "lpfc_worker_%d", phba->brd_no);
6993 if (IS_ERR(phba->worker_thread)) {
6994 error = PTR_ERR(phba->worker_thread);
6998 /* The lpfc_wq workqueue for deferred irq use, is only used for SLI4 */
6999 if (phba->sli_rev == LPFC_SLI_REV4)
7000 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
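
/*
 * Illustrative sketch: the kthread_run()/alloc_workqueue() pairing used
 * above. kthread_run() returns an ERR_PTR() on failure, so it must be
 * checked with IS_ERR()/PTR_ERR(); my_worker_fn is hypothetical.
 */
static int my_worker_fn(void *arg)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();	/* sleep until woken with work to do */
	}
	return 0;
}

/*
 * task = kthread_run(my_worker_fn, ctx, "my_worker_%d", id);
 * if (IS_ERR(task))
 *         return PTR_ERR(task);
 * wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 0);
 */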
7008 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
7009 * @phba: pointer to lpfc hba data structure.
7011 * This routine is invoked to unset the driver internal resources set up after
7012 * the device specific resource setup for supporting the HBA device it
7016 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
7019 flush_workqueue(phba->wq);
7020 destroy_workqueue(phba->wq);
7024 /* Stop kernel worker thread */
7025 if (phba->worker_thread)
7026 kthread_stop(phba->worker_thread);
7030 * lpfc_free_iocb_list - Free iocb list.
7031 * @phba: pointer to lpfc hba data structure.
7033 * This routine is invoked to free the driver's IOCB list and memory.
7036 lpfc_free_iocb_list(struct lpfc_hba *phba)
7038 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
7040 spin_lock_irq(&phba->hbalock);
7041 list_for_each_entry_safe(iocbq_entry, iocbq_next,
7042 &phba->lpfc_iocb_list, list) {
7043 list_del(&iocbq_entry->list);
7045 phba->total_iocbq_bufs--;
7047 spin_unlock_irq(&phba->hbalock);
7053 * lpfc_init_iocb_list - Allocate and initialize iocb list.
7054 * @phba: pointer to lpfc hba data structure.
7056 * This routine is invoked to allocate and initialize the driver's IOCB
7057 * list and set up the IOCB tag array accordingly.
7061 * other values - error
7064 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
7066 struct lpfc_iocbq *iocbq_entry = NULL;
7070 /* Initialize and populate the iocb list per host. */
7071 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
7072 for (i = 0; i < iocb_count; i++) {
7073 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
7074 if (iocbq_entry == NULL) {
7075 printk(KERN_ERR "%s: only allocated %d iocbs of "
7076 "expected %d count. Unloading driver.\n",
7077 __func__, i, LPFC_IOCB_LIST_CNT);
7078 goto out_free_iocbq;
7081 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
7084 printk(KERN_ERR "%s: failed to allocate IOTAG. "
7085 "Unloading driver.\n", __func__);
7086 goto out_free_iocbq;
7088 iocbq_entry->sli4_lxritag = NO_XRI;
7089 iocbq_entry->sli4_xritag = NO_XRI;
7091 spin_lock_irq(&phba->hbalock);
7092 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
7093 phba->total_iocbq_bufs++;
7094 spin_unlock_irq(&phba->hbalock);
7100 lpfc_free_iocb_list(phba);
7106 * lpfc_free_sgl_list - Free a given sgl list.
7107 * @phba: pointer to lpfc hba data structure.
7108 * @sglq_list: pointer to the head of sgl list.
7110 * This routine is invoked to free a given sgl list and memory.
7113 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
7115 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7117 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
7118 list_del(&sglq_entry->list);
7119 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
7125 * lpfc_free_els_sgl_list - Free els sgl list.
7126 * @phba: pointer to lpfc hba data structure.
7128 * This routine is invoked to free the driver's els sgl list and memory.
7131 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
7133 LIST_HEAD(sglq_list);
7135 /* Retrieve all els sgls from driver list */
7136 spin_lock_irq(&phba->hbalock);
7137 spin_lock(&phba->sli4_hba.sgl_list_lock);
7138 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
7139 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7140 spin_unlock_irq(&phba->hbalock);
7142 /* Now free the sgl list */
7143 lpfc_free_sgl_list(phba, &sglq_list);
7147 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
7148 * @phba: pointer to lpfc hba data structure.
7150 * This routine is invoked to free the driver's nvmet sgl list and memory.
7153 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
7155 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7156 LIST_HEAD(sglq_list);
7158 /* Retrieve all nvmet sgls from driver list */
7159 spin_lock_irq(&phba->hbalock);
7160 spin_lock(&phba->sli4_hba.sgl_list_lock);
7161 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
7162 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7163 spin_unlock_irq(&phba->hbalock);
7165 /* Now free the sgl list */
7166 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
7167 list_del(&sglq_entry->list);
7168 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
7172 /* Update the nvmet_xri_cnt to reflect no current sgls.
7173 * The next initialization cycle sets the count and allocates
7174 * the sgls over again.
7176 phba->sli4_hba.nvmet_xri_cnt = 0;
7180 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
7181 * @phba: pointer to lpfc hba data structure.
7183 * This routine is invoked to allocate the driver's active sgl memory.
7184 * This array will hold the sglq_entry's for active IOs.
7187 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
7190 size = sizeof(struct lpfc_sglq *);
7191 size *= phba->sli4_hba.max_cfg_param.max_xri;
7193 phba->sli4_hba.lpfc_sglq_active_list =
7194 kzalloc(size, GFP_KERNEL);
7195 if (!phba->sli4_hba.lpfc_sglq_active_list)
7201 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
7202 * @phba: pointer to lpfc hba data structure.
7204 * This routine is invoked to walk through the array of active sglq entries
7205 * and free all of the resources.
7206 * This is just a placeholder for now.
7209 lpfc_free_active_sgl(struct lpfc_hba *phba)
7211 kfree(phba->sli4_hba.lpfc_sglq_active_list);
7215 * lpfc_init_sgl_list - Allocate and initialize sgl list.
7216 * @phba: pointer to lpfc hba data structure.
7218 * This routine is invoked to allocate and initialize the driver's sgl
7219 * list and set up the sgl xritag tag array accordingly.
7223 lpfc_init_sgl_list(struct lpfc_hba *phba)
7225 /* Initialize and populate the sglq list per host/VF. */
7226 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
7227 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7228 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
7229 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
7231 /* els xri-sgl book keeping */
7232 phba->sli4_hba.els_xri_cnt = 0;
7234 /* nvme xri-buffer book keeping */
7235 phba->sli4_hba.io_xri_cnt = 0;
7239 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
7240 * @phba: pointer to lpfc hba data structure.
7242 * This routine is invoked to post rpi header templates to the
7243 * port for those SLI4 ports that do not support extents. This routine
7244 * posts a PAGE_SIZE memory region to the port to hold up to
7245 * PAGE_SIZE / 64 rpi context headers. This is an initialization routine
7246 * and should be called only when interrupts are disabled.
7250 * -ERROR - otherwise.
7253 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
7256 struct lpfc_rpi_hdr *rpi_hdr;
7258 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
7259 if (!phba->sli4_hba.rpi_hdrs_in_use)
7261 if (phba->sli4_hba.extents_in_use)
7264 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
7266 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7267 "0391 Error during rpi post operation\n");
7268 lpfc_sli4_remove_rpis(phba);
7276 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
7277 * @phba: pointer to lpfc hba data structure.
7279 * This routine is invoked to allocate a single 4KB memory region to
7280 * support rpis and stores them in the phba. This single region
7281 * provides support for up to 64 rpis. The region is used globally
7285 * A valid rpi hdr on success.
7286 * A NULL pointer on any failure.
7288 struct lpfc_rpi_hdr *
7289 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
7291 uint16_t rpi_limit, curr_rpi_range;
7292 struct lpfc_dmabuf *dmabuf;
7293 struct lpfc_rpi_hdr *rpi_hdr;
7296 * If the SLI4 port supports extents, posting the rpi header isn't
7297 * required. Set the expected maximum count and let the actual value
7298 * get set when extents are fully allocated.
7300 if (!phba->sli4_hba.rpi_hdrs_in_use)
7302 if (phba->sli4_hba.extents_in_use)
7305 /* The limit on the logical index is just the max_rpi count. */
7306 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
7308 spin_lock_irq(&phba->hbalock);
7310 * Establish the starting RPI in this header block. The starting
7311 * rpi is normalized to a zero base because the physical rpi is
7314 curr_rpi_range = phba->sli4_hba.next_rpi;
7315 spin_unlock_irq(&phba->hbalock);
7317 /* Reached full RPI range */
7318 if (curr_rpi_range == rpi_limit)
7322 * First allocate the protocol header region for the port. The
7323 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
7325 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
7329 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
7330 LPFC_HDR_TEMPLATE_SIZE,
7331 &dmabuf->phys, GFP_KERNEL);
7332 if (!dmabuf->virt) {
7334 goto err_free_dmabuf;
7337 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
7339 goto err_free_coherent;
7342 /* Save the rpi header data for cleanup later. */
7343 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
7345 goto err_free_coherent;
7347 rpi_hdr->dmabuf = dmabuf;
7348 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
7349 rpi_hdr->page_count = 1;
7350 spin_lock_irq(&phba->hbalock);
7352 /* The rpi_hdr stores the logical index only. */
7353 rpi_hdr->start_rpi = curr_rpi_range;
7354 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
7355 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
7357 spin_unlock_irq(&phba->hbalock);
7361 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
7362 dmabuf->virt, dmabuf->phys);
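
/*
 * Illustrative note: IS_ALIGNED(phys, LPFC_HDR_TEMPLATE_SIZE) simply tests
 * the low address bits, e.g. (phys & 0xfff) == 0 for a 4KB template. The
 * DMA API documents that dma_alloc_coherent() returns addresses aligned to
 * the smallest page order covering the request, so the explicit check above
 * mostly documents the hardware's 4K-alignment requirement.
 */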
7369 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
7370 * @phba: pointer to lpfc hba data structure.
7372 * This routine is invoked to remove all memory resources allocated
7373 * to support rpis for SLI4 ports not supporting extents. This routine
7374 * presumes the caller has released all rpis consumed by fabric or port
7375 * logins and is prepared to have the header pages removed.
7378 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
7380 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
7382 if (!phba->sli4_hba.rpi_hdrs_in_use)
7385 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
7386 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
7387 list_del(&rpi_hdr->list);
7388 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
7389 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
7390 kfree(rpi_hdr->dmabuf);
7394 /* There are no rpis available to the port now. */
7395 phba->sli4_hba.next_rpi = 0;
7399 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
7400 * @pdev: pointer to pci device data structure.
7402 * This routine is invoked to allocate the driver hba data structure for an
7403 * HBA device. If the allocation is successful, the phba reference to the
7404 * PCI device data structure is set.
7407 * pointer to @phba - successful
7410 static struct lpfc_hba *
7411 lpfc_hba_alloc(struct pci_dev *pdev)
7413 struct lpfc_hba *phba;
7415 /* Allocate memory for HBA structure */
7416 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
7418 dev_err(&pdev->dev, "failed to allocate hba struct\n");
7422 /* Set reference to PCI device in HBA structure */
7423 phba->pcidev = pdev;
7425 /* Assign an unused board number */
7426 phba->brd_no = lpfc_get_instance();
7427 if (phba->brd_no < 0) {
7431 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
7433 spin_lock_init(&phba->ct_ev_lock);
7434 INIT_LIST_HEAD(&phba->ct_ev_waiters);
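
/*
 * Illustrative sketch: lpfc_get_instance() hands out board numbers from the
 * driver-global lpfc_hba_index idr (released again in lpfc_hba_free()).
 * The underlying pattern, shown against a generic idr:
 */
static int my_get_instance(struct idr *idr)
{
	/* allocate the lowest free id >= 0; returns the id or -errno */
	return idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
}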
7440 * lpfc_hba_free - Free driver hba data structure with a device.
7441 * @phba: pointer to lpfc hba data structure.
7443 * This routine is invoked to free the driver hba data structure with an
7447 lpfc_hba_free(struct lpfc_hba *phba)
7449 if (phba->sli_rev == LPFC_SLI_REV4)
7450 kfree(phba->sli4_hba.hdwq);
7452 /* Release the driver assigned board number */
7453 idr_remove(&lpfc_hba_index, phba->brd_no);
7455 /* Free memory allocated with sli3 rings */
7456 kfree(phba->sli.sli3_ring);
7457 phba->sli.sli3_ring = NULL;
7464 * lpfc_create_shost - Create hba physical port with associated scsi host.
7465 * @phba: pointer to lpfc hba data structure.
7467 * This routine is invoked to create HBA physical port and associate a SCSI
7472 * other values - error
7475 lpfc_create_shost(struct lpfc_hba *phba)
7477 struct lpfc_vport *vport;
7478 struct Scsi_Host *shost;
7480 /* Initialize HBA FC structure */
7481 phba->fc_edtov = FF_DEF_EDTOV;
7482 phba->fc_ratov = FF_DEF_RATOV;
7483 phba->fc_altov = FF_DEF_ALTOV;
7484 phba->fc_arbtov = FF_DEF_ARBTOV;
7486 atomic_set(&phba->sdev_cnt, 0);
7487 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
7491 shost = lpfc_shost_from_vport(vport);
7492 phba->pport = vport;
7494 if (phba->nvmet_support) {
7495 /* Only 1 vport (pport) will support NVME target */
7496 if (phba->txrdy_payload_pool == NULL) {
7497 phba->txrdy_payload_pool = dma_pool_create(
7498 "txrdy_pool", &phba->pcidev->dev,
7499 TXRDY_PAYLOAD_LEN, 16, 0);
7500 if (phba->txrdy_payload_pool) {
7501 phba->targetport = NULL;
7502 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
7503 lpfc_printf_log(phba, KERN_INFO,
7504 LOG_INIT | LOG_NVME_DISC,
7505 "6076 NVME Target Found\n");
7510 lpfc_debugfs_initialize(vport);
7511 /* Put reference to SCSI host to driver's device private data */
7512 pci_set_drvdata(phba->pcidev, shost);
7515 * At this point we are fully registered with PSA. In addition,
7516 * any initial discovery should be completed.
7518 vport->load_flag |= FC_ALLOW_FDMI;
7519 if (phba->cfg_enable_SmartSAN ||
7520 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
7522 /* Setup appropriate attribute masks */
7523 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
7524 if (phba->cfg_enable_SmartSAN)
7525 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
7527 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
7533 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
7534 * @phba: pointer to lpfc hba data structure.
7536 * This routine is invoked to destroy HBA physical port and the associated
7540 lpfc_destroy_shost(struct lpfc_hba *phba)
7542 struct lpfc_vport *vport = phba->pport;
7544 /* Destroy physical port that associated with the SCSI host */
7545 destroy_port(vport);
7551 * lpfc_setup_bg - Setup Block guard structures and debug areas.
7552 * @phba: pointer to lpfc hba data structure.
7553 * @shost: the shost to be used to detect Block guard settings.
7555 * This routine sets up the local Block guard protocol settings for @shost.
7556 * This routine also allocates memory for debugging bg buffers.
7559 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
7565 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7566 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7567 "1478 Registering BlockGuard with the "
7570 old_mask = phba->cfg_prot_mask;
7571 old_guard = phba->cfg_prot_guard;
7573 /* Only allow supported values */
7574 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
7575 SHOST_DIX_TYPE0_PROTECTION |
7576 SHOST_DIX_TYPE1_PROTECTION);
7577 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
7578 SHOST_DIX_GUARD_CRC);
7580 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
7581 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
7582 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
7584 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7585 if ((old_mask != phba->cfg_prot_mask) ||
7586 (old_guard != phba->cfg_prot_guard))
7587 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7588 "1475 Registering BlockGuard with the "
7589 "SCSI layer: mask %d guard %d\n",
7590 phba->cfg_prot_mask,
7591 phba->cfg_prot_guard);
7593 scsi_host_set_prot(shost, phba->cfg_prot_mask);
7594 scsi_host_set_guard(shost, phba->cfg_prot_guard);
7596 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7597 "1479 Not Registering BlockGuard with the SCSI "
7598 "layer, Bad protection parameters: %d %d\n",
7599 old_mask, old_guard);
7602 if (!_dump_buf_data) {
7604 spin_lock_init(&_dump_buf_lock);
7606 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
7607 if (_dump_buf_data) {
7608 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7609 "9043 BLKGRD: allocated %d pages for "
7610 "_dump_buf_data at 0x%p\n",
7611 (1 << pagecnt), _dump_buf_data);
7612 _dump_buf_data_order = pagecnt;
7613 memset(_dump_buf_data, 0,
7614 ((1 << PAGE_SHIFT) << pagecnt));
7619 if (!_dump_buf_data_order)
7620 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7621 "9044 BLKGRD: ERROR unable to allocate "
7622 "memory for hexdump\n");
7624 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7625 "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
7626 "\n", _dump_buf_data);
7627 if (!_dump_buf_dif) {
7630 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
7631 if (_dump_buf_dif) {
7632 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7633 "9046 BLKGRD: allocated %d pages for "
7634 "_dump_buf_dif at 0x%p\n",
7635 (1 << pagecnt), _dump_buf_dif);
7636 _dump_buf_dif_order = pagecnt;
7637 memset(_dump_buf_dif, 0,
7638 ((1 << PAGE_SHIFT) << pagecnt));
7643 if (!_dump_buf_dif_order)
7644 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7645 "9047 BLKGRD: ERROR unable to allocate "
7646 "memory for hexdump\n");
7648 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7649 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
7654 * lpfc_post_init_setup - Perform necessary device post initialization setup.
7655 * @phba: pointer to lpfc hba data structure.
7657 * This routine is invoked to perform all the necessary post initialization
7658 * setup for the device.
7661 lpfc_post_init_setup(struct lpfc_hba *phba)
7663 struct Scsi_Host *shost;
7664 struct lpfc_adapter_event_header adapter_event;
7666 /* Get the default values for Model Name and Description */
7667 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
7670 * hba setup may have changed the hba_queue_depth so we need to
7671 * adjust the value of can_queue.
7673 shost = pci_get_drvdata(phba->pcidev);
7674 shost->can_queue = phba->cfg_hba_queue_depth - 10;
7676 lpfc_host_attrib_init(shost);
7678 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
7679 spin_lock_irq(shost->host_lock);
7680 lpfc_poll_start_timer(phba);
7681 spin_unlock_irq(shost->host_lock);
7684 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7685 "0428 Perform SCSI scan\n");
7686 /* Send board arrival event to upper layer */
7687 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
7688 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
7689 fc_host_post_vendor_event(shost, fc_get_event_number(),
7690 sizeof(adapter_event),
7691 (char *) &adapter_event,
7697 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
7698 * @phba: pointer to lpfc hba data structure.
7700 * This routine is invoked to set up the PCI device memory space for device
7701 * with SLI-3 interface spec.
7705 * other values - error
7708 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
7710 struct pci_dev *pdev = phba->pcidev;
7711 unsigned long bar0map_len, bar2map_len;
7719 /* Set the device DMA mask size */
7720 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7722 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7727 /* Get the bus address of Bar0 and Bar2 and the number of bytes
7728 * required by each mapping.
7730 phba->pci_bar0_map = pci_resource_start(pdev, 0);
7731 bar0map_len = pci_resource_len(pdev, 0);
7733 phba->pci_bar2_map = pci_resource_start(pdev, 2);
7734 bar2map_len = pci_resource_len(pdev, 2);
7736 /* Map HBA SLIM to a kernel virtual address. */
7737 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
7738 if (!phba->slim_memmap_p) {
7739 dev_printk(KERN_ERR, &pdev->dev,
7740 "ioremap failed for SLIM memory.\n");
7744 /* Map HBA Control Registers to a kernel virtual address. */
7745 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
7746 if (!phba->ctrl_regs_memmap_p) {
7747 dev_printk(KERN_ERR, &pdev->dev,
7748 "ioremap failed for HBA control registers.\n");
7749 goto out_iounmap_slim;
7752 /* Allocate memory for SLI-2 structures */
7753 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7754 &phba->slim2p.phys, GFP_KERNEL);
7755 if (!phba->slim2p.virt)
7758 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
7759 phba->mbox_ext = (phba->slim2p.virt +
7760 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
7761 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
7762 phba->IOCBs = (phba->slim2p.virt +
7763 offsetof(struct lpfc_sli2_slim, IOCBs));
7765 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
7766 lpfc_sli_hbq_size(),
7767 &phba->hbqslimp.phys,
7769 if (!phba->hbqslimp.virt)
7772 hbq_count = lpfc_sli_hbq_count();
7773 ptr = phba->hbqslimp.virt;
7774 for (i = 0; i < hbq_count; ++i) {
7775 phba->hbqs[i].hbq_virt = ptr;
7776 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
7777 ptr += (lpfc_hbq_defs[i]->entry_count *
7778 sizeof(struct lpfc_hbq_entry));
7780 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
7781 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
7783 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
7785 phba->MBslimaddr = phba->slim_memmap_p;
7786 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
7787 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
7788 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
7789 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
7794 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7795 phba->slim2p.virt, phba->slim2p.phys);
7797 iounmap(phba->ctrl_regs_memmap_p);
7799 iounmap(phba->slim_memmap_p);
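
/*
 * Illustrative sketch: the 64-bit-first DMA mask negotiation performed at
 * the top of this routine, falling back to 32-bit addressing.
 */
static int my_set_dma_masks(struct pci_dev *pdev)
{
	int rc;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return rc;	/* nonzero: no usable DMA addressing mode */
}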
7805 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
7806 * @phba: pointer to lpfc hba data structure.
7808 * This routine is invoked to unset the PCI device memory space for device
7809 * with SLI-3 interface spec.
7812 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
7814 struct pci_dev *pdev;
7816 /* Obtain PCI device reference */
7820 pdev = phba->pcidev;
7822 /* Free coherent DMA memory allocated */
7823 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
7824 phba->hbqslimp.virt, phba->hbqslimp.phys);
7825 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7826 phba->slim2p.virt, phba->slim2p.phys);
7828 /* I/O memory unmap */
7829 iounmap(phba->ctrl_regs_memmap_p);
7830 iounmap(phba->slim_memmap_p);
7836 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
7837 * @phba: pointer to lpfc hba data structure.
7839 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
7840 * done and check status.
7842 * Return 0 if successful, otherwise -ENODEV.
7845 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
7847 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
7848 struct lpfc_register reg_data;
7849 int i, port_error = 0;
7852 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
7853 memset(®_data, 0, sizeof(reg_data));
7854 if (!phba->sli4_hba.PSMPHRregaddr)
7857 /* Wait up to 30 seconds for the SLI Port POST done and ready */
7858 for (i = 0; i < 3000; i++) {
7859 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
7860 &portsmphr_reg.word0) ||
7861 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
7862 /* Port has a fatal POST error, break out */
7863 port_error = -ENODEV;
7866 if (LPFC_POST_STAGE_PORT_READY ==
7867 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
7873 * If there was a port error during POST, then don't proceed with
7874 * other register reads as the data may not be valid. Just exit.
7877 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7878 "1408 Port Failed POST - portsmphr=0x%x, "
7879 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
7880 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
7881 portsmphr_reg.word0,
7882 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
7883 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
7884 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
7885 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
7886 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
7887 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
7888 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
7889 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
7891 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7892 "2534 Device Info: SLIFamily=0x%x, "
7893 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
7894 "SLIHint_2=0x%x, FT=0x%x\n",
7895 bf_get(lpfc_sli_intf_sli_family,
7896 &phba->sli4_hba.sli_intf),
7897 bf_get(lpfc_sli_intf_slirev,
7898 &phba->sli4_hba.sli_intf),
7899 bf_get(lpfc_sli_intf_if_type,
7900 &phba->sli4_hba.sli_intf),
7901 bf_get(lpfc_sli_intf_sli_hint1,
7902 &phba->sli4_hba.sli_intf),
7903 bf_get(lpfc_sli_intf_sli_hint2,
7904 &phba->sli4_hba.sli_intf),
7905 bf_get(lpfc_sli_intf_func_type,
7906 &phba->sli4_hba.sli_intf));
7908 * Check for other Port errors during the initialization
7909 * process. Fail the load if the port did not come up
7912 if_type = bf_get(lpfc_sli_intf_if_type,
7913 &phba->sli4_hba.sli_intf);
7915 case LPFC_SLI_INTF_IF_TYPE_0:
7916 phba->sli4_hba.ue_mask_lo =
7917 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
7918 phba->sli4_hba.ue_mask_hi =
7919 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
7921 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
7923 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
7924 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
7925 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
7926 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7927 "1422 Unrecoverable Error "
7928 "Detected during POST "
7929 "uerr_lo_reg=0x%x, "
7930 "uerr_hi_reg=0x%x, "
7931 "ue_mask_lo_reg=0x%x, "
7932 "ue_mask_hi_reg=0x%x\n",
7935 phba->sli4_hba.ue_mask_lo,
7936 phba->sli4_hba.ue_mask_hi);
7937 port_error = -ENODEV;
7940 case LPFC_SLI_INTF_IF_TYPE_2:
7941 case LPFC_SLI_INTF_IF_TYPE_6:
7942 /* Final checks. The port status should be clean. */
7943 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7945 (bf_get(lpfc_sliport_status_err, ®_data) &&
7946 !bf_get(lpfc_sliport_status_rn, ®_data))) {
7947 phba->work_status[0] =
7948 readl(phba->sli4_hba.u.if_type2.
7950 phba->work_status[1] =
7951 readl(phba->sli4_hba.u.if_type2.
7953 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7954 "2888 Unrecoverable port error "
7955 "following POST: port status reg "
7956 "0x%x, port_smphr reg 0x%x, "
7957 "error 1=0x%x, error 2=0x%x\n",
7959 portsmphr_reg.word0,
7960 phba->work_status[0],
7961 phba->work_status[1]);
7962 port_error = -ENODEV;
7965 case LPFC_SLI_INTF_IF_TYPE_1:
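
/*
 * Illustrative alternative (hypothetical, assumes <linux/iopoll.h>): the
 * 30-second POST poll above expressed with readl_poll_timeout(), which
 * bounds the wait in time rather than iterations. Note the open-coded loop
 * also uses lpfc_readl() to tolerate failed PCI reads, which this sketch
 * does not.
 */
static int my_wait_post_ready(struct lpfc_hba *phba)
{
	struct lpfc_register reg;

	/* poll every 10ms, give up after 30 seconds */
	return readl_poll_timeout(phba->sli4_hba.PSMPHRregaddr, reg.word0,
				  bf_get(lpfc_port_smphr_perr, &reg) ||
				  bf_get(lpfc_port_smphr_port_status, &reg) ==
						LPFC_POST_STAGE_PORT_READY,
				  10000, 30000000);
}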
7974 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
7975 * @phba: pointer to lpfc hba data structure.
7976 * @if_type: The SLI4 interface type getting configured.
7978 * This routine is invoked to set up SLI4 BAR0 PCI config space register
7982 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
7985 case LPFC_SLI_INTF_IF_TYPE_0:
7986 phba->sli4_hba.u.if_type0.UERRLOregaddr =
7987 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
7988 phba->sli4_hba.u.if_type0.UERRHIregaddr =
7989 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
7990 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
7991 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
7992 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
7993 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
7994 phba->sli4_hba.SLIINTFregaddr =
7995 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
7997 case LPFC_SLI_INTF_IF_TYPE_2:
7998 phba->sli4_hba.u.if_type2.EQDregaddr =
7999 phba->sli4_hba.conf_regs_memmap_p +
8000 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
8001 phba->sli4_hba.u.if_type2.ERR1regaddr =
8002 phba->sli4_hba.conf_regs_memmap_p +
8003 LPFC_CTL_PORT_ER1_OFFSET;
8004 phba->sli4_hba.u.if_type2.ERR2regaddr =
8005 phba->sli4_hba.conf_regs_memmap_p +
8006 LPFC_CTL_PORT_ER2_OFFSET;
8007 phba->sli4_hba.u.if_type2.CTRLregaddr =
8008 phba->sli4_hba.conf_regs_memmap_p +
8009 LPFC_CTL_PORT_CTL_OFFSET;
8010 phba->sli4_hba.u.if_type2.STATUSregaddr =
8011 phba->sli4_hba.conf_regs_memmap_p +
8012 LPFC_CTL_PORT_STA_OFFSET;
8013 phba->sli4_hba.SLIINTFregaddr =
8014 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
8015 phba->sli4_hba.PSMPHRregaddr =
8016 phba->sli4_hba.conf_regs_memmap_p +
8017 LPFC_CTL_PORT_SEM_OFFSET;
8018 phba->sli4_hba.RQDBregaddr =
8019 phba->sli4_hba.conf_regs_memmap_p +
8020 LPFC_ULP0_RQ_DOORBELL;
8021 phba->sli4_hba.WQDBregaddr =
8022 phba->sli4_hba.conf_regs_memmap_p +
8023 LPFC_ULP0_WQ_DOORBELL;
8024 phba->sli4_hba.CQDBregaddr =
8025 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
8026 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8027 phba->sli4_hba.MQDBregaddr =
8028 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
8029 phba->sli4_hba.BMBXregaddr =
8030 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8032 case LPFC_SLI_INTF_IF_TYPE_6:
8033 phba->sli4_hba.u.if_type2.EQDregaddr =
8034 phba->sli4_hba.conf_regs_memmap_p +
8035 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
8036 phba->sli4_hba.u.if_type2.ERR1regaddr =
8037 phba->sli4_hba.conf_regs_memmap_p +
8038 LPFC_CTL_PORT_ER1_OFFSET;
8039 phba->sli4_hba.u.if_type2.ERR2regaddr =
8040 phba->sli4_hba.conf_regs_memmap_p +
8041 LPFC_CTL_PORT_ER2_OFFSET;
8042 phba->sli4_hba.u.if_type2.CTRLregaddr =
8043 phba->sli4_hba.conf_regs_memmap_p +
8044 LPFC_CTL_PORT_CTL_OFFSET;
8045 phba->sli4_hba.u.if_type2.STATUSregaddr =
8046 phba->sli4_hba.conf_regs_memmap_p +
8047 LPFC_CTL_PORT_STA_OFFSET;
8048 phba->sli4_hba.PSMPHRregaddr =
8049 phba->sli4_hba.conf_regs_memmap_p +
8050 LPFC_CTL_PORT_SEM_OFFSET;
8051 phba->sli4_hba.BMBXregaddr =
8052 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8054 case LPFC_SLI_INTF_IF_TYPE_1:
8056 dev_printk(KERN_ERR, &phba->pcidev->dev,
8057 "FATAL - unsupported SLI4 interface type - %d\n",
8064 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
8065 * @phba: pointer to lpfc hba data structure.
8067 * This routine is invoked to set up SLI4 BAR1 register memory map.
8070 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
8073 case LPFC_SLI_INTF_IF_TYPE_0:
8074 phba->sli4_hba.PSMPHRregaddr =
8075 phba->sli4_hba.ctrl_regs_memmap_p +
8076 LPFC_SLIPORT_IF0_SMPHR;
8077 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8079 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8081 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8084 case LPFC_SLI_INTF_IF_TYPE_6:
8085 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8086 LPFC_IF6_RQ_DOORBELL;
8087 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8088 LPFC_IF6_WQ_DOORBELL;
8089 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8090 LPFC_IF6_CQ_DOORBELL;
8091 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8092 LPFC_IF6_EQ_DOORBELL;
8093 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8094 LPFC_IF6_MQ_DOORBELL;
8096 case LPFC_SLI_INTF_IF_TYPE_2:
8097 case LPFC_SLI_INTF_IF_TYPE_1:
8099 dev_err(&phba->pcidev->dev,
8100 "FATAL - unsupported SLI4 interface type - %d\n",
8107 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
8108 * @phba: pointer to lpfc hba data structure.
8109 * @vf: virtual function number
8111 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
8112 * based on the given virtual function number, @vf.
8114 * Return 0 if successful, otherwise -ENODEV.
8117 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
8119 if (vf > LPFC_VIR_FUNC_MAX)
8122 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8123 vf * LPFC_VFR_PAGE_SIZE +
8124 LPFC_ULP0_RQ_DOORBELL);
8125 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8126 vf * LPFC_VFR_PAGE_SIZE +
8127 LPFC_ULP0_WQ_DOORBELL);
8128 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8129 vf * LPFC_VFR_PAGE_SIZE +
8130 LPFC_EQCQ_DOORBELL);
8131 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8132 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8133 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
8134 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8135 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
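/*
 * Illustrative sketch (editorial example, not driver code): with the
 * layout above, each virtual function owns one LPFC_VFR_PAGE_SIZE page
 * of doorbells in BAR2, so any VF register is "base + VF page + offset":
 *
 *	void __iomem *rq_db = phba->sli4_hba.drbl_regs_memmap_p +
 *			      vf * LPFC_VFR_PAGE_SIZE +
 *			      LPFC_ULP0_RQ_DOORBELL;
 */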
8140 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
8141 * @phba: pointer to lpfc hba data structure.
8143 * This routine is invoked to create the bootstrap mailbox
8144 * region consistent with the SLI-4 interface spec. This
8145 * routine allocates all memory necessary to communicate
8146 * mailbox commands to the port and sets up all alignment
8147 * needs. No locks are expected to be held when calling
8152 * -ENOMEM - could not allocate memory.
8155 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
8158 struct lpfc_dmabuf *dmabuf;
8159 struct dma_address *dma_address;
8163 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8168 * The bootstrap mailbox region consists of 2 parts
8169 * plus an alignment restriction of 16 bytes.
8171 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
8172 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
8173 &dmabuf->phys, GFP_KERNEL);
8174 if (!dmabuf->virt) {
8180 * Initialize the bootstrap mailbox pointers now so that the register
8181 * operations are simple later. The mailbox dma address is required
8182 * to be 16-byte aligned. Also align the virtual memory as each
8183 * mailbox is copied into the bmbx mailbox region before issuing the
8184 * command to the port.
8186 phba->sli4_hba.bmbx.dmabuf = dmabuf;
8187 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
8189 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
8190 LPFC_ALIGN_16_BYTE);
8191 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
8192 LPFC_ALIGN_16_BYTE);
8195 * Set the high and low physical addresses now. The SLI4 alignment
8196 * requirement is 16 bytes and the mailbox is posted to the port
8197 * as two 30-bit addresses. The other data is a bit marking whether
8198 * the 30-bit address is the high or low address.
8199 * Upcast bmbx aphys to 64 bits so the shift instruction compiles
8200 * cleanly on 32-bit machines.
8202 dma_address = &phba->sli4_hba.bmbx.dma_address;
8203 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
8204 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
8205 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
8206 LPFC_BMBX_BIT1_ADDR_HI);
8208 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
8209 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
8210 LPFC_BMBX_BIT1_ADDR_LO);
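/*
 * Worked example (editorial, mirrors the arithmetic above): for an
 * aligned physical address P, the port is given two tagged 30-bit
 * values:
 *
 *	addr_hi = (((P >> 34) & 0x3fffffff) << 2) | LPFC_BMBX_BIT1_ADDR_HI;
 *	addr_lo = (((P >> 4)  & 0x3fffffff) << 2) | LPFC_BMBX_BIT1_ADDR_LO;
 *
 * i.e. bits 63:34 of P in one register and bits 33:4 in the other, with
 * the low marker bits telling the port which half is which.
 */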
8215 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
8216 * @phba: pointer to lpfc hba data structure.
8218 * This routine is invoked to teardown the bootstrap mailbox
8219 * region and release all host resources. This routine requires
8220 * the caller to ensure all mailbox commands are recovered, no
8221 * additional mailbox commands are sent, and interrupts are disabled
8222 * before calling this routine.
8226 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
8228 dma_free_coherent(&phba->pcidev->dev,
8229 phba->sli4_hba.bmbx.bmbx_size,
8230 phba->sli4_hba.bmbx.dmabuf->virt,
8231 phba->sli4_hba.bmbx.dmabuf->phys);
8233 kfree(phba->sli4_hba.bmbx.dmabuf);
8234 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
8238 * lpfc_sli4_read_config - Get the config parameters.
8239 * @phba: pointer to lpfc hba data structure.
8241 * This routine is invoked to read the configuration parameters from the HBA.
8242 * The configuration parameters are used to set the base and maximum values
8243 * for RPIs, XRIs, VPIs, VFIs and FCFIs. These values also affect the resource
8244 * allocation for the port.
8248 * -ENOMEM - No available memory
8249 * -EIO - The mailbox failed to complete successfully.
8252 lpfc_sli4_read_config(struct lpfc_hba *phba)
8255 struct lpfc_mbx_read_config *rd_config;
8256 union lpfc_sli4_cfg_shdr *shdr;
8257 uint32_t shdr_status, shdr_add_status;
8258 struct lpfc_mbx_get_func_cfg *get_func_cfg;
8259 struct lpfc_rsrc_desc_fcfcoe *desc;
8261 uint16_t forced_link_speed;
8262 uint32_t if_type, qmin;
8263 int length, i, rc = 0, rc2;
8265 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8267 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8268 "2011 Unable to allocate memory for issuing "
8269 "SLI_CONFIG_SPECIAL mailbox command\n");
8273 lpfc_read_config(phba, pmb);
8275 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8276 if (rc != MBX_SUCCESS) {
8277 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8278 "2012 Mailbox failed, mbxCmd x%x "
8279 "READ_CONFIG, mbxStatus x%x\n",
8280 bf_get(lpfc_mqe_command, &pmb->u.mqe),
8281 bf_get(lpfc_mqe_status, &pmb->u.mqe));
8284 rd_config = &pmb->u.mqe.un.rd_config;
8285 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
8286 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
8287 phba->sli4_hba.lnk_info.lnk_tp =
8288 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
8289 phba->sli4_hba.lnk_info.lnk_no =
8290 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
8291 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8292 "3081 lnk_type:%d, lnk_numb:%d\n",
8293 phba->sli4_hba.lnk_info.lnk_tp,
8294 phba->sli4_hba.lnk_info.lnk_no);
8296 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8297 "3082 Mailbox (x%x) returned ldv:x0\n",
8298 bf_get(lpfc_mqe_command, &pmb->u.mqe));
8299 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
8300 phba->bbcredit_support = 1;
8301 phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
8304 phba->sli4_hba.conf_trunk =
8305 bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
8306 phba->sli4_hba.extents_in_use =
8307 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
8308 phba->sli4_hba.max_cfg_param.max_xri =
8309 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
8310 phba->sli4_hba.max_cfg_param.xri_base =
8311 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
8312 phba->sli4_hba.max_cfg_param.max_vpi =
8313 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
8314 /* Limit the max we support */
8315 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
8316 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
8317 phba->sli4_hba.max_cfg_param.vpi_base =
8318 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
8319 phba->sli4_hba.max_cfg_param.max_rpi =
8320 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
8321 phba->sli4_hba.max_cfg_param.rpi_base =
8322 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
8323 phba->sli4_hba.max_cfg_param.max_vfi =
8324 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
8325 phba->sli4_hba.max_cfg_param.vfi_base =
8326 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
8327 phba->sli4_hba.max_cfg_param.max_fcfi =
8328 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
8329 phba->sli4_hba.max_cfg_param.max_eq =
8330 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
8331 phba->sli4_hba.max_cfg_param.max_rq =
8332 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
8333 phba->sli4_hba.max_cfg_param.max_wq =
8334 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
8335 phba->sli4_hba.max_cfg_param.max_cq =
8336 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
8337 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
8338 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
8339 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
8340 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
8341 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
8342 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
8343 phba->max_vports = phba->max_vpi;
8344 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8345 "2003 cfg params Extents? %d "
8350 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
8351 phba->sli4_hba.extents_in_use,
8352 phba->sli4_hba.max_cfg_param.xri_base,
8353 phba->sli4_hba.max_cfg_param.max_xri,
8354 phba->sli4_hba.max_cfg_param.vpi_base,
8355 phba->sli4_hba.max_cfg_param.max_vpi,
8356 phba->sli4_hba.max_cfg_param.vfi_base,
8357 phba->sli4_hba.max_cfg_param.max_vfi,
8358 phba->sli4_hba.max_cfg_param.rpi_base,
8359 phba->sli4_hba.max_cfg_param.max_rpi,
8360 phba->sli4_hba.max_cfg_param.max_fcfi,
8361 phba->sli4_hba.max_cfg_param.max_eq,
8362 phba->sli4_hba.max_cfg_param.max_cq,
8363 phba->sli4_hba.max_cfg_param.max_wq,
8364 phba->sli4_hba.max_cfg_param.max_rq);
8367 * Calculate queue resources based on how
8368 * many WQ/CQ/EQs are available.
8370 qmin = phba->sli4_hba.max_cfg_param.max_wq;
8371 if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
8372 qmin = phba->sli4_hba.max_cfg_param.max_cq;
8373 if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
8374 qmin = phba->sli4_hba.max_cfg_param.max_eq;
8376 * What's left after this can go toward NVME / FCP.
8377 * The minus 4 accounts for ELS, NVME LS, MBOX
8378 * plus one extra. When configured for
8379 * NVMET, FCP I/O channel WQs are not created.
8383 /* If NVME is configured, double the number of CQ/WQs needed */
8384 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
8385 !phba->nvmet_support)
8388 /* Check to see if there is enough for NVME */
8389 if ((phba->cfg_irq_chann > qmin) ||
8390 (phba->cfg_hdw_queue > qmin)) {
8391 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8392 "2005 Reducing Queues: "
8393 "WQ %d CQ %d EQ %d: min %d: "
8395 phba->sli4_hba.max_cfg_param.max_wq,
8396 phba->sli4_hba.max_cfg_param.max_cq,
8397 phba->sli4_hba.max_cfg_param.max_eq,
8398 qmin, phba->cfg_irq_chann,
8399 phba->cfg_hdw_queue);
8401 if (phba->cfg_irq_chann > qmin)
8402 phba->cfg_irq_chann = qmin;
8403 if (phba->cfg_hdw_queue > qmin)
8404 phba->cfg_hdw_queue = qmin;
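/*
 * Worked example (hypothetical numbers, for illustration only): with
 * max_wq = 128, max_cq = 160 and max_eq = 64, qmin starts at 64; the
 * "minus 4" described above (ELS, NVME LS, MBOX, plus one extra) leaves
 * 60, and enabling NVME doubles the CQ/WQ demand, so cfg_irq_chann and
 * cfg_hdw_queue are clamped to whatever budget remains.
 */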
8411 /* Update link speed if forced link speed is supported */
8412 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8413 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
8415 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
8416 if (forced_link_speed) {
8417 phba->hba_flag |= HBA_FORCED_LINK_SPEED;
8419 switch (forced_link_speed) {
8421 phba->cfg_link_speed =
8422 LPFC_USER_LINK_SPEED_1G;
8425 phba->cfg_link_speed =
8426 LPFC_USER_LINK_SPEED_2G;
8429 phba->cfg_link_speed =
8430 LPFC_USER_LINK_SPEED_4G;
8433 phba->cfg_link_speed =
8434 LPFC_USER_LINK_SPEED_8G;
8436 case LINK_SPEED_10G:
8437 phba->cfg_link_speed =
8438 LPFC_USER_LINK_SPEED_10G;
8440 case LINK_SPEED_16G:
8441 phba->cfg_link_speed =
8442 LPFC_USER_LINK_SPEED_16G;
8444 case LINK_SPEED_32G:
8445 phba->cfg_link_speed =
8446 LPFC_USER_LINK_SPEED_32G;
8448 case LINK_SPEED_64G:
8449 phba->cfg_link_speed =
8450 LPFC_USER_LINK_SPEED_64G;
8453 phba->cfg_link_speed =
8454 LPFC_USER_LINK_SPEED_AUTO;
8457 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8458 "0047 Unrecognized link "
8461 phba->cfg_link_speed =
8462 LPFC_USER_LINK_SPEED_AUTO;
8467 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
8468 length = phba->sli4_hba.max_cfg_param.max_xri -
8469 lpfc_sli4_get_els_iocb_cnt(phba);
8470 if (phba->cfg_hba_queue_depth > length) {
8471 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8472 "3361 HBA queue depth changed from %d to %d\n",
8473 phba->cfg_hba_queue_depth, length);
8474 phba->cfg_hba_queue_depth = length;
8477 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
8478 LPFC_SLI_INTF_IF_TYPE_2)
8481 /* get the pf# and vf# for SLI4 if_type 2 port */
8482 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
8483 sizeof(struct lpfc_sli4_cfg_mhdr));
8484 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
8485 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
8486 length, LPFC_SLI4_MBX_EMBED);
8488 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8489 shdr = (union lpfc_sli4_cfg_shdr *)
8490 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
8491 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
8492 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
8493 if (rc2 || shdr_status || shdr_add_status) {
8494 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8495 "3026 Mailbox failed, mbxCmd x%x "
8496 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
8497 bf_get(lpfc_mqe_command, &pmb->u.mqe),
8498 bf_get(lpfc_mqe_status, &pmb->u.mqe));
8502 /* search for fc_fcoe resource descriptor */
8503 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
8505 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
8506 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
8507 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
8508 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
8509 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
8510 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
8513 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
8514 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
8515 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
8516 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
8517 phba->sli4_hba.iov.pf_number =
8518 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
8519 phba->sli4_hba.iov.vf_number =
8520 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
8525 if (i < LPFC_RSRC_DESC_MAX_NUM)
8526 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8527 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
8528 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
8529 phba->sli4_hba.iov.vf_number);
8531 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8532 "3028 GET_FUNCTION_CONFIG: failed to find "
8533 "Resource Descriptor:x%x\n",
8534 LPFC_RSRC_DESC_TYPE_FCFCOE);
8537 mempool_free(pmb, phba->mbox_mem_pool);
8542 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
8543 * @phba: pointer to lpfc hba data structure.
8545 * This routine is invoked to set up the port-side endian order when
8546 * the port if_type is 0. This routine has no function for other
8551 * -ENOMEM - No available memory
8552 * -EIO - The mailbox failed to complete successfully.
8555 lpfc_setup_endian_order(struct lpfc_hba *phba)
8557 LPFC_MBOXQ_t *mboxq;
8558 uint32_t if_type, rc = 0;
8559 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
8560 HOST_ENDIAN_HIGH_WORD1};
8562 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8564 case LPFC_SLI_INTF_IF_TYPE_0:
8565 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8568 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8569 "0492 Unable to allocate memory for "
8570 "issuing SLI_CONFIG_SPECIAL mailbox "
8576 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
8577 * two words to contain special data values and no other data.
8579 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
8580 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
8581 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8582 if (rc != MBX_SUCCESS) {
8583 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8584 "0493 SLI_CONFIG_SPECIAL mailbox "
8585 "failed with status x%x\n",
8589 mempool_free(mboxq, phba->mbox_mem_pool);
8591 case LPFC_SLI_INTF_IF_TYPE_6:
8592 case LPFC_SLI_INTF_IF_TYPE_2:
8593 case LPFC_SLI_INTF_IF_TYPE_1:
8601 * lpfc_sli4_queue_verify - Verify and update EQ counts
8602 * @phba: pointer to lpfc hba data structure.
8604 * This routine is invoked to check the user-settable queue counts for EQs.
8605 * After this routine is called the counts will be set to valid values that
8606 * adhere to the constraints of the system's interrupt vectors and the port's
8611 * -ENOMEM - No available memory
8614 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
8617 * Sanity check for configured queue parameters against the run-time
8621 if (phba->nvmet_support) {
8622 if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq)
8623 phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
8624 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
8625 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
8628 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8629 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
8630 phba->cfg_hdw_queue, phba->cfg_irq_chann,
8631 phba->cfg_nvmet_mrq);
8633 /* Get EQ depth from module parameter, fake the default for now */
8634 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8635 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
8637 /* Get CQ depth from module parameter, fake the default for now */
8638 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8639 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
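/*
 * Worked example (hypothetical values): with cfg_irq_chann = 4 and
 * cfg_nvmet_mrq = 16, the clamps above leave cfg_nvmet_mrq = 4 -- at
 * most one MRQ per IRQ vector, and never more than LPFC_NVMET_MRQ_MAX.
 */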
8644 lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
8646 struct lpfc_queue *qdesc;
8649 cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
8650 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8651 phba->sli4_hba.cq_esize,
8652 LPFC_CQE_EXP_COUNT, cpu);
8654 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8655 "0508 Failed allocate fast-path NVME CQ (%d)\n",
8659 qdesc->qe_valid = 1;
8660 qdesc->hdwq = wqidx;
8662 phba->sli4_hba.hdwq[wqidx].nvme_cq = qdesc;
8664 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8665 LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT,
8668 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8669 "0509 Failed allocate fast-path NVME WQ (%d)\n",
8673 qdesc->hdwq = wqidx;
8674 qdesc->chann = wqidx;
8675 phba->sli4_hba.hdwq[wqidx].nvme_wq = qdesc;
8676 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8681 lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
8683 struct lpfc_queue *qdesc;
8687 cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
8688 /* Create Fast Path FCP CQs */
8689 if (phba->enab_exp_wqcq_pages)
8690 /* Increase the CQ size when WQEs contain an embedded cdb */
8691 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8692 phba->sli4_hba.cq_esize,
8693 LPFC_CQE_EXP_COUNT, cpu);
8696 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8697 phba->sli4_hba.cq_esize,
8698 phba->sli4_hba.cq_ecount, cpu);
8700 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8701 "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
8704 qdesc->qe_valid = 1;
8705 qdesc->hdwq = wqidx;
8707 phba->sli4_hba.hdwq[wqidx].fcp_cq = qdesc;
8709 /* Create Fast Path FCP WQs */
8710 if (phba->enab_exp_wqcq_pages) {
8711 /* Increase the WQ size when WQEs contain an embedded cdb */
8712 wqesize = (phba->fcp_embed_io) ?
8713 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
8714 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8716 LPFC_WQE_EXP_COUNT, cpu);
8718 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8719 phba->sli4_hba.wq_esize,
8720 phba->sli4_hba.wq_ecount, cpu);
8723 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8724 "0503 Failed allocate fast-path FCP WQ (%d)\n",
8728 qdesc->hdwq = wqidx;
8729 qdesc->chann = wqidx;
8730 phba->sli4_hba.hdwq[wqidx].fcp_wq = qdesc;
8731 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
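/*
 * Design note (editorial sketch of the selection above): the WQ entry
 * size follows
 *
 *	wqesize = phba->fcp_embed_io ? LPFC_WQE128_SIZE
 *				     : phba->sli4_hba.wq_esize;
 *
 * 128-byte WQEs are only needed when the SCSI CDB is embedded in the
 * WQE, which is also when the expanded page size pays off.
 */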
8736 * lpfc_sli4_queue_create - Create all the SLI4 queues
8737 * @phba: pointer to lpfc hba data structure.
8739 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
8740 * operation. For each SLI4 queue type, the parameters such as queue entry
8741 * count (queue depth) shall be taken from the module parameter. For now,
8742 * we just use some constant number as a placeholder.
8746 * -ENOMEM - No available memory
8747 * -EIO - The mailbox failed to complete successfully.
8750 lpfc_sli4_queue_create(struct lpfc_hba *phba)
8752 struct lpfc_queue *qdesc;
8753 int idx, cpu, eqcpu;
8754 struct lpfc_sli4_hdw_queue *qp;
8755 struct lpfc_vector_map_info *cpup;
8756 struct lpfc_vector_map_info *eqcpup;
8757 struct lpfc_eq_intr_info *eqi;
8760 * Create HBA Record arrays.
8761 * Both NVME and FCP will share the same vectors / EQs
8763 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
8764 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
8765 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
8766 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
8767 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
8768 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
8769 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8770 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
8771 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8772 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
8774 if (!phba->sli4_hba.hdwq) {
8775 phba->sli4_hba.hdwq = kcalloc(
8776 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
8778 if (!phba->sli4_hba.hdwq) {
8779 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8780 "6427 Failed allocate memory for "
8781 "fast-path Hardware Queue array\n");
8784 /* Prepare hardware queues to take IO buffers */
8785 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
8786 qp = &phba->sli4_hba.hdwq[idx];
8787 spin_lock_init(&qp->io_buf_list_get_lock);
8788 spin_lock_init(&qp->io_buf_list_put_lock);
8789 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
8790 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
8791 qp->get_io_bufs = 0;
8792 qp->put_io_bufs = 0;
8793 qp->total_io_bufs = 0;
8794 spin_lock_init(&qp->abts_scsi_buf_list_lock);
8795 INIT_LIST_HEAD(&qp->lpfc_abts_scsi_buf_list);
8796 qp->abts_scsi_io_bufs = 0;
8797 spin_lock_init(&qp->abts_nvme_buf_list_lock);
8798 INIT_LIST_HEAD(&qp->lpfc_abts_nvme_buf_list);
8799 qp->abts_nvme_io_bufs = 0;
8803 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8804 if (phba->nvmet_support) {
8805 phba->sli4_hba.nvmet_cqset = kcalloc(
8806 phba->cfg_nvmet_mrq,
8807 sizeof(struct lpfc_queue *),
8809 if (!phba->sli4_hba.nvmet_cqset) {
8810 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8811 "3121 Fail allocate memory for "
8812 "fast-path CQ set array\n");
8815 phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
8816 phba->cfg_nvmet_mrq,
8817 sizeof(struct lpfc_queue *),
8819 if (!phba->sli4_hba.nvmet_mrq_hdr) {
8820 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8821 "3122 Fail allocate memory for "
8822 "fast-path RQ set hdr array\n");
8825 phba->sli4_hba.nvmet_mrq_data = kcalloc(
8826 phba->cfg_nvmet_mrq,
8827 sizeof(struct lpfc_queue *),
8829 if (!phba->sli4_hba.nvmet_mrq_data) {
8830 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8831 "3124 Fail allocate memory for "
8832 "fast-path RQ set data array\n");
8838 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
8840 /* Create HBA Event Queues (EQs) */
8841 for_each_present_cpu(cpu) {
8842 /* We only want to create 1 EQ per vector, even though
8843 * multiple CPUs might be using that vector, so we only
8844 * select the CPUs that are flagged LPFC_CPU_FIRST_IRQ.
8846 cpup = &phba->sli4_hba.cpu_map[cpu];
8847 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
8850 /* Get a ptr to the Hardware Queue associated with this CPU */
8851 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
8853 /* Allocate an EQ */
8854 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8855 phba->sli4_hba.eq_esize,
8856 phba->sli4_hba.eq_ecount, cpu);
8858 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8859 "0497 Failed allocate EQ (%d)\n",
8863 qdesc->qe_valid = 1;
8864 qdesc->hdwq = cpup->hdwq;
8865 qdesc->chann = cpu; /* First CPU this EQ is affinitised to */
8866 qdesc->last_cpu = qdesc->chann;
8868 /* Save the allocated EQ in the Hardware Queue */
8871 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
8872 list_add(&qdesc->cpu_list, &eqi->list);
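/*
 * Worked example (hypothetical mapping): with 2 IRQ vectors spread
 * across 4 CPUs, cpu_map might flag CPU0 and CPU2 as
 * LPFC_CPU_FIRST_IRQ; only those two passes allocate an EQ, while CPU1
 * and CPU3 inherit a shared EQ pointer in the loop that follows.
 */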
8875 /* Now we need to populate the other Hardware Queues that share
8876 * an IRQ vector with the associated EQ ptr.
8878 for_each_present_cpu(cpu) {
8879 cpup = &phba->sli4_hba.cpu_map[cpu];
8881 /* Check for EQ already allocated in previous loop */
8882 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
8885 /* Check for multiple CPUs per hdwq */
8886 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
8890 /* We need to share an EQ for this hdwq */
8891 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
8892 eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
8893 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
8896 /* Allocate SCSI SLI4 CQ/WQs */
8897 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
8898 if (lpfc_alloc_fcp_wq_cq(phba, idx))
8902 /* Allocate NVME SLI4 CQ/WQs */
8903 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8904 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
8905 if (lpfc_alloc_nvme_wq_cq(phba, idx))
8909 if (phba->nvmet_support) {
8910 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
8911 cpu = lpfc_find_cpu_handle(phba, idx,
8913 qdesc = lpfc_sli4_queue_alloc(
8915 LPFC_DEFAULT_PAGE_SIZE,
8916 phba->sli4_hba.cq_esize,
8917 phba->sli4_hba.cq_ecount,
8921 phba, KERN_ERR, LOG_INIT,
8922 "3142 Failed allocate NVME "
8923 "CQ Set (%d)\n", idx);
8926 qdesc->qe_valid = 1;
8929 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
8935 * Create Slow Path Completion Queues (CQs)
8938 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
8939 /* Create slow-path Mailbox Command Complete Queue */
8940 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8941 phba->sli4_hba.cq_esize,
8942 phba->sli4_hba.cq_ecount, cpu);
8944 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8945 "0500 Failed allocate slow-path mailbox CQ\n");
8948 qdesc->qe_valid = 1;
8949 phba->sli4_hba.mbx_cq = qdesc;
8951 /* Create slow-path ELS Complete Queue */
8952 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8953 phba->sli4_hba.cq_esize,
8954 phba->sli4_hba.cq_ecount, cpu);
8956 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8957 "0501 Failed allocate slow-path ELS CQ\n");
8960 qdesc->qe_valid = 1;
8962 phba->sli4_hba.els_cq = qdesc;
8966 * Create Slow Path Work Queues (WQs)
8969 /* Create Mailbox Command Queue */
8971 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8972 phba->sli4_hba.mq_esize,
8973 phba->sli4_hba.mq_ecount, cpu);
8975 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8976 "0505 Failed allocate slow-path MQ\n");
8980 phba->sli4_hba.mbx_wq = qdesc;
8983 * Create ELS Work Queues
8986 /* Create slow-path ELS Work Queue */
8987 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8988 phba->sli4_hba.wq_esize,
8989 phba->sli4_hba.wq_ecount, cpu);
8991 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8992 "0504 Failed allocate slow-path ELS WQ\n");
8996 phba->sli4_hba.els_wq = qdesc;
8997 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8999 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9000 /* Create NVME LS Complete Queue */
9001 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9002 phba->sli4_hba.cq_esize,
9003 phba->sli4_hba.cq_ecount, cpu);
9005 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9006 "6079 Failed allocate NVME LS CQ\n");
9010 qdesc->qe_valid = 1;
9011 phba->sli4_hba.nvmels_cq = qdesc;
9013 /* Create NVME LS Work Queue */
9014 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9015 phba->sli4_hba.wq_esize,
9016 phba->sli4_hba.wq_ecount, cpu);
9018 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9019 "6080 Failed allocate NVME LS WQ\n");
9023 phba->sli4_hba.nvmels_wq = qdesc;
9024 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9028 * Create Receive Queue (RQ)
9031 /* Create Receive Queue for header */
9032 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9033 phba->sli4_hba.rq_esize,
9034 phba->sli4_hba.rq_ecount, cpu);
9036 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9037 "0506 Failed allocate receive HRQ\n");
9040 phba->sli4_hba.hdr_rq = qdesc;
9042 /* Create Receive Queue for data */
9043 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9044 phba->sli4_hba.rq_esize,
9045 phba->sli4_hba.rq_ecount, cpu);
9047 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9048 "0507 Failed allocate receive DRQ\n");
9051 phba->sli4_hba.dat_rq = qdesc;
9053 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
9054 phba->nvmet_support) {
9055 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
9056 cpu = lpfc_find_cpu_handle(phba, idx,
9058 /* Create NVMET Receive Queue for header */
9059 qdesc = lpfc_sli4_queue_alloc(phba,
9060 LPFC_DEFAULT_PAGE_SIZE,
9061 phba->sli4_hba.rq_esize,
9062 LPFC_NVMET_RQE_DEF_COUNT,
9065 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9066 "3146 Failed allocate "
9071 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
9073 /* Only needed for header of RQ pair */
9074 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
9077 if (qdesc->rqbp == NULL) {
9078 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9079 "6131 Failed allocate "
9084 /* Put list in known state in case driver load fails. */
9085 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
9087 /* Create NVMET Receive Queue for data */
9088 qdesc = lpfc_sli4_queue_alloc(phba,
9089 LPFC_DEFAULT_PAGE_SIZE,
9090 phba->sli4_hba.rq_esize,
9091 LPFC_NVMET_RQE_DEF_COUNT,
9094 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9095 "3156 Failed allocate "
9100 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
9104 #if defined(BUILD_NVME)
9105 /* Clear NVME stats */
9106 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9107 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9108 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
9109 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
9114 /* Clear SCSI stats */
9115 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
9116 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9117 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
9118 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
9125 lpfc_sli4_queue_destroy(phba);
9130 __lpfc_sli4_release_queue(struct lpfc_queue **qp)
9133 lpfc_sli4_queue_free(*qp);
9139 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
9146 for (idx = 0; idx < max; idx++)
9147 __lpfc_sli4_release_queue(&(*qs)[idx]);
9154 lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
9156 struct lpfc_sli4_hdw_queue *hdwq;
9157 struct lpfc_queue *eq;
9160 hdwq = phba->sli4_hba.hdwq;
9162 /* Loop thru all Hardware Queues */
9163 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9164 /* Free the CQ/WQ corresponding to the Hardware Queue */
9165 lpfc_sli4_queue_free(hdwq[idx].fcp_cq);
9166 lpfc_sli4_queue_free(hdwq[idx].nvme_cq);
9167 lpfc_sli4_queue_free(hdwq[idx].fcp_wq);
9168 lpfc_sli4_queue_free(hdwq[idx].nvme_wq);
9169 hdwq[idx].hba_eq = NULL;
9170 hdwq[idx].fcp_cq = NULL;
9171 hdwq[idx].nvme_cq = NULL;
9172 hdwq[idx].fcp_wq = NULL;
9173 hdwq[idx].nvme_wq = NULL;
9175 /* Loop thru all IRQ vectors */
9176 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
9177 /* Free the EQ corresponding to the IRQ vector */
9178 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
9179 lpfc_sli4_queue_free(eq);
9180 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
9185 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
9186 * @phba: pointer to lpfc hba data structure.
9188 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
9193 * -ENOMEM - No available memory
9194 * -EIO - The mailbox failed to complete successfully.
9197 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
9200 * Set FREE_INIT before beginning to free the queues.
9201 * Wait until the users of the queues acknowledge the
9202 * release by clearing FREE_WAIT.
9204 spin_lock_irq(&phba->hbalock);
9205 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
9206 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
9207 spin_unlock_irq(&phba->hbalock);
9209 spin_lock_irq(&phba->hbalock);
9211 spin_unlock_irq(&phba->hbalock);
9213 /* Release HBA eqs */
9214 if (phba->sli4_hba.hdwq)
9215 lpfc_sli4_release_hdwq(phba);
9217 if (phba->nvmet_support) {
9218 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
9219 phba->cfg_nvmet_mrq);
9221 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
9222 phba->cfg_nvmet_mrq);
9223 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
9224 phba->cfg_nvmet_mrq);
9227 /* Release mailbox command work queue */
9228 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
9230 /* Release ELS work queue */
9231 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
9233 /* Release NVME LS work queue */
9234 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
9236 /* Release unsolicited receive queue */
9237 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
9238 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
9240 /* Release ELS complete queue */
9241 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
9243 /* Release NVME LS complete queue */
9244 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
9246 /* Release mailbox command complete queue */
9247 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
9249 /* Everything on this list has been freed */
9250 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
9252 /* Done with freeing the queues */
9253 spin_lock_irq(&phba->hbalock);
9254 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
9255 spin_unlock_irq(&phba->hbalock);
9259 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
9261 struct lpfc_rqb *rqbp;
9262 struct lpfc_dmabuf *h_buf;
9263 struct rqb_dmabuf *rqb_buffer;
9266 while (!list_empty(&rqbp->rqb_buffer_list)) {
9267 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
9268 struct lpfc_dmabuf, list);
9270 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
9271 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
9272 rqbp->buffer_count--;
9278 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
9279 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
9280 int qidx, uint32_t qtype)
9282 struct lpfc_sli_ring *pring;
9285 if (!eq || !cq || !wq) {
9286 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9287 "6085 Fast-path %s (%d) not allocated\n",
9288 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
9292 /* Create the CQ first */
9293 rc = lpfc_cq_create(phba, cq, eq,
9294 (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
9296 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9297 "6086 Failed setup of CQ (%d), rc = 0x%x\n",
9298 qidx, (uint32_t)rc);
9302 if (qtype != LPFC_MBOX) {
9303 /* Setup cq_map for fast lookup */
9305 *cq_map = cq->queue_id;
9307 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9308 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
9309 qidx, cq->queue_id, qidx, eq->queue_id);
9312 rc = lpfc_wq_create(phba, wq, cq, qtype);
9314 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9315 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
9316 qidx, (uint32_t)rc);
9317 /* no need to tear down cq - caller will do so */
9321 /* Bind this CQ/WQ to the NVME ring */
9323 pring->sli.sli4.wqp = (void *)wq;
9326 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9327 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
9328 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
9330 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
9332 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9333 "0539 Failed setup of slow-path MQ: "
9335 /* no need to tear down cq - caller will do so */
9339 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9340 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
9341 phba->sli4_hba.mbx_wq->queue_id,
9342 phba->sli4_hba.mbx_cq->queue_id);
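/*
 * Usage sketch (editorial; the fast-path callers in
 * lpfc_sli4_queue_setup() below follow this shape): each hardware queue
 * pairs its CQ/WQ with the owning EQ, e.g. for FCP:
 *
 *	rc = lpfc_create_wq_cq(phba, qp[idx].hba_eq, qp[idx].fcp_cq,
 *			       qp[idx].fcp_wq,
 *			       &phba->sli4_hba.hdwq[idx].fcp_cq_map,
 *			       idx, LPFC_FCP);
 */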
9349 * lpfc_setup_cq_lookup - Setup the CQ lookup table
9350 * @phba: pointer to lpfc hba data structure.
9352 * This routine will populate the cq_lookup table with all
9353 * available CQ queue_ids.
9356 lpfc_setup_cq_lookup(struct lpfc_hba *phba)
9358 struct lpfc_queue *eq, *childq;
9361 memset(phba->sli4_hba.cq_lookup, 0,
9362 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
9363 /* Loop thru all IRQ vectors */
9364 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9365 /* Get the EQ corresponding to the IRQ vector */
9366 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
9369 /* Loop through all CQs associated with that EQ */
9370 list_for_each_entry(childq, &eq->child_list, list) {
9371 if (childq->queue_id > phba->sli4_hba.cq_max)
9373 if ((childq->subtype == LPFC_FCP) ||
9374 (childq->subtype == LPFC_NVME))
9375 phba->sli4_hba.cq_lookup[childq->queue_id] =
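/*
 * Consumer sketch (editorial, inferred from the table's purpose): once
 * populated, the fast-path interrupt handler can resolve a CQE's queue
 * id to its CQ in O(1):
 *
 *	cq = phba->sli4_hba.cq_lookup[cqid];
 */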
9382 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
9383 * @phba: pointer to lpfc hba data structure.
9385 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
9390 * -ENOMEM - No available memory
9391 * -EIO - The mailbox failed to complete successfully.
9394 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
9396 uint32_t shdr_status, shdr_add_status;
9397 union lpfc_sli4_cfg_shdr *shdr;
9398 struct lpfc_vector_map_info *cpup;
9399 struct lpfc_sli4_hdw_queue *qp;
9400 LPFC_MBOXQ_t *mboxq;
9402 uint32_t length, usdelay;
9405 /* Check for dual-ULP support */
9406 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9408 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9409 "3249 Unable to allocate memory for "
9410 "QUERY_FW_CFG mailbox command\n");
9413 length = (sizeof(struct lpfc_mbx_query_fw_config) -
9414 sizeof(struct lpfc_sli4_cfg_mhdr));
9415 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9416 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
9417 length, LPFC_SLI4_MBX_EMBED);
9419 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9421 shdr = (union lpfc_sli4_cfg_shdr *)
9422 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
9423 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9424 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9425 if (shdr_status || shdr_add_status || rc) {
9426 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9427 "3250 QUERY_FW_CFG mailbox failed with status "
9428 "x%x add_status x%x, mbx status x%x\n",
9429 shdr_status, shdr_add_status, rc);
9430 if (rc != MBX_TIMEOUT)
9431 mempool_free(mboxq, phba->mbox_mem_pool);
9436 phba->sli4_hba.fw_func_mode =
9437 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
9438 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
9439 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
9440 phba->sli4_hba.physical_port =
9441 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
9442 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9443 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
9444 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
9445 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
9447 if (rc != MBX_TIMEOUT)
9448 mempool_free(mboxq, phba->mbox_mem_pool);
9451 * Set up HBA Event Queues (EQs)
9453 qp = phba->sli4_hba.hdwq;
9455 /* Set up HBA event queue */
9457 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9458 "3147 Fast-path EQs not allocated\n");
9463 /* Loop thru all IRQ vectors */
9464 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9465 /* Create HBA Event Queues (EQs) in order */
9466 for_each_present_cpu(cpu) {
9467 cpup = &phba->sli4_hba.cpu_map[cpu];
9469 /* Look for the CPU that's using that vector with
9470 * LPFC_CPU_FIRST_IRQ set.
9472 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
9474 if (qidx != cpup->eq)
9477 /* Create an EQ for that vector */
9478 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
9479 phba->cfg_fcp_imax);
9481 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9482 "0523 Failed setup of fast-path"
9483 " EQ (%d), rc = 0x%x\n",
9484 cpup->eq, (uint32_t)rc);
9488 /* Save the EQ for that vector in the hba_eq_hdl */
9489 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
9490 qp[cpup->hdwq].hba_eq;
9492 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9493 "2584 HBA EQ setup: queue[%d]-id=%d\n",
9495 qp[cpup->hdwq].hba_eq->queue_id);
9499 /* Loop thru all Hardware Queues */
9500 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9501 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
9502 cpu = lpfc_find_cpu_handle(phba, qidx,
9504 cpup = &phba->sli4_hba.cpu_map[cpu];
9506 /* Create the CQ/WQ corresponding to the
9509 rc = lpfc_create_wq_cq(phba,
9510 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
9513 &phba->sli4_hba.hdwq[qidx].nvme_cq_map,
9516 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9517 "6123 Failed to setup fastpath "
9518 "NVME WQ/CQ (%d), rc = 0x%x\n",
9519 qidx, (uint32_t)rc);
9525 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
9526 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
9527 cpup = &phba->sli4_hba.cpu_map[cpu];
9529 /* Create the CQ/WQ corresponding to the Hardware Queue */
9530 rc = lpfc_create_wq_cq(phba,
9531 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
9534 &phba->sli4_hba.hdwq[qidx].fcp_cq_map,
9537 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9538 "0535 Failed to setup fastpath "
9539 "FCP WQ/CQ (%d), rc = 0x%x\n",
9540 qidx, (uint32_t)rc);
9546 * Set up Slow Path Completion Queues (CQs)
9549 /* Set up slow-path MBOX CQ/MQ */
9551 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
9552 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9553 "0528 %s not allocated\n",
9554 phba->sli4_hba.mbx_cq ?
9555 "Mailbox WQ" : "Mailbox CQ");
9560 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9561 phba->sli4_hba.mbx_cq,
9562 phba->sli4_hba.mbx_wq,
9563 NULL, 0, LPFC_MBOX);
9565 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9566 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
9570 if (phba->nvmet_support) {
9571 if (!phba->sli4_hba.nvmet_cqset) {
9572 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9573 "3165 Fast-path NVME CQ Set "
9574 "array not allocated\n");
9578 if (phba->cfg_nvmet_mrq > 1) {
9579 rc = lpfc_cq_create_set(phba,
9580 phba->sli4_hba.nvmet_cqset,
9582 LPFC_WCQ, LPFC_NVMET);
9584 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9585 "3164 Failed setup of NVME CQ "
9591 /* Set up NVMET Receive Complete Queue */
9592 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
9594 LPFC_WCQ, LPFC_NVMET);
9596 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9597 "6089 Failed setup NVMET CQ: "
9598 "rc = 0x%x\n", (uint32_t)rc);
9601 phba->sli4_hba.nvmet_cqset[0]->chann = 0;
9603 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9604 "6090 NVMET CQ setup: cq-id=%d, "
9605 "parent eq-id=%d\n",
9606 phba->sli4_hba.nvmet_cqset[0]->queue_id,
9607 qp[0].hba_eq->queue_id);
9611 /* Set up slow-path ELS WQ/CQ */
9612 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
9613 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9614 "0530 ELS %s not allocated\n",
9615 phba->sli4_hba.els_cq ? "WQ" : "CQ");
9619 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9620 phba->sli4_hba.els_cq,
9621 phba->sli4_hba.els_wq,
9624 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9625 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
9629 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9630 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
9631 phba->sli4_hba.els_wq->queue_id,
9632 phba->sli4_hba.els_cq->queue_id);
9634 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9635 /* Set up NVME LS Complete Queue */
9636 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
9637 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9638 "6091 LS %s not allocated\n",
9639 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
9643 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9644 phba->sli4_hba.nvmels_cq,
9645 phba->sli4_hba.nvmels_wq,
9646 NULL, 0, LPFC_NVME_LS);
9648 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9649 "0526 Failed setup of NVME LS WQ/CQ: "
9650 "rc = 0x%x\n", (uint32_t)rc);
9654 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9655 "6096 NVME LS WQ setup: wq-id=%d, "
9656 "parent cq-id=%d\n",
9657 phba->sli4_hba.nvmels_wq->queue_id,
9658 phba->sli4_hba.nvmels_cq->queue_id);
9662 * Create NVMET Receive Queue (RQ)
9664 if (phba->nvmet_support) {
9665 if ((!phba->sli4_hba.nvmet_cqset) ||
9666 (!phba->sli4_hba.nvmet_mrq_hdr) ||
9667 (!phba->sli4_hba.nvmet_mrq_data)) {
9668 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9669 "6130 MRQ CQ Queues not "
9674 if (phba->cfg_nvmet_mrq > 1) {
9675 rc = lpfc_mrq_create(phba,
9676 phba->sli4_hba.nvmet_mrq_hdr,
9677 phba->sli4_hba.nvmet_mrq_data,
9678 phba->sli4_hba.nvmet_cqset,
9681 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9682 "6098 Failed setup of NVMET "
9689 rc = lpfc_rq_create(phba,
9690 phba->sli4_hba.nvmet_mrq_hdr[0],
9691 phba->sli4_hba.nvmet_mrq_data[0],
9692 phba->sli4_hba.nvmet_cqset[0],
9695 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9696 "6057 Failed setup of NVMET "
9697 "Receive Queue: rc = 0x%x\n",
9703 phba, KERN_INFO, LOG_INIT,
9704 "6099 NVMET RQ setup: hdr-rq-id=%d, "
9705 "dat-rq-id=%d parent cq-id=%d\n",
9706 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
9707 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
9708 phba->sli4_hba.nvmet_cqset[0]->queue_id);
9713 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
9714 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9715 "0540 Receive Queue not allocated\n");
9720 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
9721 phba->sli4_hba.els_cq, LPFC_USOL);
9723 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9724 "0541 Failed setup of Receive Queue: "
9725 "rc = 0x%x\n", (uint32_t)rc);
9729 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9730 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
9731 "parent cq-id=%d\n",
9732 phba->sli4_hba.hdr_rq->queue_id,
9733 phba->sli4_hba.dat_rq->queue_id,
9734 phba->sli4_hba.els_cq->queue_id);
9736 if (phba->cfg_fcp_imax)
9737 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
9741 for (qidx = 0; qidx < phba->cfg_irq_chann;
9742 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
9743 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
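/*
 * Worked example: with cfg_fcp_imax = 50000 interrupts/sec,
 * usdelay = LPFC_SEC_TO_USEC / 50000 = 20 us of coalescing delay per
 * EQ, programmed LPFC_MAX_EQ_DELAY_EQID_CNT EQs at a time by the loop
 * above.
 */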
9746 if (phba->sli4_hba.cq_max) {
9747 kfree(phba->sli4_hba.cq_lookup);
9748 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
9749 sizeof(struct lpfc_queue *), GFP_KERNEL);
9750 if (!phba->sli4_hba.cq_lookup) {
9751 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9752 "0549 Failed setup of CQ Lookup table: "
9753 "size 0x%x\n", phba->sli4_hba.cq_max);
9757 lpfc_setup_cq_lookup(phba);
9762 lpfc_sli4_queue_unset(phba);
9768 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
9769 * @phba: pointer to lpfc hba data structure.
9771 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
9776 * -ENOMEM - No available memory
9777 * -EIO - The mailbox failed to complete successfully.
9780 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
9782 struct lpfc_sli4_hdw_queue *qp;
9783 struct lpfc_queue *eq;
9786 /* Unset mailbox command work queue */
9787 if (phba->sli4_hba.mbx_wq)
9788 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
9790 /* Unset NVME LS work queue */
9791 if (phba->sli4_hba.nvmels_wq)
9792 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
9794 /* Unset ELS work queue */
9795 if (phba->sli4_hba.els_wq)
9796 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
9798 /* Unset unsolicited receive queue */
9799 if (phba->sli4_hba.hdr_rq)
9800 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
9801 phba->sli4_hba.dat_rq);
9803 /* Unset mailbox command complete queue */
9804 if (phba->sli4_hba.mbx_cq)
9805 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
9807 /* Unset ELS complete queue */
9808 if (phba->sli4_hba.els_cq)
9809 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
9811 /* Unset NVME LS complete queue */
9812 if (phba->sli4_hba.nvmels_cq)
9813 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
9815 if (phba->nvmet_support) {
9816 /* Unset NVMET MRQ queue */
9817 if (phba->sli4_hba.nvmet_mrq_hdr) {
9818 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
9821 phba->sli4_hba.nvmet_mrq_hdr[qidx],
9822 phba->sli4_hba.nvmet_mrq_data[qidx]);
9825 /* Unset NVMET CQ Set complete queue */
9826 if (phba->sli4_hba.nvmet_cqset) {
9827 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
9829 phba, phba->sli4_hba.nvmet_cqset[qidx]);
9833 /* Unset fast-path SLI4 queues */
9834 if (phba->sli4_hba.hdwq) {
9835 /* Loop thru all Hardware Queues */
9836 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
9837 /* Destroy the CQ/WQ corresponding to Hardware Queue */
9838 qp = &phba->sli4_hba.hdwq[qidx];
9839 lpfc_wq_destroy(phba, qp->fcp_wq);
9840 lpfc_wq_destroy(phba, qp->nvme_wq);
9841 lpfc_cq_destroy(phba, qp->fcp_cq);
9842 lpfc_cq_destroy(phba, qp->nvme_cq);
9844 /* Loop thru all IRQ vectors */
9845 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9846 /* Destroy the EQ corresponding to the IRQ vector */
9847 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
9848 lpfc_eq_destroy(phba, eq);
9852 kfree(phba->sli4_hba.cq_lookup);
9853 phba->sli4_hba.cq_lookup = NULL;
9854 phba->sli4_hba.cq_max = 0;
9858 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
9859 * @phba: pointer to lpfc hba data structure.
9861 * This routine is invoked to allocate and set up a pool of completion queue
9862 * events. The body of the completion queue event is a completion queue entry
9863 * (CQE). For now, this pool is used for the interrupt service routine to queue
9864 * the following HBA completion queue events for the worker thread to process:
9865 * - Mailbox asynchronous events
9866 * - Receive queue completion unsolicited events
9867 * Later, this can be used for all the slow-path events.
9871 * -ENOMEM - No available memory
9874 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
9876 struct lpfc_cq_event *cq_event;
9879 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
9880 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
9882 goto out_pool_create_fail;
9883 list_add_tail(&cq_event->list,
9884 &phba->sli4_hba.sp_cqe_event_pool);
9888 out_pool_create_fail:
9889 lpfc_sli4_cq_event_pool_destroy(phba);
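/*
 * Sizing note (derived from the loop above): the pool is seeded with
 * 4 * cq_ecount events, enough for the ISR to stage several full CQs'
 * worth of slow-path events before the worker thread drains them.
 */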
9894 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
9895 * @phba: pointer to lpfc hba data structure.
9897 * This routine is invoked to free the pool of completion queue events at
9898 * driver unload time. Note that it is the responsibility of the driver
9899 * cleanup routine to free all the outstanding completion-queue events
9900 * allocated from this pool back into the pool before invoking this routine
9901 * to destroy the pool.
9904 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
9906 struct lpfc_cq_event *cq_event, *next_cq_event;
9908 list_for_each_entry_safe(cq_event, next_cq_event,
9909 &phba->sli4_hba.sp_cqe_event_pool, list) {
9910 list_del(&cq_event->list);
9916 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
9917 * @phba: pointer to lpfc hba data structure.
9919 * This routine is the lock-free version of the API invoked to allocate a
9920 * completion-queue event from the free pool.
9922 * Return: Pointer to the newly allocated completion-queue event if successful
9925 struct lpfc_cq_event *
9926 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
9928 struct lpfc_cq_event *cq_event = NULL;
9930 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
9931 struct lpfc_cq_event, list);
9936 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
9937 * @phba: pointer to lpfc hba data structure.
9939 * This routine is the locked version of the API invoked to allocate a
9940 * completion-queue event from the free pool.
9942 * Return: Pointer to the newly allocated completion-queue event if successful
9945 struct lpfc_cq_event *
9946 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
9948 struct lpfc_cq_event *cq_event;
9949 unsigned long iflags;
9951 spin_lock_irqsave(&phba->hbalock, iflags);
9952 cq_event = __lpfc_sli4_cq_event_alloc(phba);
9953 spin_unlock_irqrestore(&phba->hbalock, iflags);
9958 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
9959 * @phba: pointer to lpfc hba data structure.
9960 * @cq_event: pointer to the completion queue event to be freed.
9962 * This routine is the lock-free version of the API invoked to release a
9963 * completion-queue event back into the free pool.
9966 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
9967 struct lpfc_cq_event *cq_event)
9969 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
9973 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
9974 * @phba: pointer to lpfc hba data structure.
9975 * @cq_event: pointer to the completion queue event to be freed.
9977 * This routine is the locked version of the API invoked to release a
9978 * completion-queue event back into the free pool.
9981 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
9982 struct lpfc_cq_event *cq_event)
9984 unsigned long iflags;
9985 spin_lock_irqsave(&phba->hbalock, iflags);
9986 __lpfc_sli4_cq_event_release(phba, cq_event);
9987 spin_unlock_irqrestore(&phba->hbalock, iflags);
9991 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
9992 * @phba: pointer to lpfc hba data structure.
9994 * This routine frees all the pending completion-queue events
9995 * back into the free pool for device reset.
9998 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
10000 LIST_HEAD(cqelist);
10001 struct lpfc_cq_event *cqe;
10002 unsigned long iflags;
10004 /* Retrieve all the pending WCQEs from pending WCQE lists */
10005 spin_lock_irqsave(&phba->hbalock, iflags);
10006 /* Pending FCP XRI abort events */
10007 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
10009 /* Pending ELS XRI abort events */
10010 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
10012 /* Pending async events */
10013 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
10015 spin_unlock_irqrestore(&phba->hbalock, iflags);
10017 while (!list_empty(&cqelist)) {
10018 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
10019 lpfc_sli4_cq_event_release(phba, cqe);
10024 * lpfc_pci_function_reset - Reset pci function.
10025 * @phba: pointer to lpfc hba data structure.
10027 * This routine is invoked to request a PCI function reset. It destroys
10028 * all resources assigned to the PCI function that originates this request.
10032 * -ENOMEM - No available memory
10033 * -EIO - The mailbox failed to complete successfully.
10036 lpfc_pci_function_reset(struct lpfc_hba *phba)
10038 LPFC_MBOXQ_t *mboxq;
10039 uint32_t rc = 0, if_type;
10040 uint32_t shdr_status, shdr_add_status;
10042 uint32_t port_reset = 0;
10043 union lpfc_sli4_cfg_shdr *shdr;
10044 struct lpfc_register reg_data;
10047 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10049 case LPFC_SLI_INTF_IF_TYPE_0:
10050 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10053 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10054 "0494 Unable to allocate memory for "
10055 "issuing SLI_FUNCTION_RESET mailbox "
10060 /* Setup PCI function reset mailbox-ioctl command */
10061 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
10062 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
10063 LPFC_SLI4_MBX_EMBED);
10064 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10065 shdr = (union lpfc_sli4_cfg_shdr *)
10066 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
10067 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10068 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
10070 if (rc != MBX_TIMEOUT)
10071 mempool_free(mboxq, phba->mbox_mem_pool);
10072 if (shdr_status || shdr_add_status || rc) {
10073 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10074 "0495 SLI_FUNCTION_RESET mailbox "
10075 "failed with status x%x add_status x%x,"
10076 " mbx status x%x\n",
10077 shdr_status, shdr_add_status, rc);
10081 case LPFC_SLI_INTF_IF_TYPE_2:
10082 case LPFC_SLI_INTF_IF_TYPE_6:
10085 * Poll the Port Status Register and wait for RDY for
10086 * up to 30 seconds. If the port doesn't respond, treat
10089 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
10090 if (lpfc_readl(phba->sli4_hba.u.if_type2.
10091 STATUSregaddr, &reg_data.word0)) {
10095 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
10100 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
10101 phba->work_status[0] = readl(
10102 phba->sli4_hba.u.if_type2.ERR1regaddr);
10103 phba->work_status[1] = readl(
10104 phba->sli4_hba.u.if_type2.ERR2regaddr);
10105 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10106 "2890 Port not ready, port status reg "
10107 "0x%x error 1=0x%x, error 2=0x%x\n",
10109 phba->work_status[0],
10110 phba->work_status[1]);
10117 * Reset the port now
10119 reg_data.word0 = 0;
10120 bf_set(lpfc_sliport_ctrl_end, &reg_data,
10121 LPFC_SLIPORT_LITTLE_ENDIAN);
10122 bf_set(lpfc_sliport_ctrl_ip, &reg_data,
10123 LPFC_SLIPORT_INIT_PORT);
10124 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
10127 pci_read_config_word(phba->pcidev,
10128 PCI_DEVICE_ID, &devid);
10133 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
10139 case LPFC_SLI_INTF_IF_TYPE_1:
10145 /* Catch the not-ready port failure after a port reset. */
10147 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10148 "3317 HBA not functional: IP Reset Failed "
10149 "try: echo fw_reset > board_mode\n");
10157 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
10158 * @phba: pointer to lpfc hba data structure.
10160 * This routine is invoked to set up the PCI device memory space for device
10161 * with SLI-4 interface spec.
10165 * other values - error
10168 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
10170 struct pci_dev *pdev = phba->pcidev;
10171 unsigned long bar0map_len, bar1map_len, bar2map_len;
10178 /* Set the device DMA mask size */
10179 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10181 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10186 * The BARs and register set definitions and offset locations are
10187 * dependent on the if_type.
10189 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
10190 &phba->sli4_hba.sli_intf.word0)) {
10194 /* There is no SLI3 fallback for SLI4 devices. */
10195 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
10196 LPFC_SLI_INTF_VALID) {
10197 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10198 "2894 SLI_INTF reg contents invalid "
10199 "sli_intf reg 0x%x\n",
10200 phba->sli4_hba.sli_intf.word0);
10204 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10206 * Get the bus address of SLI4 device BAR regions and the
10207 * number of bytes required by each mapping. The mapping of the
10208 * particular PCI BAR regions is dependent on the type of SLI4 device.
10211 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
10212 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
10213 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
10216 * Map SLI4 PCI Config Space Register base to a kernel virtual
10219 phba->sli4_hba.conf_regs_memmap_p =
10220 ioremap(phba->pci_bar0_map, bar0map_len);
10221 if (!phba->sli4_hba.conf_regs_memmap_p) {
10222 dev_printk(KERN_ERR, &pdev->dev,
10223 "ioremap failed for SLI4 PCI config "
10227 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
10228 /* Set up BAR0 PCI config space register memory map */
10229 lpfc_sli4_bar0_register_memmap(phba, if_type);
10231 phba->pci_bar0_map = pci_resource_start(pdev, 1);
10232 bar0map_len = pci_resource_len(pdev, 1);
10233 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10234 dev_printk(KERN_ERR, &pdev->dev,
10235 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
10238 phba->sli4_hba.conf_regs_memmap_p =
10239 ioremap(phba->pci_bar0_map, bar0map_len);
10240 if (!phba->sli4_hba.conf_regs_memmap_p) {
10241 dev_printk(KERN_ERR, &pdev->dev,
10242 "ioremap failed for SLI4 PCI config "
10246 lpfc_sli4_bar0_register_memmap(phba, if_type);
10249 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10250 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
10252 * Map SLI4 if type 0 HBA Control Register base to a
10253 * kernel virtual address and setup the registers.
10255 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
10257 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10258 phba->sli4_hba.ctrl_regs_memmap_p =
10259 ioremap(phba->pci_bar1_map, bar1map_len);
10261 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
10262 dev_err(&pdev->dev,
10263 "ioremap failed for SLI4 HBA "
10264 "control registers.\n");
10266 goto out_iounmap_conf;
10268 phba->pci_bar2_memmap_p =
10269 phba->sli4_hba.ctrl_regs_memmap_p;
10270 lpfc_sli4_bar1_register_memmap(phba, if_type);
10273 goto out_iounmap_conf;
10277 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
10278 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
10280 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
10281 * virtual address and setup the registers.
10283 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
10284 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10285 phba->sli4_hba.drbl_regs_memmap_p =
10286 ioremap(phba->pci_bar1_map, bar1map_len);
10287 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10288 dev_err(&pdev->dev,
10289 "ioremap failed for SLI4 HBA doorbell registers.\n");
10291 goto out_iounmap_conf;
10293 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
10294 lpfc_sli4_bar1_register_memmap(phba, if_type);
10297 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10298 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10300 * Map SLI4 if type 0 HBA Doorbell Register base to
10301 * a kernel virtual address and setup the registers.
10303 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
10305 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10306 phba->sli4_hba.drbl_regs_memmap_p =
10307 ioremap(phba->pci_bar2_map, bar2map_len);
10309 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10310 dev_err(&pdev->dev,
10311 "ioremap failed for SLI4 HBA"
10312 " doorbell registers.\n");
10314 goto out_iounmap_ctrl;
10316 phba->pci_bar4_memmap_p =
10317 phba->sli4_hba.drbl_regs_memmap_p;
10318 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
10320 goto out_iounmap_all;
10323 goto out_iounmap_all;
10327 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
10328 pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10330 * Map SLI4 if type 6 HBA DPP Register base to a kernel
10331 * virtual address and setup the registers.
10333 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
10334 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10335 phba->sli4_hba.dpp_regs_memmap_p =
10336 ioremap(phba->pci_bar2_map, bar2map_len);
10337 if (!phba->sli4_hba.dpp_regs_memmap_p) {
10338 dev_err(&pdev->dev,
10339 "ioremap failed for SLI4 HBA dpp registers.\n");
10341 goto out_iounmap_ctrl;
10343 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
10346 /* Set up the EQ/CQ register handling functions now */
10348 case LPFC_SLI_INTF_IF_TYPE_0:
10349 case LPFC_SLI_INTF_IF_TYPE_2:
10350 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
10351 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
10352 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
10354 case LPFC_SLI_INTF_IF_TYPE_6:
10355 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
10356 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
10357 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
10366 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10368 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10370 iounmap(phba->sli4_hba.conf_regs_memmap_p);
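/*
 * Illustrative sketch (not lpfc driver code): lpfc_sli4_pci_mem_setup()
 * above maps up to three BARs and, on failure, unwinds only the mappings
 * that already succeeded, in reverse order. The skeleton of that pattern,
 * with hypothetical demo_ names and BAR indexes:
 */
struct demo_regs {
	void __iomem *conf; /* e.g. config registers on BAR 0 */
	void __iomem *ctrl; /* e.g. control registers on BAR 2 */
};

static int demo_map_bars(struct pci_dev *pdev, struct demo_regs *regs)
{
	regs->conf = ioremap(pci_resource_start(pdev, 0),
			     pci_resource_len(pdev, 0));
	if (!regs->conf)
		return -ENOMEM;

	regs->ctrl = ioremap(pci_resource_start(pdev, 2),
			     pci_resource_len(pdev, 2));
	if (!regs->ctrl)
		goto out_iounmap_conf; /* undo only what succeeded */

	return 0;

out_iounmap_conf:
	iounmap(regs->conf);
	return -ENOMEM;
}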
10376 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
10377 * @phba: pointer to lpfc hba data structure.
10379 * This routine is invoked to unset the PCI device memory space for device
10380 * with SLI-4 interface spec.
10383 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
10386 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10389 case LPFC_SLI_INTF_IF_TYPE_0:
10390 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10391 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10392 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10394 case LPFC_SLI_INTF_IF_TYPE_2:
10395 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10397 case LPFC_SLI_INTF_IF_TYPE_6:
10398 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10399 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10401 case LPFC_SLI_INTF_IF_TYPE_1:
10403 dev_printk(KERN_ERR, &phba->pcidev->dev,
10404 "FATAL - unsupported SLI4 interface type - %d\n",
10411 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
10412 * @phba: pointer to lpfc hba data structure.
10414 * This routine is invoked to enable the MSI-X interrupt vectors to device
10415 * with SLI-3 interface specs.
10418 * 0 - successful
10419 * other values - error
10422 lpfc_sli_enable_msix(struct lpfc_hba *phba)
10427 /* Set up MSI-X multi-message vectors */
10428 rc = pci_alloc_irq_vectors(phba->pcidev,
10429 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
10431 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10432 "0420 PCI enable MSI-X failed (%d)\n", rc);
10437 * Assign MSI-X vectors to interrupt handlers
10440 /* vector-0 is associated to slow-path handler */
10441 rc = request_irq(pci_irq_vector(phba->pcidev, 0),
10442 &lpfc_sli_sp_intr_handler, 0,
10443 LPFC_SP_DRIVER_HANDLER_NAME, phba);
10445 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10446 "0421 MSI-X slow-path request_irq failed "
10451 /* vector-1 is associated to fast-path handler */
10452 rc = request_irq(pci_irq_vector(phba->pcidev, 1),
10453 &lpfc_sli_fp_intr_handler, 0,
10454 LPFC_FP_DRIVER_HANDLER_NAME, phba);
10457 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10458 "0429 MSI-X fast-path request_irq failed "
10464 * Configure HBA MSI-X attention conditions to messages
10466 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10470 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10471 "0474 Unable to allocate memory for issuing "
10472 "MBOX_CONFIG_MSI command\n");
10475 rc = lpfc_config_msi(phba, pmb);
10478 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10479 if (rc != MBX_SUCCESS) {
10480 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
10481 "0351 Config MSI mailbox command failed, "
10482 "mbxCmd x%x, mbxStatus x%x\n",
10483 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
10487 /* Free memory allocated for mailbox command */
10488 mempool_free(pmb, phba->mbox_mem_pool);
10492 /* Free memory allocated for mailbox command */
10493 mempool_free(pmb, phba->mbox_mem_pool);
10496 /* free the irq already requested */
10497 free_irq(pci_irq_vector(phba->pcidev, 1), phba);
10500 /* free the irq already requested */
10501 free_irq(pci_irq_vector(phba->pcidev, 0), phba);
10504 /* Unconfigure MSI-X capability structure */
10505 pci_free_irq_vectors(phba->pcidev);
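/*
 * Illustrative sketch (not lpfc driver code): the SLI-3 MSI-X setup above
 * allocates exactly LPFC_MSIX_VECTORS vectors (min == max), attaches one
 * handler per vector, and releases everything in reverse order on failure.
 * The same shape with hypothetical demo_ names:
 */
static irqreturn_t demo_intr(int irq, void *dev)
{
	return IRQ_HANDLED;
}

static int demo_enable_two_msix(struct pci_dev *pdev, void *drvdata)
{
	int rc;

	rc = pci_alloc_irq_vectors(pdev, 2, 2, PCI_IRQ_MSIX);
	if (rc < 0)
		return rc; /* could not get both vectors */

	rc = request_irq(pci_irq_vector(pdev, 0), demo_intr, 0,
			 "demo-slow", drvdata);
	if (rc)
		goto out_free_vectors;

	rc = request_irq(pci_irq_vector(pdev, 1), demo_intr, 0,
			 "demo-fast", drvdata);
	if (rc)
		goto out_free_irq0;

	return 0;

out_free_irq0:
	free_irq(pci_irq_vector(pdev, 0), drvdata);
out_free_vectors:
	pci_free_irq_vectors(pdev);
	return rc;
}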
10512 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
10513 * @phba: pointer to lpfc hba data structure.
10515 * This routine is invoked to enable the MSI interrupt mode to device with
10516 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
10517 * enable the MSI vector. The device driver is responsible for calling
10518 * request_irq() to register the MSI vector with an interrupt handler,
10519 * which is done in this function.
10522 * 0 - successful
10523 * other values - error
10526 lpfc_sli_enable_msi(struct lpfc_hba *phba)
10530 rc = pci_enable_msi(phba->pcidev);
10532 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10533 "0462 PCI enable MSI mode success.\n");
10535 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10536 "0471 PCI enable MSI mode failed (%d)\n", rc);
10540 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10541 0, LPFC_DRIVER_NAME, phba);
10543 pci_disable_msi(phba->pcidev);
10544 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10545 "0478 MSI request_irq failed (%d)\n", rc);
10551 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
10552 * @phba: pointer to lpfc hba data structure.
10554 * This routine is invoked to enable device interrupt and associate driver's
10555 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
10556 * spec. Depends on the interrupt mode configured to the driver, the driver
10557 * will try to fallback from the configured interrupt mode to an interrupt
10558 * mode which is supported by the platform, kernel, and device in the order
10560 * MSI-X -> MSI -> IRQ.
10563 * 0 - successful
10564 * other values - error
10567 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
10569 uint32_t intr_mode = LPFC_INTR_ERROR;
10572 if (cfg_mode == 2) {
10573 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
10574 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
10576 /* Now, try to enable MSI-X interrupt mode */
10577 retval = lpfc_sli_enable_msix(phba);
10579 /* Indicate initialization to MSI-X mode */
10580 phba->intr_type = MSIX;
10586 /* Fallback to MSI if MSI-X initialization failed */
10587 if (cfg_mode >= 1 && phba->intr_type == NONE) {
10588 retval = lpfc_sli_enable_msi(phba);
10590 /* Indicate initialization to MSI mode */
10591 phba->intr_type = MSI;
10596 /* Fallback to INTx if both MSI-X/MSI initialization failed */
10597 if (phba->intr_type == NONE) {
10598 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10599 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
10601 /* Indicate initialization to INTx mode */
10602 phba->intr_type = INTx;
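/*
 * Illustrative sketch (not lpfc driver code): the MSI-X -> MSI -> INTx
 * ladder above is hand-rolled because each mode needs driver-specific
 * setup. When no per-mode work is required, the same fallback can be
 * expressed with one pci_alloc_irq_vectors() call; demo_ names are
 * hypothetical.
 */
static int demo_enable_any_intr(struct pci_dev *pdev, irq_handler_t handler,
				void *drvdata)
{
	int rc;

	/* Try MSI-X first, then MSI, then legacy INTx */
	rc = pci_alloc_irq_vectors(pdev, 1, 1,
				   PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (rc < 0)
		return rc;

	/* INTx may be shared, so IRQF_SHARED is the safe default */
	return request_irq(pci_irq_vector(pdev, 0), handler, IRQF_SHARED,
			   "demo", drvdata);
}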
10610 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
10611 * @phba: pointer to lpfc hba data structure.
10613 * This routine is invoked to disable device interrupt and disassociate the
10614 * driver's interrupt handler(s) from interrupt vector(s) to device with
10615 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
10616 * release the interrupt vector(s) for the message signaled interrupt.
10619 lpfc_sli_disable_intr(struct lpfc_hba *phba)
10623 if (phba->intr_type == MSIX)
10624 nr_irqs = LPFC_MSIX_VECTORS;
10628 for (i = 0; i < nr_irqs; i++)
10629 free_irq(pci_irq_vector(phba->pcidev, i), phba);
10630 pci_free_irq_vectors(phba->pcidev);
10632 /* Reset interrupt management states */
10633 phba->intr_type = NONE;
10634 phba->sli.slistat.sli_intr = 0;
10638 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
10639 * @phba: pointer to lpfc hba data structure.
10640 * @id: EQ vector index or Hardware Queue index
10641 * @match: LPFC_FIND_BY_EQ = match by EQ
10642 * LPFC_FIND_BY_HDWQ = match by Hardware Queue
10643 * Return the CPU that matches the selection criteria
10646 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
10648 struct lpfc_vector_map_info *cpup;
10651 /* Loop through all CPUs */
10652 for_each_present_cpu(cpu) {
10653 cpup = &phba->sli4_hba.cpu_map[cpu];
10655 /* If we are matching by EQ, there may be multiple CPUs
10656 * using the same vector, so select the one with
10657 * LPFC_CPU_FIRST_IRQ set.
10659 if ((match == LPFC_FIND_BY_EQ) &&
10660 (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
10661 (cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
10665 /* If matching by HDWQ, select the first CPU that matches */
10666 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
10674 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
10675 * @phba: pointer to lpfc hba data structure.
10676 * @cpu: CPU map index
10677 * @phys_id: CPU package physical id
10678 * @core_id: CPU core id
10681 lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
10682 uint16_t phys_id, uint16_t core_id)
10684 struct lpfc_vector_map_info *cpup;
10687 for_each_present_cpu(idx) {
10688 cpup = &phba->sli4_hba.cpu_map[idx];
10689 /* Does the cpup match the one we are looking for */
10690 if ((cpup->phys_id == phys_id) &&
10691 (cpup->core_id == core_id) &&
10700 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
10701 * @phba: pointer to lpfc hba data structure.
10702 * @vectors: number of msix vectors allocated.
10704 * The routine will figure out the CPU affinity assignment for every
10705 * MSI-X vector allocated for the HBA.
10706 * In addition, the CPU to IO channel mapping will be calculated
10707 * and the phba->sli4_hba.cpu_map array will reflect this.
10710 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
10712 int i, cpu, idx, new_cpu, start_cpu, first_cpu;
10713 int max_phys_id, min_phys_id;
10714 int max_core_id, min_core_id;
10715 struct lpfc_vector_map_info *cpup;
10716 struct lpfc_vector_map_info *new_cpup;
10717 const struct cpumask *maskp;
10719 struct cpuinfo_x86 *cpuinfo;
10722 /* Init cpu_map array */
10723 for_each_possible_cpu(cpu) {
10724 cpup = &phba->sli4_hba.cpu_map[cpu];
10725 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
10726 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
10727 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
10728 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
10729 cpup->irq = LPFC_VECTOR_MAP_EMPTY;
10733 max_phys_id = 0;
10734 min_phys_id = LPFC_VECTOR_MAP_EMPTY;
10735 max_core_id = 0;
10736 min_core_id = LPFC_VECTOR_MAP_EMPTY;
10738 /* Update CPU map with physical id and core id of each CPU */
10739 for_each_present_cpu(cpu) {
10740 cpup = &phba->sli4_hba.cpu_map[cpu];
10742 cpuinfo = &cpu_data(cpu);
10743 cpup->phys_id = cpuinfo->phys_proc_id;
10744 cpup->core_id = cpuinfo->cpu_core_id;
10745 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
10746 cpup->flag |= LPFC_CPU_MAP_HYPER;
10748 /* No distinction between CPUs for other platforms */
10750 cpup->core_id = cpu;
10753 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10754 "3328 CPU physid %d coreid %d\n",
10755 cpup->phys_id, cpup->core_id);
10757 if (cpup->phys_id > max_phys_id)
10758 max_phys_id = cpup->phys_id;
10759 if (cpup->phys_id < min_phys_id)
10760 min_phys_id = cpup->phys_id;
10762 if (cpup->core_id > max_core_id)
10763 max_core_id = cpup->core_id;
10764 if (cpup->core_id < min_core_id)
10765 min_core_id = cpup->core_id;
10768 for_each_possible_cpu(i) {
10769 struct lpfc_eq_intr_info *eqi =
10770 per_cpu_ptr(phba->sli4_hba.eq_info, i);
10772 INIT_LIST_HEAD(&eqi->list);
10776 /* This loop sets up all CPUs that are affinitized with an
10777 * irq vector assigned to the driver. All affinitized CPUs
10778 * will get a link to that vector's IRQ and EQ.
10780 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
10781 /* Get a CPU mask for all CPUs affinitized to this vector */
10782 maskp = pci_irq_get_affinity(phba->pcidev, idx);
10787 /* Loop through all CPUs associated with vector idx */
10788 for_each_cpu_and(cpu, maskp, cpu_present_mask) {
10789 /* Set the EQ index and IRQ for that vector */
10790 cpup = &phba->sli4_hba.cpu_map[cpu];
10792 cpup->irq = pci_irq_vector(phba->pcidev, idx);
10794 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10795 "3336 Set Affinity: CPU %d "
10797 cpu, cpup->irq, cpup->eq);
10799 /* If this is the first CPU that's assigned to this
10800 * vector, set LPFC_CPU_FIRST_IRQ.
10803 cpup->flag |= LPFC_CPU_FIRST_IRQ;
10808 /* After looking at each irq vector assigned to this pcidev, it's
10809 * possible to see that not ALL CPUs have been accounted for.
10810 * Next we will set any unassigned (unaffinitized) cpu map
10811 * entries to an IRQ on the same phys_id.
10813 first_cpu = cpumask_first(cpu_present_mask);
10814 start_cpu = first_cpu;
10816 for_each_present_cpu(cpu) {
10817 cpup = &phba->sli4_hba.cpu_map[cpu];
10819 /* Is this CPU entry unassigned */
10820 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
10821 /* Mark CPU as IRQ not assigned by the kernel */
10822 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
10824 /* If so, find a new_cpup that's on the SAME
10825 * phys_id as cpup. start_cpu will start where we
10826 * left off so all unassigned entries don't get assigned
10827 * the IRQ of the first entry.
10829 new_cpu = start_cpu;
10830 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10831 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10832 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
10833 (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
10834 (new_cpup->phys_id == cpup->phys_id))
10836 new_cpu = cpumask_next(
10837 new_cpu, cpu_present_mask);
10838 if (new_cpu == nr_cpumask_bits)
10839 new_cpu = first_cpu;
10841 /* At this point, we leave the CPU as unassigned */
10844 /* We found a matching phys_id, so copy the IRQ info */
10845 cpup->eq = new_cpup->eq;
10846 cpup->irq = new_cpup->irq;
10848 /* Bump start_cpu to the next slot to minimize the
10849 * chance of having multiple unassigned CPU entries
10850 * selecting the same IRQ.
10852 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
10853 if (start_cpu == nr_cpumask_bits)
10854 start_cpu = first_cpu;
10856 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10857 "3337 Set Affinity: CPU %d "
10858 "irq %d from id %d same "
10860 cpu, cpup->irq, new_cpu, cpup->phys_id);
10864 /* Set any unassigned cpu map entries to an IRQ on any phys_id */
10865 start_cpu = first_cpu;
10867 for_each_present_cpu(cpu) {
10868 cpup = &phba->sli4_hba.cpu_map[cpu];
10870 /* Is this entry unassigned */
10871 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
10872 /* Mark it as IRQ not assigned by the kernel */
10873 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
10875 /* If so, find a new_cpup that's on ANY phys_id
10876 * as the cpup. start_cpu will start where we
10877 * left off so all unassigned entries don't get
10878 * assigned the IRQ of the first entry.
10880 new_cpu = start_cpu;
10881 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10882 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10883 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
10884 (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY))
10886 new_cpu = cpumask_next(
10887 new_cpu, cpu_present_mask);
10888 if (new_cpu == nr_cpumask_bits)
10889 new_cpu = first_cpu;
10891 /* We should never leave an entry unassigned */
10892 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10893 "3339 Set Affinity: CPU %d "
10894 "irq %d UNASSIGNED\n",
10895 cpup->hdwq, cpup->irq);
10898 /* We found an available entry, copy the IRQ info */
10899 cpup->eq = new_cpup->eq;
10900 cpup->irq = new_cpup->irq;
10902 /* Bump start_cpu to the next slot to minimize the
10903 * chance of having multiple unassigned CPU entries
10904 * selecting the same IRQ.
10906 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
10907 if (start_cpu == nr_cpumask_bits)
10908 start_cpu = first_cpu;
10910 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10911 "3338 Set Affinity: CPU %d "
10912 "irq %d from id %d (%d/%d)\n",
10913 cpu, cpup->irq, new_cpu,
10914 new_cpup->phys_id, new_cpup->core_id);
10918 /* Finally we need to associate a hdwq with each cpu_map entry
10919 * This will be 1 to 1 - hdwq to cpu, unless there are fewer
10920 * hardware queues than CPUs. In that case we will round-robin
10921 * the available hardware queues as they get assigned to CPUs.
10925 for_each_present_cpu(cpu) {
10926 cpup = &phba->sli4_hba.cpu_map[cpu];
10927 if (idx >= phba->cfg_hdw_queue) {
10928 /* We need to reuse a Hardware Queue for another CPU,
10929 * so be smart about it and pick one that has its
10930 * IRQ/EQ mapped to the same phys_id (CPU package).
10933 new_cpu = start_cpu;
10934 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10935 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10936 if ((new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) &&
10937 (new_cpup->phys_id == cpup->phys_id) &&
10938 (new_cpup->core_id == cpup->core_id))
10940 new_cpu = cpumask_next(
10941 new_cpu, cpu_present_mask);
10942 if (new_cpu == nr_cpumask_bits)
10943 new_cpu = first_cpu;
10946 /* If we can't match both phys_id and core_id,
10947 * settle for just a phys_id match.
10949 new_cpu = start_cpu;
10950 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10951 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10952 if ((new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) &&
10953 (new_cpup->phys_id == cpup->phys_id))
10955 new_cpu = cpumask_next(
10956 new_cpu, cpu_present_mask);
10957 if (new_cpu == nr_cpumask_bits)
10958 new_cpu = first_cpu;
10961 /* Otherwise just round robin on cfg_hdw_queue */
10962 cpup->hdwq = idx % phba->cfg_hdw_queue;
10965 /* We found an available entry, copy the hdwq info */
10966 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
10967 if (start_cpu == nr_cpumask_bits)
10968 start_cpu = first_cpu;
10969 cpup->hdwq = new_cpup->hdwq;
10971 /* 1 to 1, CPU to hdwq */
10975 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10976 "3335 Set Affinity: CPU %d (phys %d core %d): "
10977 "hdwq %d eq %d irq %d flg x%x\n",
10978 cpu, cpup->phys_id, cpup->core_id,
10979 cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
10983 /* The cpu_map array will be used later during initialization
10984 * when EQ / CQ / WQs are allocated and configured.
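/*
 * Illustrative sketch (not lpfc driver code): once the phys_id/core_id
 * affinity heuristics above are stripped away, the last step is just a
 * round-robin of hardware queue indexes across the present CPUs whenever
 * there are fewer queues than CPUs. The map array below is a hypothetical
 * stand-in for cpu_map[], sized nr_cpu_ids by the caller.
 */
static void demo_round_robin_hdwq(u16 *cpu_to_hdwq, int num_hdwq)
{
	int cpu, idx = 0;

	for_each_present_cpu(cpu)
		cpu_to_hdwq[cpu] = idx++ % num_hdwq;
}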
10990 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
10991 * @phba: pointer to lpfc hba data structure.
10993 * This routine is invoked to enable the MSI-X interrupt vectors to device
10994 * with SLI-4 interface spec.
10997 * 0 - successful
10998 * other values - error
11001 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
11003 int vectors, rc, index;
11006 /* Set up MSI-X multi-message vectors */
11007 vectors = phba->cfg_irq_chann;
11009 rc = pci_alloc_irq_vectors(phba->pcidev,
11011 vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
11013 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11014 "0484 PCI enable MSI-X failed (%d)\n", rc);
11019 /* Assign MSI-X vectors to interrupt handlers */
11020 for (index = 0; index < vectors; index++) {
11021 name = phba->sli4_hba.hba_eq_hdl[index].handler_name;
11022 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
11023 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
11024 LPFC_DRIVER_HANDLER_NAME"%d", index);
11026 phba->sli4_hba.hba_eq_hdl[index].idx = index;
11027 phba->sli4_hba.hba_eq_hdl[index].phba = phba;
11028 rc = request_irq(pci_irq_vector(phba->pcidev, index),
11029 &lpfc_sli4_hba_intr_handler, 0,
11031 &phba->sli4_hba.hba_eq_hdl[index]);
11033 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11034 "0486 MSI-X fast-path (%d) "
11035 "request_irq failed (%d)\n", index, rc);
11040 if (vectors != phba->cfg_irq_chann) {
11041 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11042 "3238 Reducing IO channels to match number of "
11043 "MSI-X vectors, requested %d got %d\n",
11044 phba->cfg_irq_chann, vectors);
11045 if (phba->cfg_irq_chann > vectors)
11046 phba->cfg_irq_chann = vectors;
11047 if (phba->nvmet_support && (phba->cfg_nvmet_mrq > vectors))
11048 phba->cfg_nvmet_mrq = vectors;
11054 /* free the irq already requested */
11055 for (--index; index >= 0; index--)
11056 free_irq(pci_irq_vector(phba->pcidev, index),
11057 &phba->sli4_hba.hba_eq_hdl[index]);
11059 /* Unconfigure MSI-X capability structure */
11060 pci_free_irq_vectors(phba->pcidev);
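/*
 * Illustrative sketch (not lpfc driver code): the SLI-4 variant above asks
 * for up to one MSI-X vector per IO channel with PCI_IRQ_AFFINITY, so the
 * kernel spreads the vectors across CPUs, and then shrinks the channel
 * count if fewer vectors were granted. The core of that negotiation, with
 * hypothetical demo_ names:
 */
static int demo_alloc_spread_vectors(struct pci_dev *pdev, int *nr_chann)
{
	int vectors;

	vectors = pci_alloc_irq_vectors(pdev, 1, *nr_chann,
					PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (vectors < 0)
		return vectors;

	/* Fewer vectors than requested: scale the channel count to match */
	if (vectors < *nr_chann)
		*nr_chann = vectors;
	return 0;
}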
11067 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
11068 * @phba: pointer to lpfc hba data structure.
11070 * This routine is invoked to enable the MSI interrupt mode to device with
11071 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
11072 * to enable the MSI vector. The device driver is responsible for calling
11073 * request_irq() to register the MSI vector with an interrupt handler,
11074 * which is done in this function.
11077 * 0 - successful
11078 * other values - error
11081 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
11085 rc = pci_enable_msi(phba->pcidev);
11087 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11088 "0487 PCI enable MSI mode success.\n");
11090 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11091 "0488 PCI enable MSI mode failed (%d)\n", rc);
11095 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11096 0, LPFC_DRIVER_NAME, phba);
11098 pci_disable_msi(phba->pcidev);
11099 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11100 "0490 MSI request_irq failed (%d)\n", rc);
11104 for (index = 0; index < phba->cfg_irq_chann; index++) {
11105 phba->sli4_hba.hba_eq_hdl[index].idx = index;
11106 phba->sli4_hba.hba_eq_hdl[index].phba = phba;
11113 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
11114 * @phba: pointer to lpfc hba data structure.
11116 * This routine is invoked to enable device interrupt and associate driver's
11117 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
11118 * interface spec. Depending on the interrupt mode configured in the
11119 * driver, the driver will try to fall back from the configured mode to an
11120 * interrupt mode supported by the platform, kernel, and device, in
11122 * the order: MSI-X -> MSI -> IRQ.
11125 * 0 - successful
11126 * other values - error
11129 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
11131 uint32_t intr_mode = LPFC_INTR_ERROR;
11134 if (cfg_mode == 2) {
11135 /* Preparation before conf_msi mbox cmd */
11138 /* Now, try to enable MSI-X interrupt mode */
11139 retval = lpfc_sli4_enable_msix(phba);
11141 /* Indicate initialization to MSI-X mode */
11142 phba->intr_type = MSIX;
11148 /* Fallback to MSI if MSI-X initialization failed */
11149 if (cfg_mode >= 1 && phba->intr_type == NONE) {
11150 retval = lpfc_sli4_enable_msi(phba);
11152 /* Indicate initialization to MSI mode */
11153 phba->intr_type = MSI;
11158 /* Fallback to INTx if both MSI-X/MSI initialization failed */
11159 if (phba->intr_type == NONE) {
11160 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11161 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
11163 struct lpfc_hba_eq_hdl *eqhdl;
11165 /* Indicate initialization to INTx mode */
11166 phba->intr_type = INTx;
11169 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11170 eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
11172 eqhdl->phba = phba;
11180 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
11181 * @phba: pointer to lpfc hba data structure.
11183 * This routine is invoked to disable device interrupt and disassociate
11184 * the driver's interrupt handler(s) from interrupt vector(s) to device
11185 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
11186 * will release the interrupt vector(s) for the message signaled interrupt.
11189 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
11191 /* Disable the currently initialized interrupt mode */
11192 if (phba->intr_type == MSIX) {
11195 /* Free up MSI-X multi-message vectors */
11196 for (index = 0; index < phba->cfg_irq_chann; index++) {
11197 irq_set_affinity_hint(
11198 pci_irq_vector(phba->pcidev, index),
11200 free_irq(pci_irq_vector(phba->pcidev, index),
11201 &phba->sli4_hba.hba_eq_hdl[index]);
11204 free_irq(phba->pcidev->irq, phba);
11207 pci_free_irq_vectors(phba->pcidev);
11209 /* Reset interrupt management states */
11210 phba->intr_type = NONE;
11211 phba->sli.slistat.sli_intr = 0;
11215 * lpfc_unset_hba - Unset SLI3 hba device initialization
11216 * @phba: pointer to lpfc hba data structure.
11218 * This routine is invoked to unset the HBA device initialization steps to
11219 * a device with SLI-3 interface spec.
11222 lpfc_unset_hba(struct lpfc_hba *phba)
11224 struct lpfc_vport *vport = phba->pport;
11225 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11227 spin_lock_irq(shost->host_lock);
11228 vport->load_flag |= FC_UNLOADING;
11229 spin_unlock_irq(shost->host_lock);
11231 kfree(phba->vpi_bmask);
11232 kfree(phba->vpi_ids);
11234 lpfc_stop_hba_timers(phba);
11236 phba->pport->work_port_events = 0;
11238 lpfc_sli_hba_down(phba);
11240 lpfc_sli_brdrestart(phba);
11242 lpfc_sli_disable_intr(phba);
11248 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
11249 * @phba: Pointer to HBA context object.
11251 * This function is called in the SLI4 code path to wait for completion
11252 * of the device's XRI exchange busy. It checks the XRI exchange busy
11253 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
11254 * that, it checks the XRI exchange busy on outstanding FCP and ELS
11255 * I/Os every 30 seconds, logs an error message, and waits forever. Only
11256 * when all XRI exchange busy events have completed does the driver unload
11257 * proceed with invoking the function reset ioctl mailbox command to the
11258 * CNA and releasing the rest of the driver unload resources.
11261 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
11263 struct lpfc_sli4_hdw_queue *qp;
11264 int idx, ccnt, fcnt;
11266 int io_xri_cmpl = 1;
11267 int nvmet_xri_cmpl = 1;
11268 int fcp_xri_cmpl = 1;
11269 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
11271 /* Driver just aborted IOs during the hba_unset process. Pause
11272 * here to give the HBA time to complete the IO and get entries
11273 * into the abts lists.
11275 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
11277 /* Wait for NVME pending IO to flush back to transport. */
11278 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
11279 lpfc_nvme_wait_for_io_drain(phba);
11283 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
11284 qp = &phba->sli4_hba.hdwq[idx];
11285 fcp_xri_cmpl = list_empty(
11286 &qp->lpfc_abts_scsi_buf_list);
11287 if (!fcp_xri_cmpl) /* if list is NOT empty */
11289 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11290 io_xri_cmpl = list_empty(
11291 &qp->lpfc_abts_nvme_buf_list);
11292 if (!io_xri_cmpl) /* if list is NOT empty */
11301 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11303 nvmet_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
11306 while (!fcp_xri_cmpl || !els_xri_cmpl || !io_xri_cmpl ||
11308 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
11309 if (!nvmet_xri_cmpl)
11310 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11311 "6424 NVMET XRI exchange busy "
11312 "wait time: %d seconds.\n",
11315 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11316 "6100 NVME XRI exchange busy "
11317 "wait time: %d seconds.\n",
11320 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11321 "2877 FCP XRI exchange busy "
11322 "wait time: %d seconds.\n",
11325 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11326 "2878 ELS XRI exchange busy "
11327 "wait time: %d seconds.\n",
11329 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
11330 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
11332 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
11333 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
11338 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
11339 qp = &phba->sli4_hba.hdwq[idx];
11340 fcp_xri_cmpl = list_empty(
11341 &qp->lpfc_abts_scsi_buf_list);
11342 if (!fcp_xri_cmpl) /* if list is NOT empty */
11344 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11345 io_xri_cmpl = list_empty(
11346 &qp->lpfc_abts_nvme_buf_list);
11347 if (!io_xri_cmpl) /* if list is NOT empty */
11356 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11357 nvmet_xri_cmpl = list_empty(
11358 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
11361 els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
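/*
 * Illustrative sketch (not lpfc driver code): the XRI wait above polls
 * quickly for an initial window, then falls back to a slow poll that logs
 * and waits indefinitely. The shape of that two-phase loop, with a
 * caller-supplied completion predicate and hypothetical demo_ names:
 */
static void demo_two_phase_wait(bool (*done)(void *arg), void *arg,
				unsigned int fast_ms, unsigned int slow_ms,
				unsigned int fast_window_ms)
{
	unsigned int waited_ms = 0;

	while (!done(arg)) {
		if (waited_ms > fast_window_ms) {
			msleep(slow_ms); /* slow poll; wait forever */
			waited_ms += slow_ms;
		} else {
			msleep(fast_ms); /* fast poll during first window */
			waited_ms += fast_ms;
		}
	}
}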
11367 * lpfc_sli4_hba_unset - Unset the fcoe hba
11368 * @phba: Pointer to HBA context object.
11370 * This function is called in the SLI4 code path to reset the HBA's FCoE
11371 * function. The caller is not required to hold any lock. This routine
11372 * issues PCI function reset mailbox command to reset the FCoE function.
11373 * At the end of the function, it calls lpfc_hba_down_post function to
11374 * free any pending commands.
11377 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
11380 LPFC_MBOXQ_t *mboxq;
11381 struct pci_dev *pdev = phba->pcidev;
11383 lpfc_stop_hba_timers(phba);
11385 phba->sli4_hba.intr_enable = 0;
11388 * Gracefully wait out any outstanding asynchronous mailbox command.
11392 /* First, block any pending async mailbox command from posted */
11393 spin_lock_irq(&phba->hbalock);
11394 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
11395 spin_unlock_irq(&phba->hbalock);
11396 /* Now, try to wait it out if we can */
11397 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
11399 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
11402 /* Forcefully release the outstanding mailbox command if timed out */
11403 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
11404 spin_lock_irq(&phba->hbalock);
11405 mboxq = phba->sli.mbox_active;
11406 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
11407 __lpfc_mbox_cmpl_put(phba, mboxq);
11408 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11409 phba->sli.mbox_active = NULL;
11410 spin_unlock_irq(&phba->hbalock);
11413 /* Abort all iocbs associated with the hba */
11414 lpfc_sli_hba_iocb_abort(phba);
11416 /* Wait for completion of device XRI exchange busy */
11417 lpfc_sli4_xri_exchange_busy_wait(phba);
11419 /* Disable PCI subsystem interrupt */
11420 lpfc_sli4_disable_intr(phba);
11422 /* Disable SR-IOV if enabled */
11423 if (phba->cfg_sriov_nr_virtfn)
11424 pci_disable_sriov(pdev);
11426 /* Stopping the kthread shall trigger work_done one more time */
11427 kthread_stop(phba->worker_thread);
11429 /* Disable FW logging to host memory */
11430 lpfc_ras_stop_fwlog(phba);
11432 /* Unset the queues shared with the hardware then release all
11433 * allocated resources.
11435 lpfc_sli4_queue_unset(phba);
11436 lpfc_sli4_queue_destroy(phba);
11438 /* Reset SLI4 HBA FCoE function */
11439 lpfc_pci_function_reset(phba);
11441 /* Free RAS DMA memory */
11442 if (phba->ras_fwlog.ras_enabled)
11443 lpfc_sli4_ras_dma_free(phba);
11445 /* Stop the SLI4 device port */
11447 phba->pport->work_port_events = 0;
11451 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
11452 * @phba: Pointer to HBA context object.
11453 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
11455 * This function is called in the SLI4 code path to read the port's
11456 * sli4 capabilities.
11458 * This function may be called from any context that can block-wait
11459 * for the completion. The expectation is that this routine is called
11460 * typically from probe_one or from the online routine.
11463 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
11466 struct lpfc_mqe *mqe;
11467 struct lpfc_pc_sli4_params *sli4_params;
11471 mqe = &mboxq->u.mqe;
11473 /* Read the port's SLI4 Parameters port capabilities */
11474 lpfc_pc_sli4_params(mboxq);
11475 if (!phba->sli4_hba.intr_enable)
11476 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11478 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
11479 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11485 sli4_params = &phba->sli4_hba.pc_sli4_params;
11486 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
11487 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
11488 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
11489 sli4_params->featurelevel_1 = bf_get(featurelevel_1,
11490 &mqe->un.sli4_params);
11491 sli4_params->featurelevel_2 = bf_get(featurelevel_2,
11492 &mqe->un.sli4_params);
11493 sli4_params->proto_types = mqe->un.sli4_params.word3;
11494 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
11495 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
11496 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
11497 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
11498 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
11499 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
11500 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
11501 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
11502 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
11503 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
11504 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
11505 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
11506 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
11507 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
11508 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
11509 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
11510 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
11511 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
11512 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
11513 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
11515 /* Make sure that sge_supp_len can be handled by the driver */
11516 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
11517 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
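/*
 * The issue pattern above recurs throughout this file: before interrupts
 * are enabled a mailbox command must be polled, afterwards the driver can
 * block-wait on its completion. Condensed into one helper (a sketch using
 * the same lpfc calls as above; demo_issue_mbox() itself is hypothetical):
 */
static int demo_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	if (!phba->sli4_hba.intr_enable)
		return lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	return lpfc_sli_issue_mbox_wait(phba, mboxq,
					lpfc_mbox_tmo_val(phba, mboxq));
}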
11523 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
11524 * @phba: Pointer to HBA context object.
11525 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
11527 * This function is called in the SLI4 code path to read the port's
11528 * sli4 capabilities.
11530 * This function may be called from any context that can block-wait
11531 * for the completion. The expectation is that this routine is called
11532 * typically from probe_one or from the online routine.
11535 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
11538 struct lpfc_mqe *mqe = &mboxq->u.mqe;
11539 struct lpfc_pc_sli4_params *sli4_params;
11542 bool exp_wqcq_pages = true;
11543 struct lpfc_sli4_parameters *mbx_sli4_parameters;
11546 * By default, the driver assumes the SLI4 port requires RPI
11547 * header postings. The SLI4_PARAM response will correct this if needed.
11550 phba->sli4_hba.rpi_hdrs_in_use = 1;
11552 /* Read the port's SLI4 Config Parameters */
11553 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
11554 sizeof(struct lpfc_sli4_cfg_mhdr));
11555 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11556 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
11557 length, LPFC_SLI4_MBX_EMBED);
11558 if (!phba->sli4_hba.intr_enable)
11559 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11561 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
11562 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11566 sli4_params = &phba->sli4_hba.pc_sli4_params;
11567 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
11568 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
11569 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
11570 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
11571 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
11572 mbx_sli4_parameters);
11573 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
11574 mbx_sli4_parameters);
11575 if (bf_get(cfg_phwq, mbx_sli4_parameters))
11576 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
11578 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
11579 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
11580 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
11581 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
11582 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
11583 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
11584 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
11585 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
11586 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
11587 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
11588 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
11589 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
11590 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
11591 mbx_sli4_parameters);
11592 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
11593 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
11594 mbx_sli4_parameters);
11595 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
11596 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
11598 /* Check for firmware nvme support */
11599 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
11600 bf_get(cfg_xib, mbx_sli4_parameters));
11603 /* Save this to indicate the Firmware supports NVME */
11604 sli4_params->nvme = 1;
11606 /* Firmware NVME support, check driver FC4 NVME support */
11607 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
11608 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
11609 "6133 Disabling NVME support: "
11610 "FC4 type not supported: x%x\n",
11611 phba->cfg_enable_fc4_type);
11615 /* No firmware NVME support, check driver FC4 NVME support */
11616 sli4_params->nvme = 0;
11617 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11618 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
11619 "6101 Disabling NVME support: Not "
11620 "supported by firmware (%d %d) x%x\n",
11621 bf_get(cfg_nvme, mbx_sli4_parameters),
11622 bf_get(cfg_xib, mbx_sli4_parameters),
11623 phba->cfg_enable_fc4_type);
11625 phba->nvme_support = 0;
11626 phba->nvmet_support = 0;
11627 phba->cfg_nvmet_mrq = 0;
11629 /* If no FC4 type support, move to just SCSI support */
11630 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
11632 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
11636 /* Only embed PBDE for if_type 6, PBDE support requires xib be set */
11637 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
11638 LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
11639 phba->cfg_enable_pbde = 0;
11642 * To support the Suppress Response feature, we must satisfy 3 conditions:
11643 * lpfc_suppress_rsp module parameter must be set (default).
11644 * In SLI4-Parameters Descriptor:
11645 * Extended Inline Buffers (XIB) must be supported.
11646 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
11647 * (double negative).
11649 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
11650 !(bf_get(cfg_nosr, mbx_sli4_parameters)))
11651 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
11653 phba->cfg_suppress_rsp = 0;
11655 if (bf_get(cfg_eqdr, mbx_sli4_parameters))
11656 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
11658 /* Make sure that sge_supp_len can be handled by the driver */
11659 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
11660 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
11663 * Check whether the adapter supports an embedded copy of the
11664 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
11665 * to use this option, 128-byte WQEs must be used.
11667 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
11668 phba->fcp_embed_io = 1;
11670 phba->fcp_embed_io = 0;
11672 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
11673 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
11674 bf_get(cfg_xib, mbx_sli4_parameters),
11675 phba->cfg_enable_pbde,
11676 phba->fcp_embed_io, phba->nvme_support,
11677 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
11679 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
11680 LPFC_SLI_INTF_IF_TYPE_2) &&
11681 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
11682 LPFC_SLI_INTF_FAMILY_LNCR_A0))
11683 exp_wqcq_pages = false;
11685 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
11686 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
11688 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
11689 phba->enab_exp_wqcq_pages = 1;
11691 phba->enab_exp_wqcq_pages = 0;
11693 * Check if the SLI port supports MDS Diagnostics
11695 if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
11696 phba->mds_diags_support = 1;
11698 phba->mds_diags_support = 0;
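/*
 * The Suppress Response test above is easy to misread because cfg_nosr is
 * a double negative ("Suppress Response IU Not Supported"). Restated as a
 * sketch over hypothetical booleans:
 */
static bool demo_can_suppress_rsp(bool param_enabled, bool xib_supported,
				  bool nosr_bit_set)
{
	/* param on, XIB supported, and NOT "not supported" */
	return param_enabled && xib_supported && !nosr_bit_set;
}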
11704 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
11705 * @pdev: pointer to PCI device
11706 * @pid: pointer to PCI device identifier
11708 * This routine is to be called to attach a device with SLI-3 interface spec
11709 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
11710 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
11711 * information of the device and driver to see if the driver states that it can
11712 * support this kind of device. If the match is successful, the driver core
11713 * invokes this routine. If this routine determines it can claim the HBA, it
11714 * does all the initialization that it needs to do to handle the HBA properly.
11717 * 0 - driver can claim the device
11718 * negative value - driver can not claim the device
11721 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
11723 struct lpfc_hba *phba;
11724 struct lpfc_vport *vport = NULL;
11725 struct Scsi_Host *shost = NULL;
11727 uint32_t cfg_mode, intr_mode;
11729 /* Allocate memory for HBA structure */
11730 phba = lpfc_hba_alloc(pdev);
11734 /* Perform generic PCI device enabling operation */
11735 error = lpfc_enable_pci_dev(phba);
11737 goto out_free_phba;
11739 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
11740 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
11742 goto out_disable_pci_dev;
11744 /* Set up SLI-3 specific device PCI memory space */
11745 error = lpfc_sli_pci_mem_setup(phba);
11747 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11748 "1402 Failed to set up pci memory space.\n");
11749 goto out_disable_pci_dev;
11752 /* Set up SLI-3 specific device driver resources */
11753 error = lpfc_sli_driver_resource_setup(phba);
11755 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11756 "1404 Failed to set up driver resource.\n");
11757 goto out_unset_pci_mem_s3;
11760 /* Initialize and populate the iocb list per host */
11762 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
11764 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11765 "1405 Failed to initialize iocb list.\n");
11766 goto out_unset_driver_resource_s3;
11769 /* Set up common device driver resources */
11770 error = lpfc_setup_driver_resource_phase2(phba);
11772 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11773 "1406 Failed to set up driver resource.\n");
11774 goto out_free_iocb_list;
11777 /* Get the default values for Model Name and Description */
11778 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
11780 /* Create SCSI host to the physical port */
11781 error = lpfc_create_shost(phba);
11783 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11784 "1407 Failed to create scsi host.\n");
11785 goto out_unset_driver_resource;
11788 /* Configure sysfs attributes */
11789 vport = phba->pport;
11790 error = lpfc_alloc_sysfs_attr(vport);
11792 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11793 "1476 Failed to allocate sysfs attr\n");
11794 goto out_destroy_shost;
11797 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
11798 /* Now, try to enable interrupts and bring up the device */
11799 cfg_mode = phba->cfg_use_msi;
11801 /* Put device to a known state before enabling interrupt */
11802 lpfc_stop_port(phba);
11803 /* Configure and enable interrupt */
11804 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
11805 if (intr_mode == LPFC_INTR_ERROR) {
11806 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11807 "0431 Failed to enable interrupt.\n");
11809 goto out_free_sysfs_attr;
11811 /* SLI-3 HBA setup */
11812 if (lpfc_sli_hba_setup(phba)) {
11813 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11814 "1477 Failed to set up hba\n");
11816 goto out_remove_device;
11819 /* Wait 50ms for the interrupts of previous mailbox commands */
11821 /* Check active interrupts on message signaled interrupts */
11822 if (intr_mode == 0 ||
11823 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
11824 /* Log the current active interrupt mode */
11825 phba->intr_mode = intr_mode;
11826 lpfc_log_intr_mode(phba, intr_mode);
11829 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11830 "0447 Configure interrupt mode (%d) "
11831 "failed active interrupt test.\n",
11833 /* Disable the current interrupt mode */
11834 lpfc_sli_disable_intr(phba);
11835 /* Try next level of interrupt mode */
11836 cfg_mode = --intr_mode;
11840 /* Perform post initialization setup */
11841 lpfc_post_init_setup(phba);
11843 /* Check if there are static vports to be created. */
11844 lpfc_create_static_vport(phba);
11849 lpfc_unset_hba(phba);
11850 out_free_sysfs_attr:
11851 lpfc_free_sysfs_attr(vport);
11853 lpfc_destroy_shost(phba);
11854 out_unset_driver_resource:
11855 lpfc_unset_driver_resource_phase2(phba);
11856 out_free_iocb_list:
11857 lpfc_free_iocb_list(phba);
11858 out_unset_driver_resource_s3:
11859 lpfc_sli_driver_resource_unset(phba);
11860 out_unset_pci_mem_s3:
11861 lpfc_sli_pci_mem_unset(phba);
11862 out_disable_pci_dev:
11863 lpfc_disable_pci_dev(phba);
11865 scsi_host_put(shost);
11867 lpfc_hba_free(phba);
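/*
 * Illustrative sketch (not lpfc driver code): the probe routine above is a
 * textbook goto-unwind ladder; each setup step has a matching label that
 * undoes it, and a failure jumps to the label of the last successful step.
 * Reduced to two steps with generic PCI calls:
 */
static int demo_probe(struct pci_dev *pdev)
{
	int error;

	error = pci_enable_device(pdev);
	if (error)
		return error;

	error = pci_request_mem_regions(pdev, "demo");
	if (error)
		goto out_disable_device; /* undo only what succeeded */

	return 0;

out_disable_device:
	pci_disable_device(pdev);
	return error;
}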
11872 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
11873 * @pdev: pointer to PCI device
11875 * This routine is to be called to detach a device with SLI-3 interface
11876 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
11877 * removed from PCI bus, it performs all the necessary cleanup for the HBA
11878 * device to be removed from the PCI subsystem properly.
11881 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
11883 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11884 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
11885 struct lpfc_vport **vports;
11886 struct lpfc_hba *phba = vport->phba;
11889 spin_lock_irq(&phba->hbalock);
11890 vport->load_flag |= FC_UNLOADING;
11891 spin_unlock_irq(&phba->hbalock);
11893 lpfc_free_sysfs_attr(vport);
11895 /* Release all the vports against this physical port */
11896 vports = lpfc_create_vport_work_array(phba);
11897 if (vports != NULL)
11898 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
11899 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
11901 fc_vport_terminate(vports[i]->fc_vport);
11903 lpfc_destroy_vport_work_array(phba, vports);
11905 /* Remove FC host and then SCSI host with the physical port */
11906 fc_remove_host(shost);
11907 scsi_remove_host(shost);
11909 lpfc_cleanup(vport);
11912 * Bring down the SLI Layer. This step disables all interrupts,
11913 * clears the rings, discards all mailbox commands, and resets
11917 /* HBA interrupt will be disabled after this call */
11918 lpfc_sli_hba_down(phba);
11919 /* Stopping the kthread shall trigger work_done one more time */
11920 kthread_stop(phba->worker_thread);
11921 /* Final cleanup of txcmplq and reset the HBA */
11922 lpfc_sli_brdrestart(phba);
11924 kfree(phba->vpi_bmask);
11925 kfree(phba->vpi_ids);
11927 lpfc_stop_hba_timers(phba);
11928 spin_lock_irq(&phba->port_list_lock);
11929 list_del_init(&vport->listentry);
11930 spin_unlock_irq(&phba->port_list_lock);
11932 lpfc_debugfs_terminate(vport);
11934 /* Disable SR-IOV if enabled */
11935 if (phba->cfg_sriov_nr_virtfn)
11936 pci_disable_sriov(pdev);
11938 /* Disable interrupt */
11939 lpfc_sli_disable_intr(phba);
11941 scsi_host_put(shost);
11944 * Call scsi_free before mem_free since scsi bufs are released to their
11945 * corresponding pools here.
11947 lpfc_scsi_free(phba);
11948 lpfc_free_iocb_list(phba);
11950 lpfc_mem_free_all(phba);
11952 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
11953 phba->hbqslimp.virt, phba->hbqslimp.phys);
11955 /* Free resources associated with SLI2 interface */
11956 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
11957 phba->slim2p.virt, phba->slim2p.phys);
11959 /* unmap adapter SLIM and Control Registers */
11960 iounmap(phba->ctrl_regs_memmap_p);
11961 iounmap(phba->slim_memmap_p);
11963 lpfc_hba_free(phba);
11965 pci_release_mem_regions(pdev);
11966 pci_disable_device(pdev);
11970 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
11971 * @pdev: pointer to PCI device
11972 * @msg: power management message
11974 * This routine is to be called from the kernel's PCI subsystem to support
11975 * system Power Management (PM) to device with SLI-3 interface spec. When
11976 * PM invokes this method, it quiesces the device by stopping the driver's
11977 * worker thread for the device, turning off the device's interrupts and
11978 * DMA, and bringing the device offline. Note that because the driver
11979 * implements only the minimum PM requirements for a power-aware driver's
11980 * suspend/resume support, all possible PM messages (SUSPEND, HIBERNATE,
11981 * FREEZE) passed to the suspend() method call are treated as SUSPEND,
11982 * and the driver fully reinitializes its device during the resume() call;
11983 * thus it sets the device to PCI_D3hot in PCI config space instead of
11984 * setting it according to the @msg provided by the PM.
11987 * 0 - driver suspended the device
11991 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
11993 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11994 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11996 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11997 "0473 PCI device Power Management suspend.\n");
11999 /* Bring down the device */
12000 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12001 lpfc_offline(phba);
12002 kthread_stop(phba->worker_thread);
12004 /* Disable interrupt from device */
12005 lpfc_sli_disable_intr(phba);
12007 /* Save device state to PCI config space */
12008 pci_save_state(pdev);
12009 pci_set_power_state(pdev, PCI_D3hot);
12015 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
12016 * @pdev: pointer to PCI device
12018 * This routine is to be called from the kernel's PCI subsystem to support
12019 * system Power Management (PM) to device with SLI-3 interface spec. When PM
12020 * invokes this method, it restores the device's PCI config space state and
12021 * fully reinitializes the device and brings it online. Note that because
12022 * the driver implements only the minimum PM requirements for a power-aware
12023 * driver's suspend/resume support, all possible PM messages (SUSPEND,
12024 * HIBERNATE, FREEZE) passed to the suspend() method call are treated as
12025 * SUSPEND, and the driver fully reinitializes its device during the
12026 * resume() method call; the device is set to PCI_D0 directly in PCI
12027 * config space before restoring the state.
12030 * 0 - driver resumed the device
12034 lpfc_pci_resume_one_s3(struct pci_dev *pdev)
12036 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12037 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12038 uint32_t intr_mode;
12041 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12042 "0452 PCI device Power Management resume.\n");
12044 /* Restore device state from PCI config space */
12045 pci_set_power_state(pdev, PCI_D0);
12046 pci_restore_state(pdev);
12049 * Since the new kernel behavior of pci_restore_state() clears the
12050 * device's saved_state flag, we need to save the restored state again.
12052 pci_save_state(pdev);
12054 if (pdev->is_busmaster)
12055 pci_set_master(pdev);
12057 /* Startup the kernel thread for this host adapter. */
12058 phba->worker_thread = kthread_run(lpfc_do_work, phba,
12059 "lpfc_worker_%d", phba->brd_no);
12060 if (IS_ERR(phba->worker_thread)) {
12061 error = PTR_ERR(phba->worker_thread);
12062 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12063 "0434 PM resume failed to start worker "
12064 "thread: error=x%x.\n", error);
12068 /* Configure and enable interrupt */
12069 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
12070 if (intr_mode == LPFC_INTR_ERROR) {
12071 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12072 "0430 PM resume Failed to enable interrupt\n");
12075 phba->intr_mode = intr_mode;
12077 /* Restart HBA and bring it online */
12078 lpfc_sli_brdrestart(phba);
12081 /* Log the current active interrupt mode */
12082 lpfc_log_intr_mode(phba, phba->intr_mode);
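/*
 * Illustrative sketch (not lpfc driver code): stripped of the driver
 * specifics, the suspend/resume pair above follows the classic legacy PCI
 * power-management sequence. demo_ names are hypothetical.
 */
static int demo_suspend(struct pci_dev *pdev)
{
	pci_save_state(pdev); /* snapshot config space */
	pci_set_power_state(pdev, PCI_D3hot); /* lowest state we resume from */
	return 0;
}

static int demo_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev); /* pci_restore_state() clears saved_state */
	return 0;
}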
12088 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
12089 * @phba: pointer to lpfc hba data structure.
12091 * This routine is called to prepare the SLI3 device for PCI slot recover. It
12092 * aborts all the outstanding SCSI I/Os to the pci device.
12095 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
12097 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12098 "2723 PCI channel I/O abort preparing for recovery\n");
12101 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
12102 * and let the SCSI mid-layer retry them to recover.
12104 lpfc_sli_abort_fcp_rings(phba);
12108 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
12109 * @phba: pointer to lpfc hba data structure.
12111 * This routine is called to prepare the SLI3 device for PCI slot reset. It
12112 * disables the device interrupt and pci device, and aborts the internal FCP
12113 * pending I/Os.
12116 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
12118 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12119 "2710 PCI channel disable preparing for reset\n");
12121 /* Block any management I/Os to the device */
12122 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
12124 /* Block all SCSI devices' I/Os on the host */
12125 lpfc_scsi_dev_block(phba);
12127 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
12128 lpfc_sli_flush_fcp_rings(phba);
12130 /* stop all timers */
12131 lpfc_stop_hba_timers(phba);
12133 /* Disable interrupt and pci device */
12134 lpfc_sli_disable_intr(phba);
12135 pci_disable_device(phba->pcidev);
12139 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
12140 * @phba: pointer to lpfc hba data structure.
12142 * This routine is called to prepare the SLI3 device for PCI slot permanently
12143 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
12144 * pending I/Os as the pci device is permanently disabled.
12147 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
12149 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12150 "2711 PCI channel permanent disable for failure\n");
12151 /* Block all SCSI devices' I/Os on the host */
12152 lpfc_scsi_dev_block(phba);
12154 /* stop all timers */
12155 lpfc_stop_hba_timers(phba);
12157 /* Clean up all driver's outstanding SCSI I/Os */
12158 lpfc_sli_flush_fcp_rings(phba);
12162 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
12163 * @pdev: pointer to PCI device.
12164 * @state: the current PCI connection state.
12166 * This routine is called from the PCI subsystem for I/O error handling to
12167 * a device with the SLI-3 interface spec. This function is called by the PCI
12168 * subsystem after a PCI bus error affecting this device has been detected.
12169 * When this function is invoked, it will need to stop all the I/Os and
12170 * interrupt(s) to the device. Once that is done, it will return
12171 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
12172 * as desired.
12175 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
12176 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
12177 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
12179 static pci_ers_result_t
12180 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
12182 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12183 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12185 switch (state) {
12186 case pci_channel_io_normal:
12187 /* Non-fatal error, prepare for recovery */
12188 lpfc_sli_prep_dev_for_recover(phba);
12189 return PCI_ERS_RESULT_CAN_RECOVER;
12190 case pci_channel_io_frozen:
12191 /* Fatal error, prepare for slot reset */
12192 lpfc_sli_prep_dev_for_reset(phba);
12193 return PCI_ERS_RESULT_NEED_RESET;
12194 case pci_channel_io_perm_failure:
12195 /* Permanent failure, prepare for device down */
12196 lpfc_sli_prep_dev_for_perm_failure(phba);
12197 return PCI_ERS_RESULT_DISCONNECT;
12198 default:
12199 /* Unknown state, prepare and request slot reset */
12200 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12201 "0472 Unknown PCI error state: x%x\n", state);
12202 lpfc_sli_prep_dev_for_reset(phba);
12203 return PCI_ERS_RESULT_NEED_RESET;
12208 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
12209 * @pdev: pointer to PCI device.
12211 * This routine is called from the PCI subsystem for error handling to
12212 * device with SLI-3 interface spec. This is called after PCI bus has been
12213 * reset to restart the PCI card from scratch, as if from a cold-boot.
12214 * During the PCI subsystem error recovery, after driver returns
12215 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
12216 * recovery and then call this routine before calling the .resume method
12217 * to recover the device. This function will initialize the HBA device,
12218 * enable the interrupt, but it will just put the HBA in an offline state
12219 * without passing any I/O traffic.
12222 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
12223 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
12225 static pci_ers_result_t
12226 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
12228 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12229 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12230 struct lpfc_sli *psli = &phba->sli;
12231 uint32_t intr_mode;
12233 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
12234 if (pci_enable_device_mem(pdev)) {
12235 printk(KERN_ERR "lpfc: Cannot re-enable "
12236 "PCI device after reset.\n");
12237 return PCI_ERS_RESULT_DISCONNECT;
12238 }
12240 pci_restore_state(pdev);
12243 * As the new kernel behavior of pci_restore_state() API call clears
12244 * device saved_state flag, need to save the restored state again.
12246 pci_save_state(pdev);
12248 if (pdev->is_busmaster)
12249 pci_set_master(pdev);
12251 spin_lock_irq(&phba->hbalock);
12252 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
12253 spin_unlock_irq(&phba->hbalock);
12255 /* Configure and enable interrupt */
12256 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
12257 if (intr_mode == LPFC_INTR_ERROR) {
12258 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12259 "0427 Cannot re-enable interrupt after "
12260 "slot reset.\n");
12261 return PCI_ERS_RESULT_DISCONNECT;
12262 } else
12263 phba->intr_mode = intr_mode;
12265 /* Take device offline, it will perform cleanup */
12266 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12267 lpfc_offline(phba);
12268 lpfc_sli_brdrestart(phba);
12270 /* Log the current active interrupt mode */
12271 lpfc_log_intr_mode(phba, phba->intr_mode);
12273 return PCI_ERS_RESULT_RECOVERED;
12277 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
12278 * @pdev: pointer to PCI device
12280 * This routine is called from the PCI subsystem for error handling on a
12281 * device with the SLI-3 interface spec. It is called when kernel error
12282 * recovery tells the lpfc driver that it is ok to resume normal PCI
12283 * operation after PCI bus error recovery. After this call, traffic can
12284 * start to flow from this device again.
12287 lpfc_io_resume_s3(struct pci_dev *pdev)
12289 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12290 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12292 /* Bring device online, it will be no-op for non-fatal error resume */
12293 lpfc_online(phba);
12294 }
12297 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
12298 * @phba: pointer to lpfc hba data structure.
12300 * returns the number of ELS/CT IOCBs to reserve
12303 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
12304 {
12305 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
12307 if (phba->sli_rev == LPFC_SLI_REV4) {
12308 if (max_xri <= 100)
12309 return 10;
12310 else if (max_xri <= 256)
12311 return 25;
12312 else if (max_xri <= 512)
12313 return 50;
12314 else if (max_xri <= 1024)
12315 return 100;
12316 else if (max_xri <= 1536)
12317 return 150;
12318 else if (max_xri <= 2048)
12319 return 200;
12320 else
12321 return 250;
12322 } else
12323 return 0;
12324 }
12327 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
12328 * @phba: pointer to lpfc hba data structure.
12330 * returns the number of ELS/CT + NVMET IOCBs to reserve
12333 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
12334 {
12335 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
12337 if (phba->nvmet_support)
12338 max_xri += LPFC_NVMET_BUF_POST;
12339 return max_xri;
12340 }
12344 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
12345 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
12346 const struct firmware *fw)
12348 if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) ||
12349 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
12350 magic_number != MAGIC_NUMER_G6) ||
12351 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
12352 magic_number != MAGIC_NUMER_G7))
12353 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12354 "3030 This firmware version is not supported on "
12355 "this HBA model. Device:%x Magic:%x Type:%x "
12356 "ID:%x Size %d %zd\n",
12357 phba->pcidev->device, magic_number, ftype, fid,
12358 fsize, fw->size);
12359 else
12360 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12361 "3022 FW Download failed. Device:%x Magic:%x Type:%x "
12362 "ID:%x Size %d %zd\n",
12363 phba->pcidev->device, magic_number, ftype, fid,
12364 fsize, fw->size);
12365 }
12369 * lpfc_write_firmware - attempt to write a firmware image to the port
12370 * @fw: pointer to firmware image returned from request_firmware.
12371 * @phba: pointer to lpfc hba data structure.
12375 lpfc_write_firmware(const struct firmware *fw, void *context)
12377 struct lpfc_hba *phba = (struct lpfc_hba *)context;
12378 char fwrev[FW_REV_STR_SIZE];
12379 struct lpfc_grp_hdr *image;
12380 struct list_head dma_buffer_list;
12381 int i, rc = 0;
12382 struct lpfc_dmabuf *dmabuf, *next;
12383 uint32_t offset = 0, temp_offset = 0;
12384 uint32_t magic_number, ftype, fid, fsize;
12386 /* It can be null in no-wait mode, sanity check */
12387 if (!fw) {
12388 rc = -ENXIO;
12389 goto out;
12390 }
12391 image = (struct lpfc_grp_hdr *)fw->data;
12393 magic_number = be32_to_cpu(image->magic_number);
12394 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
12395 fid = bf_get_be32(lpfc_grp_hdr_id, image);
12396 fsize = be32_to_cpu(image->size);
12398 INIT_LIST_HEAD(&dma_buffer_list);
12399 lpfc_decode_firmware_rev(phba, fwrev, 1);
12400 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
12401 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12402 "3023 Updating Firmware, Current Version:%s "
12403 "New Version:%s\n",
12404 fwrev, image->revision);
12405 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
12406 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
12407 GFP_KERNEL);
12408 if (!dmabuf) {
12409 rc = -ENOMEM;
12410 goto release_out;
12411 }
12412 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
12413 SLI4_PAGE_SIZE,
12414 &dmabuf->phys,
12415 GFP_KERNEL);
12416 if (!dmabuf->virt) {
12417 kfree(dmabuf);
12418 rc = -ENOMEM;
12419 goto release_out;
12420 }
12421 list_add_tail(&dmabuf->list, &dma_buffer_list);
12422 }
12423 while (offset < fw->size) {
12424 temp_offset = offset;
12425 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
12426 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
12427 memcpy(dmabuf->virt,
12428 fw->data + temp_offset,
12429 fw->size - temp_offset);
12430 temp_offset = fw->size;
12431 break;
12432 }
12433 memcpy(dmabuf->virt, fw->data + temp_offset,
12434 SLI4_PAGE_SIZE);
12435 temp_offset += SLI4_PAGE_SIZE;
12436 }
12437 rc = lpfc_wr_object(phba, &dma_buffer_list,
12438 (fw->size - offset), &offset);
12439 if (rc) {
12440 lpfc_log_write_firmware_error(phba, offset,
12441 magic_number, ftype, fid, fsize, fw);
12442 goto release_out;
12443 }
12444 }
12445 rc = offset;
12446 } else
12447 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12448 "3029 Skipped Firmware update, Current "
12449 "Version:%s New Version:%s\n",
12450 fwrev, image->revision);
12452 release_out:
12453 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
12454 list_del(&dmabuf->list);
12455 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
12456 dmabuf->virt, dmabuf->phys);
12457 kfree(dmabuf);
12458 }
12459 release_firmware(fw);
12460 out:
12461 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12462 "3024 Firmware update done: %d.\n", rc);
12463 return;
12464 }
12467 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
12468 * @phba: pointer to lpfc hba data structure.
12470 * This routine is called to perform Linux generic firmware upgrade on
12471 * devices that support this feature.
12474 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
12476 uint8_t file_name[ELX_MODEL_NAME_SIZE];
12477 int ret;
12478 const struct firmware *fw;
12480 /* Only supported on SLI4 interface type 2 for now */
12481 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
12482 LPFC_SLI_INTF_IF_TYPE_2)
12483 return -EPERM;
12485 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
12487 if (fw_upgrade == INT_FW_UPGRADE) {
12488 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
12489 file_name, &phba->pcidev->dev,
12490 GFP_KERNEL, (void *)phba,
12491 lpfc_write_firmware);
12492 } else if (fw_upgrade == RUN_FW_UPGRADE) {
12493 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
12494 if (!ret)
12495 lpfc_write_firmware(fw, (void *)phba);
12496 } else {
12497 ret = -EINVAL;
12498 }
12500 return ret;
12501 }
12504 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
12505 * @pdev: pointer to PCI device
12506 * @pid: pointer to PCI device identifier
12508 * This routine is called from the kernel's PCI subsystem for devices with
12509 * the SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
12510 * presented on the PCI bus, the kernel PCI subsystem looks at PCI device-
12511 * specific information of the device and driver to determine whether the
12512 * driver can support this kind of device. If the match is successful, the
12513 * driver core invokes this routine. If this routine determines it can claim
12514 * the HBA, it does all the initialization that it needs to do to handle the
12515 * HBA properly.
12518 * 0 - driver can claim the device
12519 * negative value - driver can not claim the device
12522 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
12524 struct lpfc_hba *phba;
12525 struct lpfc_vport *vport = NULL;
12526 struct Scsi_Host *shost = NULL;
12527 int error;
12528 uint32_t cfg_mode, intr_mode;
12530 /* Allocate memory for HBA structure */
12531 phba = lpfc_hba_alloc(pdev);
12532 if (!phba)
12533 return -ENOMEM;
12535 /* Perform generic PCI device enabling operation */
12536 error = lpfc_enable_pci_dev(phba);
12537 if (error)
12538 goto out_free_phba;
12540 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
12541 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
12542 if (error)
12543 goto out_disable_pci_dev;
12545 /* Set up SLI-4 specific device PCI memory space */
12546 error = lpfc_sli4_pci_mem_setup(phba);
12547 if (error) {
12548 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12549 "1410 Failed to set up pci memory space.\n");
12550 goto out_disable_pci_dev;
12551 }
12553 /* Set up SLI-4 Specific device driver resources */
12554 error = lpfc_sli4_driver_resource_setup(phba);
12555 if (error) {
12556 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12557 "1412 Failed to set up driver resource.\n");
12558 goto out_unset_pci_mem_s4;
12559 }
12561 INIT_LIST_HEAD(&phba->active_rrq_list);
12562 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
12564 /* Set up common device driver resources */
12565 error = lpfc_setup_driver_resource_phase2(phba);
12566 if (error) {
12567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12568 "1414 Failed to set up driver resource.\n");
12569 goto out_unset_driver_resource_s4;
12570 }
12572 /* Get the default values for Model Name and Description */
12573 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
12575 /* Now, trying to enable interrupt and bring up the device */
12576 cfg_mode = phba->cfg_use_msi;
12578 /* Put device to a known state before enabling interrupt */
12579 phba->pport = NULL;
12580 lpfc_stop_port(phba);
12582 /* Configure and enable interrupt */
12583 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
12584 if (intr_mode == LPFC_INTR_ERROR) {
12585 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12586 "0426 Failed to enable interrupt.\n");
12587 error = -ENODEV;
12588 goto out_unset_driver_resource;
12589 }
12590 /* Default to single EQ for non-MSI-X */
12591 if (phba->intr_type != MSIX) {
12592 phba->cfg_irq_chann = 1;
12593 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
12594 if (phba->nvmet_support)
12595 phba->cfg_nvmet_mrq = 1;
12596 }
12597 }
12598 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
12600 /* Create SCSI host to the physical port */
12601 error = lpfc_create_shost(phba);
12602 if (error) {
12603 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12604 "1415 Failed to create scsi host.\n");
12605 goto out_disable_intr;
12606 }
12607 vport = phba->pport;
12608 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
12610 /* Configure sysfs attributes */
12611 error = lpfc_alloc_sysfs_attr(vport);
12612 if (error) {
12613 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12614 "1416 Failed to allocate sysfs attr\n");
12615 goto out_destroy_shost;
12616 }
12618 /* Set up SLI-4 HBA */
12619 if (lpfc_sli4_hba_setup(phba)) {
12620 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12621 "1421 Failed to set up hba\n");
12622 error = -ENODEV;
12623 goto out_free_sysfs_attr;
12624 }
12626 /* Log the current active interrupt mode */
12627 phba->intr_mode = intr_mode;
12628 lpfc_log_intr_mode(phba, intr_mode);
12630 /* Perform post initialization setup */
12631 lpfc_post_init_setup(phba);
12633 /* NVME support in FW, detected earlier in the driver load, corrects the
12634 * FC4 type, making a separate check for nvme_support unnecessary.
12635 */
12636 if (phba->nvmet_support == 0) {
12637 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
12638 /* Create NVME binding with nvme_fc_transport. This
12639 * ensures the vport is initialized. If the localport
12640 * create fails, it should not unload the driver to
12641 * support field issues.
12643 error = lpfc_nvme_create_localport(vport);
12644 if (error) {
12645 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12646 "6004 NVME registration "
12647 "failed, error x%x\n",
12648 error);
12649 }
12650 }
12651 }
12653 /* check for firmware upgrade or downgrade */
12654 if (phba->cfg_request_firmware_upgrade)
12655 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
12657 /* Check if there are static vports to be created. */
12658 lpfc_create_static_vport(phba);
12660 /* Enable RAS FW log support */
12661 lpfc_sli4_ras_setup(phba);
12663 return 0;
12665 out_free_sysfs_attr:
12666 lpfc_free_sysfs_attr(vport);
12667 out_destroy_shost:
12668 lpfc_destroy_shost(phba);
12669 out_disable_intr:
12670 lpfc_sli4_disable_intr(phba);
12671 out_unset_driver_resource:
12672 lpfc_unset_driver_resource_phase2(phba);
12673 out_unset_driver_resource_s4:
12674 lpfc_sli4_driver_resource_unset(phba);
12675 out_unset_pci_mem_s4:
12676 lpfc_sli4_pci_mem_unset(phba);
12677 out_disable_pci_dev:
12678 lpfc_disable_pci_dev(phba);
12679 if (shost)
12680 scsi_host_put(shost);
12681 out_free_phba:
12682 lpfc_hba_free(phba);
12683 return error;
12684 }
12687 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
12688 * @pdev: pointer to PCI device
12690 * This routine is called from the kernel's PCI subsystem for devices with
12691 * the SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
12692 * removed from PCI bus, it performs all the necessary cleanup for the HBA
12693 * device to be removed from the PCI subsystem properly.
12696 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
12698 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12699 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
12700 struct lpfc_vport **vports;
12701 struct lpfc_hba *phba = vport->phba;
12702 int i;
12704 /* Mark the device unloading flag */
12705 spin_lock_irq(&phba->hbalock);
12706 vport->load_flag |= FC_UNLOADING;
12707 spin_unlock_irq(&phba->hbalock);
12709 /* Free the HBA sysfs attributes */
12710 lpfc_free_sysfs_attr(vport);
12712 /* Release all the vports against this physical port */
12713 vports = lpfc_create_vport_work_array(phba);
12714 if (vports != NULL)
12715 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
12716 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
12717 continue;
12718 fc_vport_terminate(vports[i]->fc_vport);
12719 }
12720 lpfc_destroy_vport_work_array(phba, vports);
12722 /* Remove FC host and then SCSI host with the physical port */
12723 fc_remove_host(shost);
12724 scsi_remove_host(shost);
12726 /* Perform ndlp cleanup on the physical port. The nvme and nvmet
12727 * localports are destroyed afterwards to clean up all transport memory.
12728 */
12729 lpfc_cleanup(vport);
12730 lpfc_nvmet_destroy_targetport(phba);
12731 lpfc_nvme_destroy_localport(vport);
12733 /* De-allocate multi-XRI pools */
12734 if (phba->cfg_xri_rebalancing)
12735 lpfc_destroy_multixri_pools(phba);
12738 * Bring down the SLI Layer. This step disables all interrupts,
12739 * clears the rings, discards all mailbox commands, and resets
12740 * the HBA FCoE function.
12742 lpfc_debugfs_terminate(vport);
12744 lpfc_stop_hba_timers(phba);
12745 spin_lock_irq(&phba->port_list_lock);
12746 list_del_init(&vport->listentry);
12747 spin_unlock_irq(&phba->port_list_lock);
12749 /* Perform scsi free before driver resource_unset since scsi
12750 * buffers are released to their corresponding pools here.
12752 lpfc_io_free(phba);
12753 lpfc_free_iocb_list(phba);
12754 lpfc_sli4_hba_unset(phba);
12756 lpfc_unset_driver_resource_phase2(phba);
12757 lpfc_sli4_driver_resource_unset(phba);
12759 /* Unmap adapter Control and Doorbell registers */
12760 lpfc_sli4_pci_mem_unset(phba);
12762 /* Release PCI resources and disable device's PCI function */
12763 scsi_host_put(shost);
12764 lpfc_disable_pci_dev(phba);
12766 /* Finally, free the driver's device data structure */
12767 lpfc_hba_free(phba);
12768 return;
12769 }
12773 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
12774 * @pdev: pointer to PCI device
12775 * @msg: power management message
12777 * This routine is called from the kernel's PCI subsystem to support system
12778 * Power Management (PM) for devices with the SLI-4 interface spec. When PM
12779 * invokes this method, it quiesces the device by stopping the driver's worker
12780 * thread for the device, turning off the device's interrupt and DMA, and
12781 * bringing the device offline. Note that as the driver implements the minimum
12782 * PM requirements to a power-aware driver's PM support for suspend/resume --
12783 * all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
12784 * method call are treated as SUSPEND and the driver fully reinitializes its
12785 * device during the resume() method call -- the driver sets the device to
12786 * PCI_D3hot state in PCI config space instead of setting it according to the
12787 * @msg provided by the PM.
12790 * 0 - driver suspended the device
12794 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
12796 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12797 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12799 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12800 "2843 PCI device Power Management suspend.\n");
12802 /* Bring down the device */
12803 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12804 lpfc_offline(phba);
12805 kthread_stop(phba->worker_thread);
12807 /* Disable interrupt from device */
12808 lpfc_sli4_disable_intr(phba);
12809 lpfc_sli4_queue_destroy(phba);
12811 /* Save device state to PCI config space */
12812 pci_save_state(pdev);
12813 pci_set_power_state(pdev, PCI_D3hot);
12815 return 0;
12816 }
12819 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
12820 * @pdev: pointer to PCI device
12822 * This routine is called from the kernel's PCI subsystem to support system
12823 * Power Management (PM) for devices with the SLI-4 interface spec. When PM
12824 * invokes this method, it restores the device's PCI config space state and
12825 * fully reinitializes the device and brings it online. Note that as the
12826 * driver implements the minimum PM requirements to a power-aware driver's PM
12827 * for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
12828 * FREEZE) to the suspend() method call are treated as SUSPEND and the driver
12829 * fully reinitializes its device during the resume() method call -- the
12830 * device will be set to PCI_D0 directly in PCI config space before restoring
12831 * the state.
12834 * 0 - driver resumed the device
12838 lpfc_pci_resume_one_s4(struct pci_dev *pdev)
12840 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12841 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12842 uint32_t intr_mode;
12843 int error;
12845 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12846 "0292 PCI device Power Management resume.\n");
12848 /* Restore device state from PCI config space */
12849 pci_set_power_state(pdev, PCI_D0);
12850 pci_restore_state(pdev);
12853 * As the new kernel behavior of pci_restore_state() API call clears
12854 * device saved_state flag, need to save the restored state again.
12856 pci_save_state(pdev);
12858 if (pdev->is_busmaster)
12859 pci_set_master(pdev);
12861 /* Startup the kernel thread for this host adapter. */
12862 phba->worker_thread = kthread_run(lpfc_do_work, phba,
12863 "lpfc_worker_%d", phba->brd_no);
12864 if (IS_ERR(phba->worker_thread)) {
12865 error = PTR_ERR(phba->worker_thread);
12866 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12867 "0293 PM resume failed to start worker "
12868 "thread: error=x%x.\n", error);
12869 return error;
12870 }
12872 /* Configure and enable interrupt */
12873 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
12874 if (intr_mode == LPFC_INTR_ERROR) {
12875 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12876 "0294 PM resume Failed to enable interrupt\n");
12877 return -EIO;
12878 } else
12879 phba->intr_mode = intr_mode;
12881 /* Restart HBA and bring it online */
12882 lpfc_sli_brdrestart(phba);
12883 lpfc_online(phba);
12885 /* Log the current active interrupt mode */
12886 lpfc_log_intr_mode(phba, phba->intr_mode);
12888 return 0;
12889 }
12892 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
12893 * @phba: pointer to lpfc hba data structure.
12895 * This routine is called to prepare the SLI4 device for PCI slot recover. It
12896 * aborts all the outstanding SCSI I/Os to the pci device.
12899 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
12901 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12902 "2828 PCI channel I/O abort preparing for recovery\n");
12903 /*
12904 * There may be errored I/Os through the HBA, abort all I/Os on txcmplq
12905 * and let the SCSI mid-layer retry them to recover.
12906 */
12907 lpfc_sli_abort_fcp_rings(phba);
12911 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
12912 * @phba: pointer to lpfc hba data structure.
12914 * This routine is called to prepare the SLI4 device for PCI slot reset. It
12915 * disables the device interrupt and pci device, and aborts the internal FCP
12916 * pending I/Os.
12919 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
12921 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12922 "2826 PCI channel disable preparing for reset\n");
12924 /* Block any management I/Os to the device */
12925 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
12927 /* Block all SCSI devices' I/Os on the host */
12928 lpfc_scsi_dev_block(phba);
12930 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
12931 lpfc_sli_flush_fcp_rings(phba);
12933 /* Flush the outstanding NVME IOs if fc4 type enabled. */
12934 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
12935 lpfc_sli_flush_nvme_rings(phba);
12937 /* stop all timers */
12938 lpfc_stop_hba_timers(phba);
12940 /* Disable interrupt and pci device */
12941 lpfc_sli4_disable_intr(phba);
12942 lpfc_sli4_queue_destroy(phba);
12943 pci_disable_device(phba->pcidev);
12947 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
12948 * @phba: pointer to lpfc hba data structure.
12950 * This routine is called to prepare the SLI4 device for PCI slot permanently
12951 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
12952 * pending I/Os as the pci device is permanently disabled.
12955 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
12957 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12958 "2827 PCI channel permanent disable for failure\n");
12960 /* Block all SCSI devices' I/Os on the host */
12961 lpfc_scsi_dev_block(phba);
12963 /* stop all timers */
12964 lpfc_stop_hba_timers(phba);
12966 /* Clean up all driver's outstanding SCSI I/Os */
12967 lpfc_sli_flush_fcp_rings(phba);
12969 /* Flush the outstanding NVME IOs if fc4 type enabled. */
12970 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
12971 lpfc_sli_flush_nvme_rings(phba);
12975 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
12976 * @pdev: pointer to PCI device.
12977 * @state: the current PCI connection state.
12979 * This routine is called from the PCI subsystem for error handling on a
12980 * device with the SLI-4 interface spec. This function is called by the PCI subsystem
12981 * after a PCI bus error affecting this device has been detected. When this
12982 * function is invoked, it will need to stop all the I/Os and interrupt(s)
12983 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
12984 * for the PCI subsystem to perform proper recovery as desired.
12987 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
12988 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
12990 static pci_ers_result_t
12991 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
12993 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12994 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12996 switch (state) {
12997 case pci_channel_io_normal:
12998 /* Non-fatal error, prepare for recovery */
12999 lpfc_sli4_prep_dev_for_recover(phba);
13000 return PCI_ERS_RESULT_CAN_RECOVER;
13001 case pci_channel_io_frozen:
13002 /* Fatal error, prepare for slot reset */
13003 lpfc_sli4_prep_dev_for_reset(phba);
13004 return PCI_ERS_RESULT_NEED_RESET;
13005 case pci_channel_io_perm_failure:
13006 /* Permanent failure, prepare for device down */
13007 lpfc_sli4_prep_dev_for_perm_failure(phba);
13008 return PCI_ERS_RESULT_DISCONNECT;
13009 default:
13010 /* Unknown state, prepare and request slot reset */
13011 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13012 "2825 Unknown PCI error state: x%x\n", state);
13013 lpfc_sli4_prep_dev_for_reset(phba);
13014 return PCI_ERS_RESULT_NEED_RESET;
13019 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
13020 * @pdev: pointer to PCI device.
13022 * This routine is called from the PCI subsystem for error handling to device
13023 * with SLI-4 interface spec. It is called after PCI bus has been reset to
13024 * restart the PCI card from scratch, as if from a cold-boot. During the
13025 * PCI subsystem error recovery, after the driver returns
13026 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
13027 * recovery and then call this routine before calling the .resume method to
13028 * recover the device. This function will initialize the HBA device, enable
13029 * the interrupt, but it will just put the HBA in an offline state without
13030 * passing any I/O traffic.
13033 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
13034 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13036 static pci_ers_result_t
13037 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
13039 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13040 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13041 struct lpfc_sli *psli = &phba->sli;
13042 uint32_t intr_mode;
13044 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
13045 if (pci_enable_device_mem(pdev)) {
13046 printk(KERN_ERR "lpfc: Cannot re-enable "
13047 "PCI device after reset.\n");
13048 return PCI_ERS_RESULT_DISCONNECT;
13049 }
13051 pci_restore_state(pdev);
13054 * As the new kernel behavior of pci_restore_state() API call clears
13055 * device saved_state flag, need to save the restored state again.
13057 pci_save_state(pdev);
13059 if (pdev->is_busmaster)
13060 pci_set_master(pdev);
13062 spin_lock_irq(&phba->hbalock);
13063 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
13064 spin_unlock_irq(&phba->hbalock);
13066 /* Configure and enable interrupt */
13067 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
13068 if (intr_mode == LPFC_INTR_ERROR) {
13069 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13070 "2824 Cannot re-enable interrupt after "
13071 "slot reset.\n");
13072 return PCI_ERS_RESULT_DISCONNECT;
13073 } else
13074 phba->intr_mode = intr_mode;
13076 /* Log the current active interrupt mode */
13077 lpfc_log_intr_mode(phba, phba->intr_mode);
13079 return PCI_ERS_RESULT_RECOVERED;
13083 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
13084 * @pdev: pointer to PCI device
13086 * This routine is called from the PCI subsystem for error handling on a
13087 * device with the SLI-4 interface spec. It is called when kernel error
13088 * recovery tells the lpfc driver that it is ok to resume normal PCI
13089 * operation after PCI bus error recovery. After this call, traffic can
13090 * start to flow from this device again.
13093 lpfc_io_resume_s4(struct pci_dev *pdev)
13095 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13096 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13098 /*
13099 * In case of slot reset, as function reset is performed through a
13100 * mailbox command which needs DMA to be enabled, this operation
13101 * has to be moved to the io resume phase. Taking the device offline
13102 * will perform the necessary cleanup.
13103 */
13104 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
13105 /* Perform device reset */
13106 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
13107 lpfc_offline(phba);
13108 lpfc_sli_brdrestart(phba);
13109 /* Bring the device back online */
13110 lpfc_online(phba);
13111 }
13112 }
13115 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
13116 * @pdev: pointer to PCI device
13117 * @pid: pointer to PCI device identifier
13119 * This routine is to be registered to the kernel's PCI subsystem. When an
13120 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
13121 * looks at PCI device-specific information of the device and driver to
13122 * determine whether the driver can support this kind of device. If the match
13123 * is successful, the driver core invokes this routine. This routine dispatches
13124 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
13125 * do all the initialization that it needs to do to handle the HBA device
13126 * properly.
13129 * 0 - driver can claim the device
13130 * negative value - driver can not claim the device
13133 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
13135 int rc;
13136 struct lpfc_sli_intf intf;
13138 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
13139 return -ENODEV;
13141 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
13142 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
13143 rc = lpfc_pci_probe_one_s4(pdev, pid);
13144 else
13145 rc = lpfc_pci_probe_one_s3(pdev, pid);
13147 return rc;
13148 }
13151 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
13152 * @pdev: pointer to PCI device
13154 * This routine is to be registered to the kernel's PCI subsystem. When an
13155 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
13156 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
13157 * remove routine, which will perform all the necessary cleanup for the
13158 * device to be removed from the PCI subsystem properly.
13161 lpfc_pci_remove_one(struct pci_dev *pdev)
13163 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13164 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13166 switch (phba->pci_dev_grp) {
13167 case LPFC_PCI_DEV_LP:
13168 lpfc_pci_remove_one_s3(pdev);
13169 break;
13170 case LPFC_PCI_DEV_OC:
13171 lpfc_pci_remove_one_s4(pdev);
13172 break;
13173 default:
13174 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13175 "1424 Invalid PCI device group: 0x%x\n",
13176 phba->pci_dev_grp);
13177 break;
13178 }
13179 return;
13180 }
13183 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
13184 * @pdev: pointer to PCI device
13185 * @msg: power management message
13187 * This routine is to be registered to the kernel's PCI subsystem to support
13188 * system Power Management (PM). When PM invokes this method, it dispatches
13189 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
13190 * suspend the device.
13193 * 0 - driver suspended the device
13197 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
13199 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13200 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13201 int rc = -ENODEV;
13203 switch (phba->pci_dev_grp) {
13204 case LPFC_PCI_DEV_LP:
13205 rc = lpfc_pci_suspend_one_s3(pdev, msg);
13206 break;
13207 case LPFC_PCI_DEV_OC:
13208 rc = lpfc_pci_suspend_one_s4(pdev, msg);
13209 break;
13210 default:
13211 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13212 "1425 Invalid PCI device group: 0x%x\n",
13213 phba->pci_dev_grp);
13214 break;
13215 }
13216 return rc;
13217 }
13220 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
13221 * @pdev: pointer to PCI device
13223 * This routine is to be registered to the kernel's PCI subsystem to support
13224 * system Power Management (PM). When PM invokes this method, it dispatches
13225 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
13226 * resume the device.
13229 * 0 - driver resumed the device
13233 lpfc_pci_resume_one(struct pci_dev *pdev)
13235 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13236 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13237 int rc = -ENODEV;
13239 switch (phba->pci_dev_grp) {
13240 case LPFC_PCI_DEV_LP:
13241 rc = lpfc_pci_resume_one_s3(pdev);
13242 break;
13243 case LPFC_PCI_DEV_OC:
13244 rc = lpfc_pci_resume_one_s4(pdev);
13245 break;
13246 default:
13247 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13248 "1426 Invalid PCI device group: 0x%x\n",
13249 phba->pci_dev_grp);
13250 break;
13251 }
13252 return rc;
13253 }
13256 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
13257 * @pdev: pointer to PCI device.
13258 * @state: the current PCI connection state.
13260 * This routine is registered to the PCI subsystem for error handling. This
13261 * function is called by the PCI subsystem after a PCI bus error affecting
13262 * this device has been detected. When this routine is invoked, it dispatches
13263 * the action to the proper SLI-3 or SLI-4 device error detected handling
13264 * routine, which will perform the proper error detected operation.
13267 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
13268 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13270 static pci_ers_result_t
13271 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
13273 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13274 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13275 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13277 switch (phba->pci_dev_grp) {
13278 case LPFC_PCI_DEV_LP:
13279 rc = lpfc_io_error_detected_s3(pdev, state);
13280 break;
13281 case LPFC_PCI_DEV_OC:
13282 rc = lpfc_io_error_detected_s4(pdev, state);
13283 break;
13284 default:
13285 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13286 "1427 Invalid PCI device group: 0x%x\n",
13287 phba->pci_dev_grp);
13288 break;
13289 }
13290 return rc;
13291 }
13294 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
13295 * @pdev: pointer to PCI device.
13297 * This routine is registered to the PCI subsystem for error handling. This
13298 * function is called after PCI bus has been reset to restart the PCI card
13299 * from scratch, as if from a cold-boot. When this routine is invoked, it
13300 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
13301 * routine, which will perform the proper device reset.
13304 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
13305 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13307 static pci_ers_result_t
13308 lpfc_io_slot_reset(struct pci_dev *pdev)
13310 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13311 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13312 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13314 switch (phba->pci_dev_grp) {
13315 case LPFC_PCI_DEV_LP:
13316 rc = lpfc_io_slot_reset_s3(pdev);
13317 break;
13318 case LPFC_PCI_DEV_OC:
13319 rc = lpfc_io_slot_reset_s4(pdev);
13320 break;
13321 default:
13322 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13323 "1428 Invalid PCI device group: 0x%x\n",
13324 phba->pci_dev_grp);
13325 break;
13326 }
13327 return rc;
13328 }
13331 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
13332 * @pdev: pointer to PCI device
13334 * This routine is registered to the PCI subsystem for error handling. It
13335 * is called when kernel error recovery tells the lpfc driver that it is
13336 * OK to resume normal PCI operation after PCI bus error recovery. When
13337 * this routine is invoked, it dispatches the action to the proper SLI-3
13338 * or SLI-4 device io_resume routine, which will resume the device operation.
13341 lpfc_io_resume(struct pci_dev *pdev)
13343 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13344 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13346 switch (phba->pci_dev_grp) {
13347 case LPFC_PCI_DEV_LP:
13348 lpfc_io_resume_s3(pdev);
13349 break;
13350 case LPFC_PCI_DEV_OC:
13351 lpfc_io_resume_s4(pdev);
13352 break;
13353 default:
13354 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13355 "1429 Invalid PCI device group: 0x%x\n",
13356 phba->pci_dev_grp);
13357 break;
13358 }
13359 return;
13360 }
13363 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
13364 * @phba: pointer to lpfc hba data structure.
13366 * This routine checks to see if OAS is supported for this adapter. If
13367 * supported, the configure Flash Optimized Fabric flag is set. Otherwise,
13368 * the enable oas flag is cleared and the pool created for OAS device data
13369 * is destroyed.
13373 lpfc_sli4_oas_verify(struct lpfc_hba *phba)
13376 if (!phba->cfg_EnableXLane)
13377 return;
13379 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
13380 phba->cfg_fof = 1;
13381 } else {
13382 phba->cfg_fof = 0;
13383 if (phba->device_data_mem_pool)
13384 mempool_destroy(phba->device_data_mem_pool);
13385 phba->device_data_mem_pool = NULL;
13386 }
13388 return;
13389 }
13392 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
13393 * @phba: pointer to lpfc hba data structure.
13395 * This routine checks to see if RAS is supported by the adapter and, if so,
13396 * whether this is the PCI function through which RAS support is to be enabled.
13399 lpfc_sli4_ras_init(struct lpfc_hba *phba)
13401 switch (phba->pcidev->device) {
13402 case PCI_DEVICE_ID_LANCER_G6_FC:
13403 case PCI_DEVICE_ID_LANCER_G7_FC:
13404 phba->ras_fwlog.ras_hwsupport = true;
13405 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
13406 phba->cfg_ras_fwlog_buffsize)
13407 phba->ras_fwlog.ras_enabled = true;
13408 else
13409 phba->ras_fwlog.ras_enabled = false;
13410 break;
13411 default:
13412 phba->ras_fwlog.ras_hwsupport = false;
13413 }
13414 }
13417 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
13419 static const struct pci_error_handlers lpfc_err_handler = {
13420 .error_detected = lpfc_io_error_detected,
13421 .slot_reset = lpfc_io_slot_reset,
13422 .resume = lpfc_io_resume,
13423 };
13425 static struct pci_driver lpfc_driver = {
13426 .name = LPFC_DRIVER_NAME,
13427 .id_table = lpfc_id_table,
13428 .probe = lpfc_pci_probe_one,
13429 .remove = lpfc_pci_remove_one,
13430 .shutdown = lpfc_pci_remove_one,
13431 .suspend = lpfc_pci_suspend_one,
13432 .resume = lpfc_pci_resume_one,
13433 .err_handler = &lpfc_err_handler,
13434 };
13436 static const struct file_operations lpfc_mgmt_fop = {
13437 .owner = THIS_MODULE,
13438 };
13440 static struct miscdevice lpfc_mgmt_dev = {
13441 .minor = MISC_DYNAMIC_MINOR,
13442 .name = "lpfcmgmt",
13443 .fops = &lpfc_mgmt_fop,
13444 };
13447 * lpfc_init - lpfc module initialization routine
13449 * This routine is to be invoked when the lpfc module is loaded into the
13450 * kernel. The special kernel macro module_init() is used to indicate the
13451 * role of this routine to the kernel as lpfc module entry point.
13454 * 0 - successful
13455 * -ENOMEM - FC attach transport failed
13456 * all others - failed
13457 **/
13458 static int __init
13459 lpfc_init(void)
13460 {
13461 int error = 0;
13463 printk(LPFC_MODULE_DESC "\n");
13464 printk(LPFC_COPYRIGHT "\n");
13466 error = misc_register(&lpfc_mgmt_dev);
13467 if (error)
13468 printk(KERN_ERR "Could not register lpfcmgmt device, "
13469 "misc_register returned with status %d\n", error);
13471 lpfc_transport_functions.vport_create = lpfc_vport_create;
13472 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
13473 lpfc_transport_template =
13474 fc_attach_transport(&lpfc_transport_functions);
13475 if (lpfc_transport_template == NULL)
13476 return -ENOMEM;
13477 lpfc_vport_transport_template =
13478 fc_attach_transport(&lpfc_vport_transport_functions);
13479 if (lpfc_vport_transport_template == NULL) {
13480 fc_release_transport(lpfc_transport_template);
13481 return -ENOMEM;
13482 }
13483 lpfc_nvme_cmd_template();
13484 lpfc_nvmet_cmd_template();
13486 /* Initialize in case vector mapping is needed */
13487 lpfc_present_cpu = num_present_cpus();
13489 error = pci_register_driver(&lpfc_driver);
13490 if (error) {
13491 fc_release_transport(lpfc_transport_template);
13492 fc_release_transport(lpfc_vport_transport_template);
13493 }
13495 return error;
13496 }
13499 * lpfc_exit - lpfc module removal routine
13501 * This routine is invoked when the lpfc module is removed from the kernel.
13502 * The special kernel macro module_exit() is used to indicate the role of
13503 * this routine to the kernel as lpfc module exit point.
13504 **/
13505 static void __exit
13506 lpfc_exit(void)
13507 {
13508 misc_deregister(&lpfc_mgmt_dev);
13509 pci_unregister_driver(&lpfc_driver);
13510 fc_release_transport(lpfc_transport_template);
13511 fc_release_transport(lpfc_vport_transport_template);
13512 if (_dump_buf_data) {
13513 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
13514 "_dump_buf_data at 0x%p\n",
13515 (1L << _dump_buf_data_order), _dump_buf_data);
13516 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
13517 }
13519 if (_dump_buf_dif) {
13520 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
13521 "_dump_buf_dif at 0x%p\n",
13522 (1L << _dump_buf_dif_order), _dump_buf_dif);
13523 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
13524 }
13525 idr_destroy(&lpfc_hba_index);
13526 }
13528 module_init(lpfc_init);
13529 module_exit(lpfc_exit);
13530 MODULE_LICENSE("GPL");
13531 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
13532 MODULE_AUTHOR("Broadcom");
13533 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);