/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
static int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set in lpfc_get_cfgparam()
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);

	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
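
/*
 * The READ_NVPARM/READ_REV/DUMP_VPD exchanges above all follow the same
 * polled-mailbox pattern. A minimal sketch of that pattern (illustrative
 * only, kept out of the build; the issue_polled_mbox() name is hypothetical
 * and not part of this driver):
 */
#if 0
static int issue_polled_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	/* Mailbox queue elements come from the HBA's mailbox mempool */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;

	lpfc_read_rev(phba, pmb);	/* build the command in pmb->u.mb */
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);	/* spin for compl */
	if (rc != MBX_SUCCESS) {
		/* with MBX_POLL the caller always owns and frees pmb */
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}
	/* ... consume the results from pmb->u.mb here ... */
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
#endif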

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's mailbox command that
 * configures asynchronous event support on the device. If the mailbox
 * command returns successfully, the internal async event support flag is
 * set to 1; otherwise it is set to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command that fetches
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the Option ROM version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option ROM version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
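
/*
 * Worked example of the decode above (field values made up for
 * illustration): with prg->ver = 10, prg->rev = 2, prg->lev = 8,
 * prg->dist = 1 ('a') and prg->num = 3, the second snprintf() produces
 * "10.28a3". When dist == 3 and num == 0 the distribution suffix is
 * dropped and only the "%d.%d%d" form is reported, e.g. "10.28".
 */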

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *                         cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
static void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, then set the Param changes flag
	 * to unreg the login
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		   sizeof(struct lpfc_name)))
		vport->vport_flag |= FAWWPN_PARAM_CHG;

	if (vport->fc_portname.u.wwn[0] == 0 ||
	    vport->phba->cfg_soft_wwpn ||
	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
	    vport->vport_flag & FAWWPN_SET) {
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
		vport->vport_flag &= ~FAWWPN_SET;
		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
			vport->vport_flag |= FAWWPN_SET;
	}
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}
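
/*
 * Summary of the WWPN selection above, highest precedence first (derived
 * from the code paths in this routine, not a separate specification):
 *   1. a user-supplied soft WWPN (cfg_soft_wwpn);
 *   2. a fabric-assigned WWPN (vendor version level 1 plus the
 *      FAPWWN_KEY_VENDOR key, or a previously latched FAWWPN_SET flag);
 *   3. an empty fc_portname, which adopts the service-parameter name.
 * Otherwise the current fc_portname is pushed back into the service
 * parameters, and a changed name latches FAWWPN_PARAM_CHG so the login
 * can be unregistered.
 */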

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: posting IOCB buffers, enabling appropriate host interrupt
 * attentions, setting up ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3359 HBA queue depth changed from %d to %d\n",
			phba->cfg_hba_queue_depth,
			mb->un.varRdConfig.max_xri);
		phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option ROM version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return codes
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}
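
/*
 * Callers reach this routine through the phba->lpfc_hba_init_link method
 * pointer, e.g. the delayed link-up path in lpfc_config_port_post() above.
 * A minimal sketch of such a caller (illustrative only, kept out of the
 * build; example_delayed_link_up() is a hypothetical name):
 */
#if 0
static void example_delayed_link_up(struct lpfc_hba *phba)
{
	int rc;

	/* bring the link up only once lpfc_suppress_link_up is cleared */
	if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			pr_err("INIT_LINK failed: %d\n", rc);
	}
}
#endif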

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return codes
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
	     !(phba->lmt & LMT_64Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return codes
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 *	rspiocb which got deferred
 *
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						 cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or DOA
			 * Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);

	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */

	spin_lock_irq(&phba->hbalock);  /* required for lpfc_els_sgl_list and */
					/* scsl_buf_list */
	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_els_sgl_list);

	spin_unlock(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
	 */
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}
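
/*
 * lpfc_hba_down_post is one of several SLI-revision specific methods
 * dispatched through function pointers in struct lpfc_hba. A sketch of how
 * the _s3/_s4 variants would be bound (illustrative only, kept out of the
 * build; the real assignments live in the driver's API table setup, not
 * here, and example_bind_api() is a hypothetical name):
 */
#if 0
static void example_bind_api(struct lpfc_hba *phba, int sli_rev)
{
	if (sli_rev == LPFC_SLI_REV4)
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
	else
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
}
#endif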

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: pointer to the timer_list embedded in the lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
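
/*
 * from_timer() above recovers the lpfc_hba from the embedded timer_list,
 * which only works because the timer was registered with timer_setup()
 * against that member. A sketch of the pairing (illustrative only, kept out
 * of the build; the real registration happens during adapter setup
 * elsewhere in the driver):
 */
#if 0
static void example_register_hb_timer(struct lpfc_hba *phba)
{
	/* binds phba->hb_tmofunc to lpfc_hb_timeout for from_timer() */
	timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
}
#endif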

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: pointer to the timer_list embedded in the lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up the
 * heart-beat timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds
 * and mark the heart-beat outstanding state. Once the mailbox command comes
 * back and no error conditions are detected, the heart-beat mailbox command
 * timer is reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat
 * outstanding state is cleared for the next heart-beat. If the timer expired
 * with the heart-beat outstanding state set, the driver will put the HBA
 * offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}

static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba, eq_delay_work);
	struct lpfc_eq_intr_info *eqi, *eqi_new;
	struct lpfc_queue *eq, *eq_next;
	unsigned char *ena_delay = NULL;
	uint32_t usdelay;
	int i;

	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
			    GFP_KERNEL);
	if (!ena_delay)
		goto requeue;

	for (i = 0; i < phba->cfg_irq_chann; i++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
		if (!eq)
			continue;
		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
			ena_delay[eq->last_cpu] = 1;
		}
	}

	for_each_present_cpu(i) {
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
		if (ena_delay[i]) {
			usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
			if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
				usdelay = LPFC_MAX_AUTO_EQ_DELAY;
		} else {
			usdelay = 0;
		}

		eqi->icnt = 0;

		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
			if (unlikely(eq->last_cpu != i)) {
				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
						      eq->last_cpu);
				list_move_tail(&eq->cpu_list, &eqi_new->list);
				continue;
			}
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}

	kfree(ena_delay);

requeue:
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}
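
/*
 * Worked example of the delay heuristic above (constant values here are
 * for illustration; the real ones are defined in the driver headers): with
 * an LPFC_EQ_DELAY_STEP of, say, 15us, an EQ whose per-cpu interrupt count
 * eqi->icnt reached 4096 in the last window yields
 * usdelay = (4096 >> 10) * 15 = 60us, clamped to LPFC_MAX_AUTO_EQ_DELAY.
 * CPUs that saw no flagged EQ get usdelay = 0, i.e. interrupt coalescing
 * is turned back off for their EQs.
 */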

/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 * @phba: pointer to lpfc hba data structure.
 *
 * For each heartbeat, this routine does some heuristic methods to adjust
 * XRI distribution. The goal is to fully utilize free XRIs.
 **/
static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;

	hwq_count = phba->cfg_hdw_queue;
	for (i = 0; i < hwq_count; i++) {
		/* Adjust XRIs in private pool */
		lpfc_adjust_pvt_pool_count(phba, i);

		/* Adjust high watermark */
		lpfc_adjust_high_watermark(phba, i);

#ifdef LPFC_MXP_STAT
		/* Snapshot pbl, pvt and busy count */
		lpfc_snapshot_mxp(phba, i);
#endif
	}
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * a periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the
 * HBA-timer timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply
 * resets the timer for the next timeout period. If the lpfc heart-beat
 * mailbox command is configured and there is no heart-beat mailbox command
 * outstanding, a heart-beat mailbox is issued and the timer set properly.
 * Otherwise, if there has been a heart-beat mailbox command outstanding,
 * the HBA shall be put offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if (phba->cfg_xri_rebalancing) {
		/* Multi-XRI pools handler */
		lpfc_hb_mxp_handler(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_change_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time +
			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
			jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						 jiffies +
						 msecs_to_jiffies(1000 *
						 LPFC_HB_MBOX_INTERVAL));
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
				    retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						msecs_to_jiffies(1000 *
						LPFC_HB_MBOX_INTERVAL));
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					"updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				 jiffies +
				 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still out"
					"standing: last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		}
	} else {
		mod_timer(&phba->hb_tmofunc,
			jiffies +
			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when an HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when an HBA hardware
 * error other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs,
			phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * first write to the host attention register clear the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  SCSI_NL_VID_TYPE_PCI
				  | PCI_VENDOR_ID_EMULEX);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: flag for issuing the port-recovery message.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for port
 * status register (ERR, RDY, RN) bits before proceeding with function reset.
 * During this process, interrupt vectors are freed and later requested
 * for handling possible port resource change.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		/*
		 * On error status condition, driver needs to wait for port
		 * ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (rc)
			return rc;
	}

	/* need reset: attempt for port recovery */
	if (en_rn_msg)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");
	lpfc_offline_prep(phba, mbx_action);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	/* release interrupt for possible resource change */
	lpfc_sli4_disable_intr(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6309 Failed to restart board\n");
		return rc;
	}
	/* request and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3175 Failed to enable interrupt\n");
		return -EIO;
	}
	phba->intr_mode = intr_mode;
	rc = lpfc_online(phba);
	if (rc == 0)
		lpfc_unblock_mgmt_io(phba);

	return rc;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
	bool en_rn_msg = true;
	struct temp_event temp_event_data;
	struct lpfc_register portsmphr_reg;
	int rc, i;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3166 pci channel is offline\n");
		lpfc_sli4_offline_eratt(phba);
		return;
	}

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"7623 Checking UE recoverable");

		for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
			if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				       &portsmphr_reg.word0))
				continue;

			smphr_port_status = bf_get(lpfc_port_smphr_port_status,
						   &portsmphr_reg);
			if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
			    LPFC_PORT_SEM_UE_RECOVERABLE)
				break;
			/* Sleep for 1Sec, before checking SEMAPHORE */
			msleep(1000);
		}

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"4827 smphr_port_status x%x : Waited %dSec",
				smphr_port_status, i);

		/* Recoverable UE, reset the HBA device */
		if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
		    LPFC_PORT_SEM_UE_RECOVERABLE) {
			for (i = 0; i < 20; i++) {
				msleep(1000);
				if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				    &portsmphr_reg.word0) &&
				    (LPFC_POST_STAGE_PORT_READY ==
				     bf_get(lpfc_port_smphr_port_status,
				     &portsmphr_reg))) {
					rc = lpfc_sli4_port_sta_fn_reset(phba,
						LPFC_MBX_NO_WAIT, en_rn_msg);
					if (rc == 0)
						return;
					lpfc_printf_log(phba,
						KERN_ERR, LOG_INIT,
						"4215 Failed to recover UE");
					break;
				}
			}
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"7624 Firmware not ready: Failing UE recovery,"
				" waited %dSec", i);
		phba->link_state = LPFC_HBA_ERROR;
		break;

1898 case LPFC_SLI_INTF_IF_TYPE_2:
1899 case LPFC_SLI_INTF_IF_TYPE_6:
1900 pci_rd_rc1 = lpfc_readl(
1901 phba->sli4_hba.u.if_type2.STATUSregaddr,
1902 &portstat_reg.word0);
1903 /* consider PCI bus read error as pci_channel_offline */
1904 if (pci_rd_rc1 == -EIO) {
1905 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1906 "3151 PCI bus read access failure: x%x\n",
1907 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
1908 lpfc_sli4_offline_eratt(phba);
1911 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
1912 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
1913 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
1914 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1915 "2889 Port Overtemperature event, "
1916 "taking port offline Data: x%x x%x\n",
1917 reg_err1, reg_err2);
1919 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
1920 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1921 temp_event_data.event_code = LPFC_CRIT_TEMP;
1922 temp_event_data.data = 0xFFFFFFFF;
1924 shost = lpfc_shost_from_vport(phba->pport);
1925 fc_host_post_vendor_event(shost, fc_get_event_number(),
1926 sizeof(temp_event_data),
1927 (char *)&temp_event_data,
1928 SCSI_NL_VID_TYPE_PCI
1929 | PCI_VENDOR_ID_EMULEX);
1931 spin_lock_irq(&phba->hbalock);
1932 phba->over_temp_state = HBA_OVER_TEMP;
1933 spin_unlock_irq(&phba->hbalock);
1934 lpfc_sli4_offline_eratt(phba);
1937 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1938 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
1939 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1940 "3143 Port Down: Firmware Update "
1943 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1944 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1945 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1946 "3144 Port Down: Debug Dump\n");
1947 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1948 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
1949 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1950 "3145 Port Down: Provisioning\n");
1952 /* If resets are disabled then leave the HBA alone and return */
1953 if (!phba->cfg_enable_hba_reset)
1956 /* Check port status register for function reset */
1957 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
1960 /* don't report event on forced debug dump */
1961 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1962 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1967 /* fall through if unable to recover */
1968 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1969 "3152 Unrecoverable error\n");
1970 phba->link_state = LPFC_HBA_ERROR;
1972 case LPFC_SLI_INTF_IF_TYPE_1:
1976 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1977 "3123 Report dump event to upper layer\n");
1978 /* Send an internal error event to mgmt application */
1979 lpfc_board_errevt_to_mgmt(phba);
1981 event_data = FC_REG_DUMP_EVENT;
1982 shost = lpfc_shost_from_vport(vport);
1983 fc_host_post_vendor_event(shost, fc_get_event_number(),
1984 sizeof(event_data), (char *) &event_data,
1985 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1989 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1990 * @phba: pointer to lpfc HBA data structure.
1992 * This routine wraps the actual SLI3 or SLI4 HBA error attention handling
1993 * routine, invoked through the API jump table function pointer in the lpfc_hba struct.
1997 * Any other value - error.
2000 lpfc_handle_eratt(struct lpfc_hba *phba)
2002 (*phba->lpfc_handle_eratt)(phba);
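/*
 * Illustrative sketch (condensed, not verbatim from this file): the
 * jump-table entry dispatched above is wired per PCI device group during
 * API table setup, following the _s3/_s4 handler naming convention used
 * throughout this file.
 */
#if 0
static void lpfc_eratt_api_setup_example(struct lpfc_hba *phba)
{
	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:		/* SLI3 LightPulse HBAs */
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
		break;
	case LPFC_PCI_DEV_OC:		/* SLI4 OneConnect HBAs */
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
		break;
	}
}
#endif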
2006 * lpfc_handle_latt - The HBA link event handler
2007 * @phba: pointer to lpfc hba data structure.
2009 * This routine is invoked from the worker thread to handle a HBA host
2010 * attention link event. SLI3 only.
2013 lpfc_handle_latt(struct lpfc_hba *phba)
2015 struct lpfc_vport *vport = phba->pport;
2016 struct lpfc_sli *psli = &phba->sli;
2018 volatile uint32_t control;
2019 struct lpfc_dmabuf *mp;
2022 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2025 goto lpfc_handle_latt_err_exit;
2028 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2031 goto lpfc_handle_latt_free_pmb;
2034 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2037 goto lpfc_handle_latt_free_mp;
2040 /* Clean up any outstanding ELS commands */
2041 lpfc_els_flush_all_cmd(phba);
2043 psli->slistat.link_event++;
2044 lpfc_read_topology(phba, pmb, mp);
2045 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2047 /* Block ELS IOCBs until we have processed this mbox command */
2048 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2049 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2050 if (rc == MBX_NOT_FINISHED) {
2052 goto lpfc_handle_latt_free_mbuf;
2055 /* Clear Link Attention in HA REG */
2056 spin_lock_irq(&phba->hbalock);
2057 writel(HA_LATT, phba->HAregaddr);
2058 readl(phba->HAregaddr); /* flush */
2059 spin_unlock_irq(&phba->hbalock);
2063 lpfc_handle_latt_free_mbuf:
2064 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2065 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2066 lpfc_handle_latt_free_mp:
2068 lpfc_handle_latt_free_pmb:
2069 mempool_free(pmb, phba->mbox_mem_pool);
2070 lpfc_handle_latt_err_exit:
2071 /* Enable Link attention interrupts */
2072 spin_lock_irq(&phba->hbalock);
2073 psli->sli_flag |= LPFC_PROCESS_LA;
2074 control = readl(phba->HCregaddr);
2075 control |= HC_LAINT_ENA;
2076 writel(control, phba->HCregaddr);
2077 readl(phba->HCregaddr); /* flush */
2079 /* Clear Link Attention in HA REG */
2080 writel(HA_LATT, phba->HAregaddr);
2081 readl(phba->HAregaddr); /* flush */
2082 spin_unlock_irq(&phba->hbalock);
2083 lpfc_linkdown(phba);
2084 phba->link_state = LPFC_HBA_ERROR;
2086 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
2087 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
2093 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
2094 * @phba: pointer to lpfc hba data structure.
2095 * @vpd: pointer to the vital product data.
2096 * @len: length of the vital product data in bytes.
2098 * This routine parses the Vital Product Data (VPD). The VPD is treated as
2099 * an array of characters. In this routine, the ModelName, ProgramType,
2100 * ModelDesc, and other fields of the phba data structure are populated.
2103 * 0 - pointer to the VPD passed in is NULL
2107 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2109 uint8_t lenlo, lenhi;
2119 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2120 "0455 Vital Product Data: x%x x%x x%x x%x\n",
2121 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2123 while (!finished && (index < (len - 4))) {
2124 switch (vpd[index]) {
2132 i = ((((unsigned short)lenhi) << 8) + lenlo);
2141 Length = ((((unsigned short)lenhi) << 8) + lenlo);
2142 if (Length > len - index)
2143 Length = len - index;
2144 while (Length > 0) {
2145 /* Look for Serial Number */
2146 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2153 phba->SerialNumber[j++] = vpd[index++];
2157 phba->SerialNumber[j] = 0;
2160 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2161 phba->vpd_flag |= VPD_MODEL_DESC;
2168 phba->ModelDesc[j++] = vpd[index++];
2172 phba->ModelDesc[j] = 0;
2175 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2176 phba->vpd_flag |= VPD_MODEL_NAME;
2183 phba->ModelName[j++] = vpd[index++];
2187 phba->ModelName[j] = 0;
2190 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2191 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2198 phba->ProgramType[j++] = vpd[index++];
2202 phba->ProgramType[j] = 0;
2205 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2206 phba->vpd_flag |= VPD_PORT;
2213 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2214 (phba->sli4_hba.pport_name_sta ==
2215 LPFC_SLI4_PPNAME_GET)) {
2219 phba->Port[j++] = vpd[index++];
2223 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2224 (phba->sli4_hba.pport_name_sta ==
2225 LPFC_SLI4_PPNAME_NON))
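/*
 * Illustrative sketch (not part of the driver): how one "Vn" VPD keyword
 * is consumed above. The helper name vpd_copy_field is hypothetical.
 */
#if 0
static int vpd_copy_field(const uint8_t *vpd, int index, uint8_t *out,
			  int outsz)
{
	int i, j = 0;

	index += 2;		/* skip the two keyword bytes, e.g. 'V','3' */
	i = vpd[index++];	/* one-byte field length follows the keyword */
	while (i--) {
		if (j < outsz - 1)
			out[j++] = vpd[index];
		index++;
	}
	out[j] = 0;		/* NUL-terminate, as for phba->ProgramType */
	return index;		/* offset where scanning resumes */
}
#endif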
2252 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2253 * @phba: pointer to lpfc hba data structure.
2254 * @mdp: pointer to the data structure to hold the derived model name.
2255 * @descp: pointer to the data structure to hold the derived description.
2257 * This routine retrieves HBA's description based on its registered PCI device
2258 * ID. The @descp passed into this function points to an array of 256 chars. It
2259 * shall be returned with the model name, maximum speed, and the host bus type.
2260 * The @mdp passed into this function points to an array of 80 chars. When the
2261 * function returns, the @mdp will be filled with the model name.
2264 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2267 uint16_t dev_id = phba->pcidev->device;
2270 int oneConnect = 0; /* default is not a oneConnect */
2275 } m = {"<Unknown>", "", ""};
2277 if (mdp && mdp[0] != '\0'
2278 && descp && descp[0] != '\0')
2281 if (phba->lmt & LMT_64Gb)
2283 else if (phba->lmt & LMT_32Gb)
2285 else if (phba->lmt & LMT_16Gb)
2287 else if (phba->lmt & LMT_10Gb)
2289 else if (phba->lmt & LMT_8Gb)
2291 else if (phba->lmt & LMT_4Gb)
2293 else if (phba->lmt & LMT_2Gb)
2295 else if (phba->lmt & LMT_1Gb)
2303 case PCI_DEVICE_ID_FIREFLY:
2304 m = (typeof(m)){"LP6000", "PCI",
2305 "Obsolete, Unsupported Fibre Channel Adapter"};
2307 case PCI_DEVICE_ID_SUPERFLY:
2308 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2309 m = (typeof(m)){"LP7000", "PCI", ""};
2311 m = (typeof(m)){"LP7000E", "PCI", ""};
2312 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2314 case PCI_DEVICE_ID_DRAGONFLY:
2315 m = (typeof(m)){"LP8000", "PCI",
2316 "Obsolete, Unsupported Fibre Channel Adapter"};
2318 case PCI_DEVICE_ID_CENTAUR:
2319 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2320 m = (typeof(m)){"LP9002", "PCI", ""};
2322 m = (typeof(m)){"LP9000", "PCI", ""};
2323 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2325 case PCI_DEVICE_ID_RFLY:
2326 m = (typeof(m)){"LP952", "PCI",
2327 "Obsolete, Unsupported Fibre Channel Adapter"};
2329 case PCI_DEVICE_ID_PEGASUS:
2330 m = (typeof(m)){"LP9802", "PCI-X",
2331 "Obsolete, Unsupported Fibre Channel Adapter"};
2333 case PCI_DEVICE_ID_THOR:
2334 m = (typeof(m)){"LP10000", "PCI-X",
2335 "Obsolete, Unsupported Fibre Channel Adapter"};
2337 case PCI_DEVICE_ID_VIPER:
2338 m = (typeof(m)){"LPX1000", "PCI-X",
2339 "Obsolete, Unsupported Fibre Channel Adapter"};
2341 case PCI_DEVICE_ID_PFLY:
2342 m = (typeof(m)){"LP982", "PCI-X",
2343 "Obsolete, Unsupported Fibre Channel Adapter"};
2345 case PCI_DEVICE_ID_TFLY:
2346 m = (typeof(m)){"LP1050", "PCI-X",
2347 "Obsolete, Unsupported Fibre Channel Adapter"};
2349 case PCI_DEVICE_ID_HELIOS:
2350 m = (typeof(m)){"LP11000", "PCI-X2",
2351 "Obsolete, Unsupported Fibre Channel Adapter"};
2353 case PCI_DEVICE_ID_HELIOS_SCSP:
2354 m = (typeof(m)){"LP11000-SP", "PCI-X2",
2355 "Obsolete, Unsupported Fibre Channel Adapter"};
2357 case PCI_DEVICE_ID_HELIOS_DCSP:
2358 m = (typeof(m)){"LP11002-SP", "PCI-X2",
2359 "Obsolete, Unsupported Fibre Channel Adapter"};
2361 case PCI_DEVICE_ID_NEPTUNE:
2362 m = (typeof(m)){"LPe1000", "PCIe",
2363 "Obsolete, Unsupported Fibre Channel Adapter"};
2365 case PCI_DEVICE_ID_NEPTUNE_SCSP:
2366 m = (typeof(m)){"LPe1000-SP", "PCIe",
2367 "Obsolete, Unsupported Fibre Channel Adapter"};
2369 case PCI_DEVICE_ID_NEPTUNE_DCSP:
2370 m = (typeof(m)){"LPe1002-SP", "PCIe",
2371 "Obsolete, Unsupported Fibre Channel Adapter"};
2373 case PCI_DEVICE_ID_BMID:
2374 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2376 case PCI_DEVICE_ID_BSMB:
2377 m = (typeof(m)){"LP111", "PCI-X2",
2378 "Obsolete, Unsupported Fibre Channel Adapter"};
2380 case PCI_DEVICE_ID_ZEPHYR:
2381 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2383 case PCI_DEVICE_ID_ZEPHYR_SCSP:
2384 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2386 case PCI_DEVICE_ID_ZEPHYR_DCSP:
2387 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2390 case PCI_DEVICE_ID_ZMID:
2391 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2393 case PCI_DEVICE_ID_ZSMB:
2394 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2396 case PCI_DEVICE_ID_LP101:
2397 m = (typeof(m)){"LP101", "PCI-X",
2398 "Obsolete, Unsupported Fibre Channel Adapter"};
2400 case PCI_DEVICE_ID_LP10000S:
2401 m = (typeof(m)){"LP10000-S", "PCI",
2402 "Obsolete, Unsupported Fibre Channel Adapter"};
2404 case PCI_DEVICE_ID_LP11000S:
2405 m = (typeof(m)){"LP11000-S", "PCI-X2",
2406 "Obsolete, Unsupported Fibre Channel Adapter"};
2408 case PCI_DEVICE_ID_LPE11000S:
2409 m = (typeof(m)){"LPe11000-S", "PCIe",
2410 "Obsolete, Unsupported Fibre Channel Adapter"};
2412 case PCI_DEVICE_ID_SAT:
2413 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2415 case PCI_DEVICE_ID_SAT_MID:
2416 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2418 case PCI_DEVICE_ID_SAT_SMB:
2419 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2421 case PCI_DEVICE_ID_SAT_DCSP:
2422 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2424 case PCI_DEVICE_ID_SAT_SCSP:
2425 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2427 case PCI_DEVICE_ID_SAT_S:
2428 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2430 case PCI_DEVICE_ID_HORNET:
2431 m = (typeof(m)){"LP21000", "PCIe",
2432 "Obsolete, Unsupported FCoE Adapter"};
2435 case PCI_DEVICE_ID_PROTEUS_VF:
2436 m = (typeof(m)){"LPev12000", "PCIe IOV",
2437 "Obsolete, Unsupported Fibre Channel Adapter"};
2439 case PCI_DEVICE_ID_PROTEUS_PF:
2440 m = (typeof(m)){"LPev12000", "PCIe IOV",
2441 "Obsolete, Unsupported Fibre Channel Adapter"};
2443 case PCI_DEVICE_ID_PROTEUS_S:
2444 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2445 "Obsolete, Unsupported Fibre Channel Adapter"};
2447 case PCI_DEVICE_ID_TIGERSHARK:
2449 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2451 case PCI_DEVICE_ID_TOMCAT:
2453 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2455 case PCI_DEVICE_ID_FALCON:
2456 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2457 "EmulexSecure Fibre"};
2459 case PCI_DEVICE_ID_BALIUS:
2460 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2461 "Obsolete, Unsupported Fibre Channel Adapter"};
2463 case PCI_DEVICE_ID_LANCER_FC:
2464 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2466 case PCI_DEVICE_ID_LANCER_FC_VF:
2467 m = (typeof(m)){"LPe16000", "PCIe",
2468 "Obsolete, Unsupported Fibre Channel Adapter"};
2470 case PCI_DEVICE_ID_LANCER_FCOE:
2472 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2474 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2476 m = (typeof(m)){"OCe15100", "PCIe",
2477 "Obsolete, Unsupported FCoE"};
2479 case PCI_DEVICE_ID_LANCER_G6_FC:
2480 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2482 case PCI_DEVICE_ID_LANCER_G7_FC:
2483 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2485 case PCI_DEVICE_ID_SKYHAWK:
2486 case PCI_DEVICE_ID_SKYHAWK_VF:
2488 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2491 m = (typeof(m)){"Unknown", "", ""};
2495 if (mdp && mdp[0] == '\0')
2496 snprintf(mdp, 79, "%s", m.name);
2498 * OneConnect HBAs require special processing; they are all initiators
2499 * and we put the port number on the end.
2501 if (descp && descp[0] == '\0') {
2503 snprintf(descp, 255,
2504 "Emulex OneConnect %s, %s Initiator %s",
2507 else if (max_speed == 0)
2508 snprintf(descp, 255,
2510 m.name, m.bus, m.function);
2512 snprintf(descp, 255,
2513 "Emulex %s %d%s %s %s",
2514 m.name, max_speed, (GE) ? "GE" : "Gb",
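/*
 * For example, a non-OneConnect entry such as {"LPe32000", "PCIe",
 * "Fibre Channel Adapter"} with max_speed 32 yields the description
 * "Emulex LPe32000 32Gb PCIe Fibre Channel Adapter".
 */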
2520 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
2521 * @phba: pointer to lpfc hba data structure.
2522 * @pring: pointer to an IOCB ring.
2523 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2525 * This routine posts a given number of IOCBs with the associated DMA buffer
2526 * descriptors specified by the cnt argument to the given IOCB ring.
2529 * The number of IOCBs NOT able to be posted to the IOCB ring.
2532 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2535 struct lpfc_iocbq *iocb;
2536 struct lpfc_dmabuf *mp1, *mp2;
2538 cnt += pring->missbufcnt;
2540 /* While there are buffers to post */
2542 /* Allocate buffer for command iocb */
2543 iocb = lpfc_sli_get_iocbq(phba);
2545 pring->missbufcnt = cnt;
2550 /* 2 buffers can be posted per command */
2551 /* Allocate buffer to post */
2552 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2554 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2555 if (!mp1 || !mp1->virt) {
2557 lpfc_sli_release_iocbq(phba, iocb);
2558 pring->missbufcnt = cnt;
2562 INIT_LIST_HEAD(&mp1->list);
2563 /* Allocate buffer to post */
2565 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2567 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2569 if (!mp2 || !mp2->virt) {
2571 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2573 lpfc_sli_release_iocbq(phba, iocb);
2574 pring->missbufcnt = cnt;
2578 INIT_LIST_HEAD(&mp2->list);
2583 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2584 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2585 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2586 icmd->ulpBdeCount = 1;
2589 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2590 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2591 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2593 icmd->ulpBdeCount = 2;
2596 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2599 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2601 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2605 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2609 lpfc_sli_release_iocbq(phba, iocb);
2610 pring->missbufcnt = cnt;
2613 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2615 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2617 pring->missbufcnt = 0;
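/*
 * Note: on any allocation or issue failure above, the number of buffers
 * still owed is parked in pring->missbufcnt; the "cnt += pring->missbufcnt"
 * at entry makes the next call retry the shortfall. A fully successful
 * pass clears the debt here.
 */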
2622 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2623 * @phba: pointer to lpfc hba data structure.
2625 * This routine posts initial receive IOCB buffers to the ELS ring. The
2626 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2627 * set to 64 IOCBs. SLI3 only.
2630 * 0 - success (currently always success)
2633 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2635 struct lpfc_sli *psli = &phba->sli;
2637 /* Ring 0, ELS / CT buffers */
2638 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2639 /* Ring 2 - FCP no buffers needed */
2644 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
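/*
 * S(N,V) is a 32-bit rotate-left of V by N, the primitive used by the
 * SHA-1 rounds below. Worked example (32-bit V):
 * S(5, 0x80000001) == (0x80000001 << 5) | (0x80000001 >> 27) == 0x30.
 */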
2647 * lpfc_sha_init - Set up initial array of hash table entries
2648 * @HashResultPointer: pointer to an array used as the hash table.
2650 * This routine sets up the initial values in the array of hash table entries
2654 lpfc_sha_init(uint32_t * HashResultPointer)
2656 HashResultPointer[0] = 0x67452301;
2657 HashResultPointer[1] = 0xEFCDAB89;
2658 HashResultPointer[2] = 0x98BADCFE;
2659 HashResultPointer[3] = 0x10325476;
2660 HashResultPointer[4] = 0xC3D2E1F0;
2664 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2665 * @HashResultPointer: pointer to an initial/result hash table.
2666 * @HashWorkingPointer: pointer to a working hash table.
2668 * This routine iterates the initial hash table pointed to by @HashResultPointer
2669 * with the values from the working hash table pointed to by @HashWorkingPointer.
2670 * The results are put back into the initial hash table and returned through
2671 * @HashResultPointer as the result hash table.
2674 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2678 uint32_t A, B, C, D, E;
2681 HashWorkingPointer[t] =
2683 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2685 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2686 } while (++t <= 79);
2688 A = HashResultPointer[0];
2689 B = HashResultPointer[1];
2690 C = HashResultPointer[2];
2691 D = HashResultPointer[3];
2692 E = HashResultPointer[4];
2696 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2697 } else if (t < 40) {
2698 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2699 } else if (t < 60) {
2700 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2702 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2704 TEMP += S(5, A) + E + HashWorkingPointer[t];
2710 } while (++t <= 79);
2712 HashResultPointer[0] += A;
2713 HashResultPointer[1] += B;
2714 HashResultPointer[2] += C;
2715 HashResultPointer[3] += D;
2716 HashResultPointer[4] += E;
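/*
 * The two loops above are the standard SHA-1 compression function
 * (FIPS 180-1): an 80-entry message schedule followed by 80 rounds over
 * the working variables A-E, using the round constants 0x5A827999,
 * 0x6ED9EBA1, 0x8F1BBCDC, and 0xCA62C1D6 for rounds 0-19, 20-39, 40-59,
 * and 60-79 respectively.
 */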
2721 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2722 * @RandomChallenge: pointer to the entry of host challenge random number array.
2723 * @HashWorking: pointer to the entry of the working hash array.
2725 * This routine calculates the working hash array referred by @HashWorking
2726 * from the challenge random numbers associated with the host, referred by
2727 * @RandomChallenge. The result is put into the entry of the working hash
2728 * array and returned by reference through @HashWorking.
2731 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2733 *HashWorking = (*RandomChallenge ^ *HashWorking);
2737 * lpfc_hba_init - Perform special handling for LC HBA initialization
2738 * @phba: pointer to lpfc hba data structure.
2739 * @hbainit: pointer to an array of unsigned 32-bit integers.
2741 * This routine performs the special handling for LC HBA initialization.
2744 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2747 uint32_t *HashWorking;
2748 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2750 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2754 HashWorking[0] = HashWorking[78] = *pwwnn++;
2755 HashWorking[1] = HashWorking[79] = *pwwnn;
2757 for (t = 0; t < 7; t++)
2758 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2760 lpfc_sha_init(hbainit);
2761 lpfc_sha_iterate(hbainit, HashWorking);
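/*
 * Flow recap: the 80-word HashWorking block is seeded with the WWNN
 * words (slots 0/78 and 1/79), the first seven words are XORed with the
 * host's RandomData challenge, and one SHA-1 pass over that block leaves
 * the resulting digest in hbainit.
 */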
2766 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2767 * @vport: pointer to a virtual N_Port data structure.
2769 * This routine performs the necessary cleanups before deleting the @vport.
2770 * It invokes the discovery state machine to perform necessary state
2771 * transitions and to release the ndlps associated with the @vport. Note,
2772 * the physical port is treated as @vport 0.
2775 lpfc_cleanup(struct lpfc_vport *vport)
2777 struct lpfc_hba *phba = vport->phba;
2778 struct lpfc_nodelist *ndlp, *next_ndlp;
2781 if (phba->link_state > LPFC_LINK_DOWN)
2782 lpfc_port_link_failure(vport);
2784 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2785 if (!NLP_CHK_NODE_ACT(ndlp)) {
2786 ndlp = lpfc_enable_node(vport, ndlp,
2787 NLP_STE_UNUSED_NODE);
2790 spin_lock_irq(&phba->ndlp_lock);
2791 NLP_SET_FREE_REQ(ndlp);
2792 spin_unlock_irq(&phba->ndlp_lock);
2793 /* Trigger the release of the ndlp memory */
2797 spin_lock_irq(&phba->ndlp_lock);
2798 if (NLP_CHK_FREE_REQ(ndlp)) {
2799 /* The ndlp should not be in memory free mode already */
2800 spin_unlock_irq(&phba->ndlp_lock);
2803 /* Indicate request for freeing ndlp memory */
2804 NLP_SET_FREE_REQ(ndlp);
2805 spin_unlock_irq(&phba->ndlp_lock);
2807 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2808 ndlp->nlp_DID == Fabric_DID) {
2809 /* Just free up ndlp with Fabric_DID for vports */
2814 /* take care of nodes in unused state before the state
2815 * machine takes action.
2817 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2822 if (ndlp->nlp_type & NLP_FABRIC)
2823 lpfc_disc_state_machine(vport, ndlp, NULL,
2824 NLP_EVT_DEVICE_RECOVERY);
2826 lpfc_disc_state_machine(vport, ndlp, NULL,
2830 /* At this point, ALL ndlp's should be gone
2831 * because of the previous NLP_EVT_DEVICE_RM.
2832 * Let's wait for this to happen, if needed.
2834 while (!list_empty(&vport->fc_nodes)) {
2836 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2837 "0233 Nodelist not empty\n");
2838 list_for_each_entry_safe(ndlp, next_ndlp,
2839 &vport->fc_nodes, nlp_listp) {
2840 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2842 "0282 did:x%x ndlp:x%px "
2843 "usgmap:x%x refcnt:%d\n",
2844 ndlp->nlp_DID, (void *)ndlp,
2846 kref_read(&ndlp->kref));
2851 /* Wait for any activity on ndlps to settle */
2854 lpfc_cleanup_vports_rrqs(vport, NULL);
2858 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2859 * @vport: pointer to a virtual N_Port data structure.
2861 * This routine stops all the timers associated with a @vport. This function
2862 * is invoked before disabling or deleting a @vport. Note that the physical
2863 * port is treated as @vport 0.
2866 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2868 del_timer_sync(&vport->els_tmofunc);
2869 del_timer_sync(&vport->delayed_disc_tmo);
2870 lpfc_can_disctmo(vport);
2875 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2876 * @phba: pointer to lpfc hba data structure.
2878 * This routine stops the SLI4 FCF rediscovery wait timer if it's on. The
2879 * caller of this routine should already hold the host lock.
2882 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2884 /* Clear pending FCF rediscovery wait flag */
2885 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2887 /* Now, try to stop the timer */
2888 del_timer(&phba->fcf.redisc_wait);
2892 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2893 * @phba: pointer to lpfc hba data structure.
2895 * This routine stops the SLI4 FCF rediscovery wait timer if it's on. It
2896 * checks, with the host lock held, whether the FCF rediscovery wait timer
2897 * is pending before proceeding to disable the timer and clear the
2898 * wait timer pending flag.
2901 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2903 spin_lock_irq(&phba->hbalock);
2904 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2905 /* FCF rediscovery timer already fired or stopped */
2906 spin_unlock_irq(&phba->hbalock);
2909 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2910 /* Clear failover in progress flags */
2911 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2912 spin_unlock_irq(&phba->hbalock);
2916 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2917 * @phba: pointer to lpfc hba data structure.
2919 * This routine stops all the timers associated with a HBA. This function is
2920 * invoked before either putting a HBA offline or unloading the driver.
2923 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2926 lpfc_stop_vport_timers(phba->pport);
2927 cancel_delayed_work_sync(&phba->eq_delay_work);
2928 del_timer_sync(&phba->sli.mbox_tmo);
2929 del_timer_sync(&phba->fabric_block_timer);
2930 del_timer_sync(&phba->eratt_poll);
2931 del_timer_sync(&phba->hb_tmofunc);
2932 if (phba->sli_rev == LPFC_SLI_REV4) {
2933 del_timer_sync(&phba->rrq_tmr);
2934 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2936 phba->hb_outstanding = 0;
2938 switch (phba->pci_dev_grp) {
2939 case LPFC_PCI_DEV_LP:
2940 /* Stop any LightPulse device specific driver timers */
2941 del_timer_sync(&phba->fcp_poll_timer);
2943 case LPFC_PCI_DEV_OC:
2944 /* Stop any OneConnect device specific driver timers */
2945 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2948 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2949 "0297 Invalid device group (x%x)\n",
2957 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2958 * @phba: pointer to lpfc hba data structure.
2960 * This routine marks a HBA's management interface as blocked. Once the HBA's
2961 * management interface is marked as blocked, all user space access to
2962 * the HBA, whether from the sysfs interface or the libdfc interface, is
2963 * blocked. The HBA is set to block the management interface when the
2964 * driver prepares the HBA interface for going online or offline.
2967 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
2969 unsigned long iflag;
2970 uint8_t actcmd = MBX_HEARTBEAT;
2971 unsigned long timeout;
2973 spin_lock_irqsave(&phba->hbalock, iflag);
2974 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2975 spin_unlock_irqrestore(&phba->hbalock, iflag);
2976 if (mbx_action == LPFC_MBX_NO_WAIT)
2978 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2979 spin_lock_irqsave(&phba->hbalock, iflag);
2980 if (phba->sli.mbox_active) {
2981 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2982 /* Determine how long we might wait for the active mailbox
2983 * command to be gracefully completed by firmware.
2985 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
2986 phba->sli.mbox_active) * 1000) + jiffies;
2988 spin_unlock_irqrestore(&phba->hbalock, iflag);
2990 /* Wait for the outstanding mailbox command to complete */
2991 while (phba->sli.mbox_active) {
2992 /* Check active mailbox complete status every 2ms */
2994 if (time_after(jiffies, timeout)) {
2995 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2996 "2813 Mgmt IO is Blocked %x "
2997 "- mbox cmd %x still active\n",
2998 phba->sli.sli_flag, actcmd);
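/*
 * Illustrative sketch (hypothetical helper, not in the driver): the wait
 * pattern used above -- poll every 2 ms until the active mailbox command
 * completes or the computed timeout expires.
 */
#if 0
static bool lpfc_wait_mbox_idle_example(struct lpfc_hba *phba,
					unsigned long timeout)
{
	while (phba->sli.mbox_active) {
		msleep(2);
		if (time_after(jiffies, timeout))
			return false;	/* command still active; give up */
	}
	return true;
}
#endif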
3005 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
3006 * @phba: pointer to lpfc hba data structure.
3008 * Allocate RPIs for all active remote nodes. This is needed whenever
3009 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
3010 * is to fix up the temporary RPI assignments.
3013 lpfc_sli4_node_prep(struct lpfc_hba *phba)
3015 struct lpfc_nodelist *ndlp, *next_ndlp;
3016 struct lpfc_vport **vports;
3018 unsigned long flags;
3020 if (phba->sli_rev != LPFC_SLI_REV4)
3023 vports = lpfc_create_vport_work_array(phba);
3027 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3028 if (vports[i]->load_flag & FC_UNLOADING)
3031 list_for_each_entry_safe(ndlp, next_ndlp,
3032 &vports[i]->fc_nodes,
3034 if (!NLP_CHK_NODE_ACT(ndlp))
3036 rpi = lpfc_sli4_alloc_rpi(phba);
3037 if (rpi == LPFC_RPI_ALLOC_ERROR) {
3038 spin_lock_irqsave(&phba->ndlp_lock, flags);
3039 NLP_CLR_NODE_ACT(ndlp);
3040 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
3043 ndlp->nlp_rpi = rpi;
3044 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3045 LOG_NODE | LOG_DISCOVERY,
3046 "0009 Assign RPI x%x to ndlp x%px "
3047 "DID:x%06x flg:x%x map:x%x\n",
3048 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3049 ndlp->nlp_flag, ndlp->nlp_usg_map);
3052 lpfc_destroy_vport_work_array(phba, vports);
3056 * lpfc_create_expedite_pool - create expedite pool
3057 * @phba: pointer to lpfc hba data structure.
3059 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
3060 * to the expedite pool and marks them as expedite.
3062 static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3064 struct lpfc_sli4_hdw_queue *qp;
3065 struct lpfc_io_buf *lpfc_ncmd;
3066 struct lpfc_io_buf *lpfc_ncmd_next;
3067 struct lpfc_epd_pool *epd_pool;
3068 unsigned long iflag;
3070 epd_pool = &phba->epd_pool;
3071 qp = &phba->sli4_hba.hdwq[0];
3073 spin_lock_init(&epd_pool->lock);
3074 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3075 spin_lock(&epd_pool->lock);
3076 INIT_LIST_HEAD(&epd_pool->list);
3077 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3078 &qp->lpfc_io_buf_list_put, list) {
3079 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3080 lpfc_ncmd->expedite = true;
3083 if (epd_pool->count >= XRI_BATCH)
3086 spin_unlock(&epd_pool->lock);
3087 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
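/*
 * Lock ordering note: both here and in lpfc_destroy_expedite_pool() the
 * HWQ's io_buf_list_put_lock is taken first (IRQ-safe), with
 * epd_pool->lock nested inside it.
 */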
3091 * lpfc_destroy_expedite_pool - destroy expedite pool
3092 * @phba: pointer to lpfc hba data structure.
3094 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
3095 * of HWQ 0 and clears the expedite mark.
3097 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3099 struct lpfc_sli4_hdw_queue *qp;
3100 struct lpfc_io_buf *lpfc_ncmd;
3101 struct lpfc_io_buf *lpfc_ncmd_next;
3102 struct lpfc_epd_pool *epd_pool;
3103 unsigned long iflag;
3105 epd_pool = &phba->epd_pool;
3106 qp = &phba->sli4_hba.hdwq[0];
3108 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3109 spin_lock(&epd_pool->lock);
3110 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3111 &epd_pool->list, list) {
3112 list_move_tail(&lpfc_ncmd->list,
3113 &qp->lpfc_io_buf_list_put);
3114 lpfc_ncmd->expedite = false;
3118 spin_unlock(&epd_pool->lock);
3119 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3123 * lpfc_create_multixri_pools - create multi-XRI pools
3124 * @phba: pointer to lpfc hba data structure.
3126 * This routine initializes the public and private pools per HWQ, then moves
3127 * XRIs from lpfc_io_buf_list_put to the public pool. High and low watermarks are also set.
3130 void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3135 struct lpfc_io_buf *lpfc_ncmd;
3136 struct lpfc_io_buf *lpfc_ncmd_next;
3137 unsigned long iflag;
3138 struct lpfc_sli4_hdw_queue *qp;
3139 struct lpfc_multixri_pool *multixri_pool;
3140 struct lpfc_pbl_pool *pbl_pool;
3141 struct lpfc_pvt_pool *pvt_pool;
3143 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3144 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3145 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3146 phba->sli4_hba.io_xri_cnt);
3148 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3149 lpfc_create_expedite_pool(phba);
3151 hwq_count = phba->cfg_hdw_queue;
3152 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3154 for (i = 0; i < hwq_count; i++) {
3155 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3157 if (!multixri_pool) {
3158 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3159 "1238 Failed to allocate memory for "
3162 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3163 lpfc_destroy_expedite_pool(phba);
3167 qp = &phba->sli4_hba.hdwq[j];
3168 kfree(qp->p_multixri_pool);
3171 phba->cfg_xri_rebalancing = 0;
3175 qp = &phba->sli4_hba.hdwq[i];
3176 qp->p_multixri_pool = multixri_pool;
3178 multixri_pool->xri_limit = count_per_hwq;
3179 multixri_pool->rrb_next_hwqid = i;
3181 /* Deal with public free xri pool */
3182 pbl_pool = &multixri_pool->pbl_pool;
3183 spin_lock_init(&pbl_pool->lock);
3184 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3185 spin_lock(&pbl_pool->lock);
3186 INIT_LIST_HEAD(&pbl_pool->list);
3187 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3188 &qp->lpfc_io_buf_list_put, list) {
3189 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3193 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3194 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3195 pbl_pool->count, i);
3196 spin_unlock(&pbl_pool->lock);
3197 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3199 /* Deal with private free xri pool */
3200 pvt_pool = &multixri_pool->pvt_pool;
3201 pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3202 pvt_pool->low_watermark = XRI_BATCH;
3203 spin_lock_init(&pvt_pool->lock);
3204 spin_lock_irqsave(&pvt_pool->lock, iflag);
3205 INIT_LIST_HEAD(&pvt_pool->list);
3206 pvt_pool->count = 0;
3207 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
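/*
 * Worked example (illustrative numbers): with io_xri_cnt == 2048 and
 * cfg_hdw_queue == 8, each pool gets xri_limit = 2048 / 8 = 256, giving
 * pvt_pool->high_watermark = 256 / 2 = 128 and low_watermark = XRI_BATCH.
 */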
3212 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3213 * @phba: pointer to lpfc hba data structure.
3215 * This routine returns XRIs from the public/private pools to lpfc_io_buf_list_put.
3217 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3221 struct lpfc_io_buf *lpfc_ncmd;
3222 struct lpfc_io_buf *lpfc_ncmd_next;
3223 unsigned long iflag;
3224 struct lpfc_sli4_hdw_queue *qp;
3225 struct lpfc_multixri_pool *multixri_pool;
3226 struct lpfc_pbl_pool *pbl_pool;
3227 struct lpfc_pvt_pool *pvt_pool;
3229 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3230 lpfc_destroy_expedite_pool(phba);
3232 if (!(phba->pport->load_flag & FC_UNLOADING))
3233 lpfc_sli_flush_io_rings(phba);
3235 hwq_count = phba->cfg_hdw_queue;
3237 for (i = 0; i < hwq_count; i++) {
3238 qp = &phba->sli4_hba.hdwq[i];
3239 multixri_pool = qp->p_multixri_pool;
3243 qp->p_multixri_pool = NULL;
3245 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3247 /* Deal with public free xri pool */
3248 pbl_pool = &multixri_pool->pbl_pool;
3249 spin_lock(&pbl_pool->lock);
3251 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3252 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3253 pbl_pool->count, i);
3255 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3256 &pbl_pool->list, list) {
3257 list_move_tail(&lpfc_ncmd->list,
3258 &qp->lpfc_io_buf_list_put);
3263 INIT_LIST_HEAD(&pbl_pool->list);
3264 pbl_pool->count = 0;
3266 spin_unlock(&pbl_pool->lock);
3268 /* Deal with private free xri pool */
3269 pvt_pool = &multixri_pool->pvt_pool;
3270 spin_lock(&pvt_pool->lock);
3272 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3273 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3274 pvt_pool->count, i);
3276 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3277 &pvt_pool->list, list) {
3278 list_move_tail(&lpfc_ncmd->list,
3279 &qp->lpfc_io_buf_list_put);
3284 INIT_LIST_HEAD(&pvt_pool->list);
3285 pvt_pool->count = 0;
3287 spin_unlock(&pvt_pool->lock);
3288 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3290 kfree(multixri_pool);
3295 * lpfc_online - Initialize and bring a HBA online
3296 * @phba: pointer to lpfc hba data structure.
3298 * This routine initializes the HBA and brings it online. During this
3299 * process, the management interface is blocked to prevent user space access
3300 * to the HBA from interfering with the driver initialization.
3307 lpfc_online(struct lpfc_hba *phba)
3309 struct lpfc_vport *vport;
3310 struct lpfc_vport **vports;
3312 bool vpis_cleared = false;
3316 vport = phba->pport;
3318 if (!(vport->fc_flag & FC_OFFLINE_MODE))
3321 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3322 "0458 Bring Adapter online\n");
3324 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3326 if (phba->sli_rev == LPFC_SLI_REV4) {
3327 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3328 lpfc_unblock_mgmt_io(phba);
3331 spin_lock_irq(&phba->hbalock);
3332 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3333 vpis_cleared = true;
3334 spin_unlock_irq(&phba->hbalock);
3336 /* Reestablish the local initiator port.
3337 * The offline process destroyed the previous lport.
3339 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3340 !phba->nvmet_support) {
3341 error = lpfc_nvme_create_localport(phba->pport);
3343 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3344 "6132 NVME restore reg failed "
3345 "on nvmei error x%x\n", error);
3348 lpfc_sli_queue_init(phba);
3349 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
3350 lpfc_unblock_mgmt_io(phba);
3355 vports = lpfc_create_vport_work_array(phba);
3356 if (vports != NULL) {
3357 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3358 struct Scsi_Host *shost;
3359 shost = lpfc_shost_from_vport(vports[i]);
3360 spin_lock_irq(shost->host_lock);
3361 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3362 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3363 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3364 if (phba->sli_rev == LPFC_SLI_REV4) {
3365 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3366 if ((vpis_cleared) &&
3367 (vports[i]->port_type !=
3368 LPFC_PHYSICAL_PORT))
3371 spin_unlock_irq(shost->host_lock);
3374 lpfc_destroy_vport_work_array(phba, vports);
3376 if (phba->cfg_xri_rebalancing)
3377 lpfc_create_multixri_pools(phba);
3379 lpfc_cpuhp_add(phba);
3381 lpfc_unblock_mgmt_io(phba);
3386 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
3387 * @phba: pointer to lpfc hba data structure.
3389 * This routine marks a HBA's management interface as not blocked. Once the
3390 * HBA's management interface is marked as not blocked, all user space
3391 * access to the HBA, whether from the sysfs interface or the libdfc
3392 * interface, is allowed. The HBA is set to block the management interface
3393 * when the driver prepares the HBA interface for going online or offline, and
3394 * is set to unblock the management interface afterwards.
3397 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
3399 unsigned long iflag;
3401 spin_lock_irqsave(&phba->hbalock, iflag);
3402 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3403 spin_unlock_irqrestore(&phba->hbalock, iflag);
3407 * lpfc_offline_prep - Prepare a HBA to be brought offline
3408 * @phba: pointer to lpfc hba data structure.
3410 * This routine is invoked to prepare a HBA to be brought offline. It
3411 * unregisters the login for all nodes on all vports and flushes the mailbox
3412 * queue to make the HBA ready to be brought offline.
3415 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3417 struct lpfc_vport *vport = phba->pport;
3418 struct lpfc_nodelist *ndlp, *next_ndlp;
3419 struct lpfc_vport **vports;
3420 struct Scsi_Host *shost;
3423 if (vport->fc_flag & FC_OFFLINE_MODE)
3426 lpfc_block_mgmt_io(phba, mbx_action);
3428 lpfc_linkdown(phba);
3430 /* Issue an unreg_login to all nodes on all vports */
3431 vports = lpfc_create_vport_work_array(phba);
3432 if (vports != NULL) {
3433 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3434 if (vports[i]->load_flag & FC_UNLOADING)
3436 shost = lpfc_shost_from_vport(vports[i]);
3437 spin_lock_irq(shost->host_lock);
3438 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3439 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3440 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3441 spin_unlock_irq(shost->host_lock);
3443 shost = lpfc_shost_from_vport(vports[i]);
3444 list_for_each_entry_safe(ndlp, next_ndlp,
3445 &vports[i]->fc_nodes,
3447 if ((!NLP_CHK_NODE_ACT(ndlp)) ||
3448 ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
3449 /* Driver must assume RPI is invalid for
3450 * any unused or inactive node.
3452 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3456 if (ndlp->nlp_type & NLP_FABRIC) {
3457 lpfc_disc_state_machine(vports[i], ndlp,
3458 NULL, NLP_EVT_DEVICE_RECOVERY);
3459 lpfc_disc_state_machine(vports[i], ndlp,
3460 NULL, NLP_EVT_DEVICE_RM);
3462 spin_lock_irq(shost->host_lock);
3463 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3464 spin_unlock_irq(shost->host_lock);
3466 * Whenever an SLI4 port goes offline, free the
3467 * RPI. Get a new RPI when the adapter port
3468 * comes back online.
3470 if (phba->sli_rev == LPFC_SLI_REV4) {
3471 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3472 LOG_NODE | LOG_DISCOVERY,
3473 "0011 Free RPI x%x on "
3474 "ndlp:x%px did x%x "
3476 ndlp->nlp_rpi, ndlp,
3479 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3480 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3482 lpfc_unreg_rpi(vports[i], ndlp);
3486 lpfc_destroy_vport_work_array(phba, vports);
3488 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3491 flush_workqueue(phba->wq);
3495 * lpfc_offline - Bring a HBA offline
3496 * @phba: pointer to lpfc hba data structure.
3498 * This routine actually brings a HBA offline. It stops all the timers
3499 * associated with the HBA, brings down the SLI layer, and eventually
3500 * marks the HBA as in offline state for the upper layer protocol.
3503 lpfc_offline(struct lpfc_hba *phba)
3505 struct Scsi_Host *shost;
3506 struct lpfc_vport **vports;
3509 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3512 /* stop port and all timers associated with this hba */
3513 lpfc_stop_port(phba);
3515 /* Tear down the local and target port registrations. The
3516 * nvme transports need to clean up.
3518 lpfc_nvmet_destroy_targetport(phba);
3519 lpfc_nvme_destroy_localport(phba->pport);
3521 vports = lpfc_create_vport_work_array(phba);
3523 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3524 lpfc_stop_vport_timers(vports[i]);
3525 lpfc_destroy_vport_work_array(phba, vports);
3526 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3527 "0460 Bring Adapter offline\n");
3528 /* Bring down the SLI Layer and cleanup. The HBA is offline
3530 lpfc_sli_hba_down(phba);
3531 spin_lock_irq(&phba->hbalock);
3533 spin_unlock_irq(&phba->hbalock);
3534 vports = lpfc_create_vport_work_array(phba);
3536 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3537 shost = lpfc_shost_from_vport(vports[i]);
3538 spin_lock_irq(shost->host_lock);
3539 vports[i]->work_port_events = 0;
3540 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3541 spin_unlock_irq(shost->host_lock);
3543 lpfc_destroy_vport_work_array(phba, vports);
3544 __lpfc_cpuhp_remove(phba);
3546 if (phba->cfg_xri_rebalancing)
3547 lpfc_destroy_multixri_pools(phba);
3551 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3552 * @phba: pointer to lpfc hba data structure.
3554 * This routine is to free all the SCSI buffers and IOCBs from the driver
3555 * list back to the kernel. It is called from lpfc_pci_remove_one to free
3556 * the internal resources before the device is removed from the system.
3559 lpfc_scsi_free(struct lpfc_hba *phba)
3561 struct lpfc_io_buf *sb, *sb_next;
3563 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3566 spin_lock_irq(&phba->hbalock);
3568 /* Release all the lpfc_scsi_bufs maintained by this host. */
3570 spin_lock(&phba->scsi_buf_list_put_lock);
3571 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3573 list_del(&sb->list);
3574 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3577 phba->total_scsi_bufs--;
3579 spin_unlock(&phba->scsi_buf_list_put_lock);
3581 spin_lock(&phba->scsi_buf_list_get_lock);
3582 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3584 list_del(&sb->list);
3585 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3588 phba->total_scsi_bufs--;
3590 spin_unlock(&phba->scsi_buf_list_get_lock);
3591 spin_unlock_irq(&phba->hbalock);
3595 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
3596 * @phba: pointer to lpfc hba data structure.
3598 * This routine is to free all the IO buffers and IOCBs from the driver
3599 * list back to the kernel. It is called from lpfc_pci_remove_one to free
3600 * the internal resources before the device is removed from the system.
3603 lpfc_io_free(struct lpfc_hba *phba)
3605 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
3606 struct lpfc_sli4_hdw_queue *qp;
3609 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3610 qp = &phba->sli4_hba.hdwq[idx];
3611 /* Release all the lpfc_nvme_bufs maintained by this host. */
3612 spin_lock(&qp->io_buf_list_put_lock);
3613 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3614 &qp->lpfc_io_buf_list_put,
3616 list_del(&lpfc_ncmd->list);
3618 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3619 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3620 if (phba->cfg_xpsgl && !phba->nvmet_support)
3621 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3622 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3624 qp->total_io_bufs--;
3626 spin_unlock(&qp->io_buf_list_put_lock);
3628 spin_lock(&qp->io_buf_list_get_lock);
3629 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3630 &qp->lpfc_io_buf_list_get,
3632 list_del(&lpfc_ncmd->list);
3634 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3635 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3636 if (phba->cfg_xpsgl && !phba->nvmet_support)
3637 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3638 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3640 qp->total_io_bufs--;
3642 spin_unlock(&qp->io_buf_list_get_lock);
3647 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
3648 * @phba: pointer to lpfc hba data structure.
3650 * This routine first calculates the sizes of the current els and allocated
3651 * scsi sgl lists, and then goes through all sgls to update the physical
3652 * XRIs assigned due to port function reset. During port initialization, the
3653 * current els and allocated scsi sgl lists are 0s.
3656 * 0 - successful (for now, it always returns 0)
3659 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
3661 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3662 uint16_t i, lxri, xri_cnt, els_xri_cnt;
3663 LIST_HEAD(els_sgl_list);
3667 * update on pci function's els xri-sgl list
3669 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3671 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3672 /* els xri-sgl expanded */
3673 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3674 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3675 "3157 ELS xri-sgl count increased from "
3676 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3678 /* allocate the additional els sgls */
3679 for (i = 0; i < xri_cnt; i++) {
3680 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3682 if (sglq_entry == NULL) {
3683 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3684 "2562 Failure to allocate an "
3685 "ELS sgl entry:%d\n", i);
3689 sglq_entry->buff_type = GEN_BUFF_TYPE;
3690 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3692 if (sglq_entry->virt == NULL) {
3694 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3695 "2563 Failure to allocate an "
3696 "ELS mbuf:%d\n", i);
3700 sglq_entry->sgl = sglq_entry->virt;
3701 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3702 sglq_entry->state = SGL_FREED;
3703 list_add_tail(&sglq_entry->list, &els_sgl_list);
3705 spin_lock_irq(&phba->hbalock);
3706 spin_lock(&phba->sli4_hba.sgl_list_lock);
3707 list_splice_init(&els_sgl_list,
3708 &phba->sli4_hba.lpfc_els_sgl_list);
3709 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3710 spin_unlock_irq(&phba->hbalock);
3711 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3712 /* els xri-sgl shrunk */
3713 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3714 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3715 "3158 ELS xri-sgl count decreased from "
3716 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3718 spin_lock_irq(&phba->hbalock);
3719 spin_lock(&phba->sli4_hba.sgl_list_lock);
3720 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3722 /* release extra els sgls from list */
3723 for (i = 0; i < xri_cnt; i++) {
3724 list_remove_head(&els_sgl_list,
3725 sglq_entry, struct lpfc_sglq, list);
3727 __lpfc_mbuf_free(phba, sglq_entry->virt,
3732 list_splice_init(&els_sgl_list,
3733 &phba->sli4_hba.lpfc_els_sgl_list);
3734 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3735 spin_unlock_irq(&phba->hbalock);
3737 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3738 "3163 ELS xri-sgl count unchanged: %d\n",
3740 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3742 /* update xris to els sgls on the list */
3744 sglq_entry_next = NULL;
3745 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3746 &phba->sli4_hba.lpfc_els_sgl_list, list) {
3747 lxri = lpfc_sli4_next_xritag(phba);
3748 if (lxri == NO_XRI) {
3749 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3750 "2400 Failed to allocate xri for "
3755 sglq_entry->sli4_lxritag = lxri;
3756 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3761 lpfc_free_els_sgl_list(phba);
3766 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
3767 * @phba: pointer to lpfc hba data structure.
3769 * This routine first calculates the sizes of the current els and allocated
3770 * scsi sgl lists, and then goes through all sgls to update the physical
3771 * XRIs assigned due to port function reset. During port initialization, the
3772 * current els and allocated scsi sgl lists are 0s.
3775 * 0 - successful (for now, it always returns 0)
3778 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
3780 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3781 uint16_t i, lxri, xri_cnt, els_xri_cnt;
3782 uint16_t nvmet_xri_cnt;
3783 LIST_HEAD(nvmet_sgl_list);
3787 * update on pci function's nvmet xri-sgl list
3789 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3791 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
3792 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3793 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
3794 /* nvmet xri-sgl expanded */
3795 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
3796 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3797 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
3798 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
3799 /* allocate the additional nvmet sgls */
3800 for (i = 0; i < xri_cnt; i++) {
3801 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3803 if (sglq_entry == NULL) {
3804 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3805 "6303 Failure to allocate an "
3806 "NVMET sgl entry:%d\n", i);
3810 sglq_entry->buff_type = NVMET_BUFF_TYPE;
3811 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
3813 if (sglq_entry->virt == NULL) {
3815 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3816 "6304 Failure to allocate an "
3817 "NVMET buf:%d\n", i);
3821 sglq_entry->sgl = sglq_entry->virt;
3822 memset(sglq_entry->sgl, 0,
3823 phba->cfg_sg_dma_buf_size);
3824 sglq_entry->state = SGL_FREED;
3825 list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
3827 spin_lock_irq(&phba->hbalock);
3828 spin_lock(&phba->sli4_hba.sgl_list_lock);
3829 list_splice_init(&nvmet_sgl_list,
3830 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3831 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3832 spin_unlock_irq(&phba->hbalock);
3833 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
3834 /* nvmet xri-sgl shrunk */
3835 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
3836 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3837 "6305 NVMET xri-sgl count decreased from "
3838 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
3840 spin_lock_irq(&phba->hbalock);
3841 spin_lock(&phba->sli4_hba.sgl_list_lock);
3842 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
3844 /* release extra nvmet sgls from list */
3845 for (i = 0; i < xri_cnt; i++) {
3846 list_remove_head(&nvmet_sgl_list,
3847 sglq_entry, struct lpfc_sglq, list);
3849 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
3854 list_splice_init(&nvmet_sgl_list,
3855 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3856 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3857 spin_unlock_irq(&phba->hbalock);
3859 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3860 "6306 NVMET xri-sgl count unchanged: %d\n",
3862 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
3864 /* update xris to nvmet sgls on the list */
3866 sglq_entry_next = NULL;
3867 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3868 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
3869 lxri = lpfc_sli4_next_xritag(phba);
3870 if (lxri == NO_XRI) {
3871 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3872 "6307 Failed to allocate xri for "
3877 sglq_entry->sli4_lxritag = lxri;
3878 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3883 lpfc_free_nvmet_sgl_list(phba);
3888 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
3891 struct lpfc_sli4_hdw_queue *qp;
3892 struct lpfc_io_buf *lpfc_cmd;
3893 struct lpfc_io_buf *iobufp, *prev_iobufp;
3894 int idx, cnt, xri, inserted;
3897 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3898 qp = &phba->sli4_hba.hdwq[idx];
3899 spin_lock_irq(&qp->io_buf_list_get_lock);
3900 spin_lock(&qp->io_buf_list_put_lock);
3902 /* Take everything off the get and put lists */
3903 list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
3904 list_splice(&qp->lpfc_io_buf_list_put, &blist);
3905 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
3906 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
3907 cnt += qp->get_io_bufs + qp->put_io_bufs;
3908 qp->get_io_bufs = 0;
3909 qp->put_io_bufs = 0;
3910 qp->total_io_bufs = 0;
3911 spin_unlock(&qp->io_buf_list_put_lock);
3912 spin_unlock_irq(&qp->io_buf_list_get_lock);
3916 * Take IO buffers off blist and put on cbuf sorted by XRI.
3917 * This is because POST_SGL takes a sequential range of XRIs
3918 * to post to the firmware.
3920 for (idx = 0; idx < cnt; idx++) {
3921 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
3925 list_add_tail(&lpfc_cmd->list, cbuf);
3928 xri = lpfc_cmd->cur_iocbq.sli4_xritag;
3931 list_for_each_entry(iobufp, cbuf, list) {
3932 if (xri < iobufp->cur_iocbq.sli4_xritag) {
3934 list_add(&lpfc_cmd->list,
3935 &prev_iobufp->list);
3937 list_add(&lpfc_cmd->list, cbuf);
3941 prev_iobufp = iobufp;
3944 list_add_tail(&lpfc_cmd->list, cbuf);
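/*
 * The insertion above keeps cbuf sorted in ascending sli4_xritag order;
 * POST_SGL wants sequential XRI ranges, so a sorted list lets the SGL
 * block post cover the buffers with contiguous runs.
 */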
3950 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
3952 struct lpfc_sli4_hdw_queue *qp;
3953 struct lpfc_io_buf *lpfc_cmd;
3956 qp = phba->sli4_hba.hdwq;
3958 while (!list_empty(cbuf)) {
3959 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3960 list_remove_head(cbuf, lpfc_cmd,
3961 struct lpfc_io_buf, list);
3965 qp = &phba->sli4_hba.hdwq[idx];
3966 lpfc_cmd->hdwq_no = idx;
3967 lpfc_cmd->hdwq = qp;
3968 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
3969 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
3970 spin_lock(&qp->io_buf_list_put_lock);
3971 list_add_tail(&lpfc_cmd->list,
3972 &qp->lpfc_io_buf_list_put);
3974 qp->total_io_bufs++;
3975 spin_unlock(&qp->io_buf_list_put_lock);
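/*
 * Distribution note: the inner loop hands out one buffer per hardware
 * queue on each pass, so the replenished buffers end up spread evenly
 * (round-robin) across all cfg_hdw_queue put lists.
 */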
3982 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
3983 * @phba: pointer to lpfc hba data structure.
3985 * This routine first calculates the sizes of the current els and allocated
3986 * scsi sgl lists, and then goes through all sgls to update the physical
3987 * XRIs assigned due to port function reset. During port initialization, the
3988 * current els and allocated scsi sgl lists are 0s.
3991 * 0 - successful (for now, it always returns 0)
3994 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
3996 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
3997 uint16_t i, lxri, els_xri_cnt;
3998 uint16_t io_xri_cnt, io_xri_max;
3999 LIST_HEAD(io_sgl_list);
4003 * update on pci function's allocated nvme xri-sgl list
4006 /* maximum number of xris available for nvme buffers */
4007 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4008 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4009 phba->sli4_hba.io_xri_max = io_xri_max;
4011 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4012 "6074 Current allocated XRI sgl count:%d, "
4013 "maximum XRI count:%d\n",
4014 phba->sli4_hba.io_xri_cnt,
4015 phba->sli4_hba.io_xri_max);
4017 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4019 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4020 /* max nvme xri shrunk below the allocated nvme buffers */
4021 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4022 phba->sli4_hba.io_xri_max;
4023 /* release the extra allocated nvme buffers */
4024 for (i = 0; i < io_xri_cnt; i++) {
4025 list_remove_head(&io_sgl_list, lpfc_ncmd,
4026 struct lpfc_io_buf, list);
4028 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4030 lpfc_ncmd->dma_handle);
4034 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4037 /* update xris associated to remaining allocated nvme buffers */
4039 lpfc_ncmd_next = NULL;
4040 phba->sli4_hba.io_xri_cnt = cnt;
4041 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4042 &io_sgl_list, list) {
4043 lxri = lpfc_sli4_next_xritag(phba);
4044 if (lxri == NO_XRI) {
4045 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4046 "6075 Failed to allocate xri for "
4051 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4052 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4054 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4063 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4064 * @phba: pointer to lpfc hba data structure.
4065 * @num_to_alloc: The requested number of buffers to allocate.
4067 * This routine allocates nvme buffers for a device with the SLI-4 interface
4068 * spec; each nvme buffer contains all the information needed to initiate
4069 * an I/O. After allocating up to @num_to_alloc IO buffers and putting
4070 * them on a list, it posts them to the port using SGL block post.
4073 * int - number of IO buffers that were allocated and posted.
4074 * 0 = failure; less than num_to_alloc indicates a partial failure.
4077 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4079 struct lpfc_io_buf *lpfc_ncmd;
4080 struct lpfc_iocbq *pwqeq;
4081 uint16_t iotag, lxri = 0;
4082 int bcnt, num_posted;
4083 LIST_HEAD(prep_nblist);
4084 LIST_HEAD(post_nblist);
4085 LIST_HEAD(nvme_nblist);
4087 phba->sli4_hba.io_xri_cnt = 0;
4088 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4089 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4093 * Get memory from the pci pool to map the virt space to
4094 * pci bus space for an I/O. The DMA buffer includes the
4095 * number of SGE's necessary to support the sg_tablesize.
4097 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4099 &lpfc_ncmd->dma_handle);
4100 if (!lpfc_ncmd->data) {
4105 if (phba->cfg_xpsgl && !phba->nvmet_support) {
4106 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4109 * 4K Page alignment is CRITICAL to BlockGuard, double check to be sure.
4112 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4113 (((unsigned long)(lpfc_ncmd->data) &
4114 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4115 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
4116 "3369 Memory alignment err: "
4118 (unsigned long)lpfc_ncmd->data);
4119 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4121 lpfc_ncmd->dma_handle);
4127 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4129 lxri = lpfc_sli4_next_xritag(phba);
4130 if (lxri == NO_XRI) {
4131 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4132 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4136 pwqeq = &lpfc_ncmd->cur_iocbq;
4138 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
4139 iotag = lpfc_sli_next_iotag(phba, pwqeq);
4141 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4142 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4144 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
4145 "6121 Failed to allocate IOTAG for"
4146 " XRI:0x%x\n", lxri);
4147 lpfc_sli4_free_xri(phba, lxri);
4150 pwqeq->sli4_lxritag = lxri;
4151 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4152 pwqeq->context1 = lpfc_ncmd;
4154 /* Initialize local short-hand pointers. */
4155 lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4156 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4157 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
4158 spin_lock_init(&lpfc_ncmd->buf_lock);
4160 /* add the nvme buffer to a post list */
4161 list_add_tail(&lpfc_ncmd->list, &post_nblist);
4162 phba->sli4_hba.io_xri_cnt++;
4164 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4165 "6114 Allocate %d out of %d requested new NVME "
4166 "buffers\n", bcnt, num_to_alloc);
4168 /* post the list of nvme buffer sgls to port if available */
4169 if (!list_empty(&post_nblist))
4170 num_posted = lpfc_sli4_post_io_sgl_list(
4171 phba, &post_nblist, bcnt);
4179 lpfc_get_wwpn(struct lpfc_hba *phba)
4183 LPFC_MBOXQ_t *mboxq;
4186 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4189 return (uint64_t)-1;
4191 /* First get WWN of HBA instance */
4192 lpfc_read_nv(phba, mboxq);
4193 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4194 if (rc != MBX_SUCCESS) {
4195 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4196 "6019 Mailbox failed , mbxCmd x%x "
4197 "READ_NV, mbxStatus x%x\n",
4198 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4199 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4200 mempool_free(mboxq, phba->mbox_mem_pool);
4201 return (uint64_t) -1;
4204 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4205 /* wwn is WWPN of HBA instance */
4206 mempool_free(mboxq, phba->mbox_mem_pool);
4207 if (phba->sli_rev == LPFC_SLI_REV4)
4208 return be64_to_cpu(wwn);
4210 return rol64(wwn, 32);
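/*
 * Illustrative sketch (hedged, drawn from the code above rather than the
 * hardware spec): SLI-4 parts return the name big-endian, so be64_to_cpu()
 * is the whole conversion, while the SLI-3 path needs its two 32-bit
 * halves swapped, which rol64(wwn, 32) performs. With a hypothetical raw
 * value:
 *
 *	u64 raw = 0x2000000011223344ULL;
 *	u64 fixed = rol64(raw, 32);	now 0x1122334420000000ULL
 */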
4214 * lpfc_create_port - Create an FC port
4215 * @phba: pointer to lpfc hba data structure.
4216 * @instance: a unique integer ID to this FC port.
4217 * @dev: pointer to the device data structure.
4219 * This routine creates an FC port for the upper layer protocol. The FC port
4220 * can be created on top of either a physical port or a virtual port provided
4221 * by the HBA. This routine also allocates a SCSI host data structure (shost)
4222 * and associates the FC port with it before adding the shost to the SCSI layer.
4226 * @vport - pointer to the virtual N_Port data structure.
4227 * NULL - port create failed.
4230 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4232 struct lpfc_vport *vport;
4233 struct Scsi_Host *shost = NULL;
4234 struct scsi_host_template *template;
4238 bool use_no_reset_hba = false;
4241 if (lpfc_no_hba_reset_cnt) {
4242 if (phba->sli_rev < LPFC_SLI_REV4 &&
4243 dev == &phba->pcidev->dev) {
4244 /* Reset the port first */
4245 lpfc_sli_brdrestart(phba);
4246 rc = lpfc_sli_chipset_init(phba);
4250 wwn = lpfc_get_wwpn(phba);
4253 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4254 if (wwn == lpfc_no_hba_reset[i]) {
4255 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4256 "6020 Setting use_no_reset port=%llx\n",
4258 use_no_reset_hba = true;
4263 /* Seed template for SCSI host registration */
4264 if (dev == &phba->pcidev->dev) {
4265 template = &phba->port_template;
4267 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4268 /* Seed physical port template */
4269 memcpy(template, &lpfc_template, sizeof(*template));
4271 if (use_no_reset_hba) {
4272 /* template is for a no reset SCSI Host */
4273 template->max_sectors = 0xffff;
4274 template->eh_host_reset_handler = NULL;
4277 /* Template for all vports this physical port creates */
4278 memcpy(&phba->vport_template, &lpfc_template,
4280 phba->vport_template.max_sectors = 0xffff;
4281 phba->vport_template.shost_attrs = lpfc_vport_attrs;
4282 phba->vport_template.eh_bus_reset_handler = NULL;
4283 phba->vport_template.eh_host_reset_handler = NULL;
4284 phba->vport_template.vendor_id = 0;
4286 /* Initialize the host templates with updated value */
4287 if (phba->sli_rev == LPFC_SLI_REV4) {
4288 template->sg_tablesize = phba->cfg_scsi_seg_cnt;
4289 phba->vport_template.sg_tablesize =
4290 phba->cfg_scsi_seg_cnt;
4292 template->sg_tablesize = phba->cfg_sg_seg_cnt;
4293 phba->vport_template.sg_tablesize =
4294 phba->cfg_sg_seg_cnt;
4298 /* NVMET is for physical port only */
4299 memcpy(template, &lpfc_template_nvme,
4303 template = &phba->vport_template;
4306 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
4310 vport = (struct lpfc_vport *) shost->hostdata;
4312 vport->load_flag |= FC_LOADING;
4313 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4314 vport->fc_rscn_flush = 0;
4315 lpfc_get_vport_cfgparam(vport);
4317 /* Adjust value in vport */
4318 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4320 shost->unique_id = instance;
4321 shost->max_id = LPFC_MAX_TARGET;
4322 shost->max_lun = vport->cfg_max_luns;
4323 shost->this_id = -1;
4324 shost->max_cmd_len = 16;
4326 if (phba->sli_rev == LPFC_SLI_REV4) {
4327 if (!phba->cfg_fcp_mq_threshold ||
4328 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4329 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4331 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4332 phba->cfg_fcp_mq_threshold);
4334 shost->dma_boundary =
4335 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4337 if (phba->cfg_xpsgl && !phba->nvmet_support)
4338 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4340 shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
4342 /* SLI-3 has a limited number of hardware queues (3),
4343 * thus there is only one for FCP processing.
4345 shost->nr_hw_queues = 1;
4348 * Set initial can_queue value since 0 is no longer supported and
4349 * scsi_add_host will fail. This will be adjusted later based on the
4350 * max xri value determined in hba setup.
4352 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4353 if (dev != &phba->pcidev->dev) {
4354 shost->transportt = lpfc_vport_transport_template;
4355 vport->port_type = LPFC_NPIV_PORT;
4357 shost->transportt = lpfc_transport_template;
4358 vport->port_type = LPFC_PHYSICAL_PORT;
4361 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4362 "9081 CreatePort TMPLATE type %x TBLsize %d "
4364 vport->port_type, shost->sg_tablesize,
4365 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4367 /* Initialize all internally managed lists. */
4368 INIT_LIST_HEAD(&vport->fc_nodes);
4369 INIT_LIST_HEAD(&vport->rcv_buffer_list);
4370 spin_lock_init(&vport->work_port_lock);
4372 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4374 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4376 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4378 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4379 lpfc_setup_bg(phba, shost);
4381 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4385 spin_lock_irq(&phba->port_list_lock);
4386 list_add_tail(&vport->listentry, &phba->port_list);
4387 spin_unlock_irq(&phba->port_list_lock);
4391 scsi_host_put(shost);
4397 * destroy_port - destroy an FC port
4398 * @vport: pointer to an lpfc virtual N_Port data structure.
4400 * This routine destroys a FC port from the upper layer protocol. All the
4401 * resources associated with the port are released.
4404 destroy_port(struct lpfc_vport *vport)
4406 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4407 struct lpfc_hba *phba = vport->phba;
4409 lpfc_debugfs_terminate(vport);
4410 fc_remove_host(shost);
4411 scsi_remove_host(shost);
4413 spin_lock_irq(&phba->port_list_lock);
4414 list_del_init(&vport->listentry);
4415 spin_unlock_irq(&phba->port_list_lock);
4417 lpfc_cleanup(vport);
4422 * lpfc_get_instance - Get a unique integer ID
4424 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
4425 * uses the kernel idr facility to perform the task.
4428 * instance - a unique integer ID allocated as the new instance.
4429 * -1 - lpfc get instance failed.
4432 lpfc_get_instance(void)
4436 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4437 return ret < 0 ? -1 : ret;
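/*
 * Illustrative sketch (hypothetical, self-contained): the idr
 * allocate/release pairing assumed by lpfc_get_instance() above.
 * idr_alloc() returns the smallest free integer >= start (0 here, with
 * end = 0 meaning no upper bound); the matching idr_remove() returns the
 * ID to the pool at teardown time.
 *
 *	static DEFINE_IDR(example_idr);
 *
 *	int id = idr_alloc(&example_idr, NULL, 0, 0, GFP_KERNEL);
 *	if (id >= 0)
 *		idr_remove(&example_idr, id);
 */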
4441 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
4442 * @shost: pointer to SCSI host data structure.
4443 * @time: elapsed time of the scan in jiffies.
4445 * This routine is called by the SCSI layer with a SCSI host to determine
4446 * whether the host scan has finished.
4448 * Note: there is no scan_start function as adapter initialization will have
4449 * asynchronously kicked off the link initialization.
4452 * 0 - SCSI host scan is not over yet.
4453 * 1 - SCSI host scan is over.
4455 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4457 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4458 struct lpfc_hba *phba = vport->phba;
4461 spin_lock_irq(shost->host_lock);
4463 if (vport->load_flag & FC_UNLOADING) {
4467 if (time >= msecs_to_jiffies(30 * 1000)) {
4468 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4469 "0461 Scanning longer than 30 "
4470 "seconds. Continuing initialization\n");
4474 if (time >= msecs_to_jiffies(15 * 1000) &&
4475 phba->link_state <= LPFC_LINK_DOWN) {
4476 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4477 "0465 Link down longer than 15 "
4478 "seconds. Continuing initialization\n");
4483 if (vport->port_state != LPFC_VPORT_READY)
4485 if (vport->num_disc_nodes || vport->fc_prli_sent)
4487 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
4489 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4495 spin_unlock_irq(shost->host_lock);
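/*
 * Worked example for the timing thresholds above (values from the code,
 * not new policy): with HZ = 250, msecs_to_jiffies(30 * 1000) is 7500
 * jiffies, so a scan that has run past 7500 jiffies is declared done
 * unconditionally, and past msecs_to_jiffies(15 * 1000) = 3750 jiffies it
 * is declared done whenever the link is still down.
 */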
4499 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4501 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4502 struct lpfc_hba *phba = vport->phba;
4504 fc_host_supported_speeds(shost) = 0;
4505 if (phba->lmt & LMT_128Gb)
4506 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4507 if (phba->lmt & LMT_64Gb)
4508 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4509 if (phba->lmt & LMT_32Gb)
4510 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4511 if (phba->lmt & LMT_16Gb)
4512 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4513 if (phba->lmt & LMT_10Gb)
4514 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4515 if (phba->lmt & LMT_8Gb)
4516 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4517 if (phba->lmt & LMT_4Gb)
4518 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4519 if (phba->lmt & LMT_2Gb)
4520 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4521 if (phba->lmt & LMT_1Gb)
4522 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
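/*
 * Illustrative sketch (not from the driver source): the if-chain above is
 * a bit-for-bit translation of the adapter's link-media-type (LMT) mask
 * into the FC transport's FC_PORTSPEED_* mask. A table-driven equivalent
 * using the hypothetical name example_speed_map:
 *
 *	static const struct { uint32_t lmt; uint32_t portspeed; }
 *	example_speed_map[] = {
 *		{ LMT_128Gb, FC_PORTSPEED_128GBIT },
 *		{ LMT_64Gb,  FC_PORTSPEED_64GBIT  },
 *		{ LMT_1Gb,   FC_PORTSPEED_1GBIT   },
 *	};
 *	int i;
 *
 *	for (i = 0; i < ARRAY_SIZE(example_speed_map); i++)
 *		if (phba->lmt & example_speed_map[i].lmt)
 *			fc_host_supported_speeds(shost) |=
 *				example_speed_map[i].portspeed;
 */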
4526 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
4527 * @shost: pointer to SCSI host data structure.
4529 * This routine initializes a given SCSI host's attributes on an FC port. The
4530 * SCSI host can be either on top of a physical port or a virtual port.
4532 void lpfc_host_attrib_init(struct Scsi_Host *shost)
4534 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4535 struct lpfc_hba *phba = vport->phba;
4537 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
4540 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4541 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4542 fc_host_supported_classes(shost) = FC_COS_CLASS3;
4544 memset(fc_host_supported_fc4s(shost), 0,
4545 sizeof(fc_host_supported_fc4s(shost)));
4546 fc_host_supported_fc4s(shost)[2] = 1;
4547 fc_host_supported_fc4s(shost)[7] = 1;
4549 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
4550 sizeof fc_host_symbolic_name(shost));
4552 lpfc_host_supported_speeds_set(shost);
4554 fc_host_maxframe_size(shost) =
4555 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
4556 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
4558 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
4560 /* This value is also unchanging */
4561 memset(fc_host_active_fc4s(shost), 0,
4562 sizeof(fc_host_active_fc4s(shost)));
4563 fc_host_active_fc4s(shost)[2] = 1;
4564 fc_host_active_fc4s(shost)[7] = 1;
4566 fc_host_max_npiv_vports(shost) = phba->max_vpi;
4567 spin_lock_irq(shost->host_lock);
4568 vport->load_flag &= ~FC_LOADING;
4569 spin_unlock_irq(shost->host_lock);
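/*
 * Worked example for the maxframe_size computation above: the service
 * parameters split the BB receive size across two bytes, and only the low
 * nibble of the MSB is significant. With hypothetical values
 * bbRcvSizeMsb = 0x18 and bbRcvSizeLsb = 0x00:
 *
 *	((0x18 & 0x0F) << 8) | 0x00  ==  0x0800  ==  2048 bytes
 */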
4573 * lpfc_stop_port_s3 - Stop SLI3 device port
4574 * @phba: pointer to lpfc hba data structure.
4576 * This routine is invoked to stop an SLI3 device port. It stops the device
4577 * from generating interrupts and stops the device driver's timers for the device.
4581 lpfc_stop_port_s3(struct lpfc_hba *phba)
4583 /* Clear all interrupt enable conditions */
4584 writel(0, phba->HCregaddr);
4585 readl(phba->HCregaddr); /* flush */
4586 /* Clear all pending interrupts */
4587 writel(0xffffffff, phba->HAregaddr);
4588 readl(phba->HAregaddr); /* flush */
4590 /* Reset some HBA SLI setup states */
4591 lpfc_stop_hba_timers(phba);
4592 phba->pport->work_port_events = 0;
4596 * lpfc_stop_port_s4 - Stop SLI4 device port
4597 * @phba: pointer to lpfc hba data structure.
4599 * This routine is invoked to stop an SLI4 device port. It stops the device
4600 * from generating interrupts and stops the device driver's timers for the device.
4604 lpfc_stop_port_s4(struct lpfc_hba *phba)
4606 /* Reset some HBA SLI4 setup states */
4607 lpfc_stop_hba_timers(phba);
4609 phba->pport->work_port_events = 0;
4610 phba->sli4_hba.intr_enable = 0;
4614 * lpfc_stop_port - Wrapper function for stopping hba port
4615 * @phba: Pointer to HBA context object.
4617 * This routine wraps the actual SLI3 or SLI4 hba stop port routine, invoked
4618 * via the API jump table function pointer in the lpfc_hba struct.
4621 lpfc_stop_port(struct lpfc_hba *phba)
4623 phba->lpfc_stop_port(phba);
4626 flush_workqueue(phba->wq);
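/*
 * Illustrative sketch (hedged; the exact wiring lives in the init API
 * table setup, outside this section): phba->lpfc_stop_port is one entry
 * in the per-SLI-revision jump table, set once so callers such as
 * lpfc_stop_port() never branch on the revision themselves:
 *
 *	if (phba->sli_rev == LPFC_SLI_REV4)
 *		phba->lpfc_stop_port = lpfc_stop_port_s4;
 *	else
 *		phba->lpfc_stop_port = lpfc_stop_port_s3;
 */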
4630 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
4631 * @phba: Pointer to hba for which this call is being executed.
4633 * This routine starts the timer waiting for the FCF rediscovery to complete.
4636 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
4638 unsigned long fcf_redisc_wait_tmo =
4639 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
4640 /* Start fcf rediscovery wait period timer */
4641 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
4642 spin_lock_irq(&phba->hbalock);
4643 /* Allow action to new fcf asynchronous event */
4644 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
4645 /* Mark the FCF rediscovery pending state */
4646 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
4647 spin_unlock_irq(&phba->hbalock);
4651 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
4652 * @t: timer context used to map back to the lpfc_hba data structure.
4654 * This routine is invoked when the wait for FCF table rediscovery has
4655 * timed out. If new FCF record(s) have been discovered during the
4656 * wait period, a new FCF event is added to the FCOE async event
4657 * list, and the worker thread is then woken up for processing in the
4658 * worker thread context.
4661 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
4663 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
4665 /* Don't send FCF rediscovery event if timer cancelled */
4666 spin_lock_irq(&phba->hbalock);
4667 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
4668 spin_unlock_irq(&phba->hbalock);
4671 /* Clear FCF rediscovery timer pending flag */
4672 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
4673 /* FCF rediscovery event to worker thread */
4674 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
4675 spin_unlock_irq(&phba->hbalock);
4676 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
4677 "2776 FCF rediscover quiescent timer expired\n");
4678 /* wake up worker thread */
4679 lpfc_worker_wake_up(phba);
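/*
 * Illustrative sketch (hypothetical, self-contained): the
 * timer_setup()/from_timer() pairing used by the FCF rediscovery timer
 * above. The callback receives only the timer_list pointer and recovers
 * the enclosing object from the member offset:
 *
 *	struct example_obj {
 *		struct timer_list tmo;
 *	};
 *
 *	static void example_tmo_fn(struct timer_list *t)
 *	{
 *		struct example_obj *obj = from_timer(obj, t, tmo);
 *		(use obj here)
 *	}
 *
 *	timer_setup(&obj->tmo, example_tmo_fn, 0);
 *	mod_timer(&obj->tmo, jiffies + msecs_to_jiffies(2000));
 */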
4683 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
4684 * @phba: pointer to lpfc hba data structure.
4685 * @acqe_link: pointer to the async link completion queue entry.
4687 * This routine is to parse the SLI4 link-attention link fault code.
4690 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
4691 struct lpfc_acqe_link *acqe_link)
4693 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
4694 case LPFC_ASYNC_LINK_FAULT_NONE:
4695 case LPFC_ASYNC_LINK_FAULT_LOCAL:
4696 case LPFC_ASYNC_LINK_FAULT_REMOTE:
4697 case LPFC_ASYNC_LINK_FAULT_LR_LRR:
4700 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4701 "0398 Unknown link fault code: x%x\n",
4702 bf_get(lpfc_acqe_link_fault, acqe_link));
4708 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
4709 * @phba: pointer to lpfc hba data structure.
4710 * @acqe_link: pointer to the async link completion queue entry.
4712 * This routine is to parse the SLI4 link attention type and translate it
4713 * into the base driver's link attention type coding.
4715 * Return: Link attention type in terms of base driver's coding.
4718 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
4719 struct lpfc_acqe_link *acqe_link)
4723 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
4724 case LPFC_ASYNC_LINK_STATUS_DOWN:
4725 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
4726 att_type = LPFC_ATT_LINK_DOWN;
4728 case LPFC_ASYNC_LINK_STATUS_UP:
4729 /* Ignore physical link up events - wait for logical link up */
4730 att_type = LPFC_ATT_RESERVED;
4732 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
4733 att_type = LPFC_ATT_LINK_UP;
4736 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4737 "0399 Invalid link attention type: x%x\n",
4738 bf_get(lpfc_acqe_link_status, acqe_link));
4739 att_type = LPFC_ATT_RESERVED;
4746 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
4747 * @phba: pointer to lpfc hba data structure.
4749 * This routine is to get an SLI3 FC port's link speed in Mbps.
4751 * Return: link speed in terms of Mbps.
4754 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
4756 uint32_t link_speed;
4758 if (!lpfc_is_link_up(phba))
4761 if (phba->sli_rev <= LPFC_SLI_REV3) {
4762 switch (phba->fc_linkspeed) {
4763 case LPFC_LINK_SPEED_1GHZ:
4766 case LPFC_LINK_SPEED_2GHZ:
4769 case LPFC_LINK_SPEED_4GHZ:
4772 case LPFC_LINK_SPEED_8GHZ:
4775 case LPFC_LINK_SPEED_10GHZ:
4778 case LPFC_LINK_SPEED_16GHZ:
4785 if (phba->sli4_hba.link_state.logical_speed)
4787 phba->sli4_hba.link_state.logical_speed;
4789 link_speed = phba->sli4_hba.link_state.speed;
4795 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
4796 * @phba: pointer to lpfc hba data structure.
4797 * @evt_code: asynchronous event code.
4798 * @speed_code: asynchronous event link speed code.
4800 * This routine parses the given SLI4 async event link speed code into a
4801 * link speed value in Mbps.
4803 * Return: link speed in terms of Mbps.
4806 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
4809 uint32_t port_speed;
4812 case LPFC_TRAILER_CODE_LINK:
4813 switch (speed_code) {
4814 case LPFC_ASYNC_LINK_SPEED_ZERO:
4817 case LPFC_ASYNC_LINK_SPEED_10MBPS:
4820 case LPFC_ASYNC_LINK_SPEED_100MBPS:
4823 case LPFC_ASYNC_LINK_SPEED_1GBPS:
4826 case LPFC_ASYNC_LINK_SPEED_10GBPS:
4829 case LPFC_ASYNC_LINK_SPEED_20GBPS:
4832 case LPFC_ASYNC_LINK_SPEED_25GBPS:
4835 case LPFC_ASYNC_LINK_SPEED_40GBPS:
4842 case LPFC_TRAILER_CODE_FC:
4843 switch (speed_code) {
4844 case LPFC_FC_LA_SPEED_UNKNOWN:
4847 case LPFC_FC_LA_SPEED_1G:
4850 case LPFC_FC_LA_SPEED_2G:
4853 case LPFC_FC_LA_SPEED_4G:
4856 case LPFC_FC_LA_SPEED_8G:
4859 case LPFC_FC_LA_SPEED_10G:
4862 case LPFC_FC_LA_SPEED_16G:
4865 case LPFC_FC_LA_SPEED_32G:
4868 case LPFC_FC_LA_SPEED_64G:
4871 case LPFC_FC_LA_SPEED_128G:
4872 port_speed = 128000;
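/*
 * Illustrative sketch (not from the driver source): both switch arms
 * above reduce to "event speed code in, Mbps out", e.g.
 * LPFC_FC_LA_SPEED_16G maps to 16000 and LPFC_ASYNC_LINK_SPEED_100MBPS
 * maps to 100. A hedged table-driven form for the FC trailer codes, with
 * the hypothetical name example_fc_mbps:
 *
 *	static const uint32_t example_fc_mbps[] = {
 *		[LPFC_FC_LA_SPEED_1G]   = 1000,
 *		[LPFC_FC_LA_SPEED_16G]  = 16000,
 *		[LPFC_FC_LA_SPEED_128G] = 128000,
 *	};
 *	port_speed = example_fc_mbps[speed_code];
 */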
4885 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
4886 * @phba: pointer to lpfc hba data structure.
4887 * @acqe_link: pointer to the async link completion queue entry.
4889 * This routine is to handle the SLI4 asynchronous FCoE link event.
4892 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
4893 struct lpfc_acqe_link *acqe_link)
4895 struct lpfc_dmabuf *mp;
4898 struct lpfc_mbx_read_top *la;
4902 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
4903 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
4905 phba->fcoe_eventtag = acqe_link->event_tag;
4906 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4908 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4909 "0395 The mboxq allocation failed\n");
4912 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4914 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4915 "0396 The lpfc_dmabuf allocation failed\n");
4918 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
4920 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4921 "0397 The mbuf allocation failed\n");
4922 goto out_free_dmabuf;
4925 /* Cleanup any outstanding ELS commands */
4926 lpfc_els_flush_all_cmd(phba);
4928 /* Block ELS IOCBs until we have done process link event */
4929 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
4931 /* Update link event statistics */
4932 phba->sli.slistat.link_event++;
4934 /* Create lpfc_handle_latt mailbox command from link ACQE */
4935 lpfc_read_topology(phba, pmb, mp);
4936 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
4937 pmb->vport = phba->pport;
4939 /* Keep the link status for extra SLI4 state machine reference */
4940 phba->sli4_hba.link_state.speed =
4941 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
4942 bf_get(lpfc_acqe_link_speed, acqe_link));
4943 phba->sli4_hba.link_state.duplex =
4944 bf_get(lpfc_acqe_link_duplex, acqe_link);
4945 phba->sli4_hba.link_state.status =
4946 bf_get(lpfc_acqe_link_status, acqe_link);
4947 phba->sli4_hba.link_state.type =
4948 bf_get(lpfc_acqe_link_type, acqe_link);
4949 phba->sli4_hba.link_state.number =
4950 bf_get(lpfc_acqe_link_number, acqe_link);
4951 phba->sli4_hba.link_state.fault =
4952 bf_get(lpfc_acqe_link_fault, acqe_link);
4953 phba->sli4_hba.link_state.logical_speed =
4954 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
4956 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4957 "2900 Async FC/FCoE Link event - Speed:%dGBit "
4958 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
4959 "Logical speed:%dMbps Fault:%d\n",
4960 phba->sli4_hba.link_state.speed,
4961 phba->sli4_hba.link_state.topology,
4962 phba->sli4_hba.link_state.status,
4963 phba->sli4_hba.link_state.type,
4964 phba->sli4_hba.link_state.number,
4965 phba->sli4_hba.link_state.logical_speed,
4966 phba->sli4_hba.link_state.fault);
4968 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
4969 * topology info. Note: Optional for non FC-AL ports.
4971 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
4972 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4973 if (rc == MBX_NOT_FINISHED)
4974 goto out_free_dmabuf;
4978 * For FCoE Mode: fill in all the topology information we need and call
4979 * the READ_TOPOLOGY completion routine to continue without actually
4980 * sending the READ_TOPOLOGY mailbox command to the port.
4982 /* Initialize completion status */
4984 mb->mbxStatus = MBX_SUCCESS;
4986 /* Parse port fault information field */
4987 lpfc_sli4_parse_latt_fault(phba, acqe_link);
4989 /* Parse and translate link attention fields */
4990 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
4991 la->eventTag = acqe_link->event_tag;
4992 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
4993 bf_set(lpfc_mbx_read_top_link_spd, la,
4994 (bf_get(lpfc_acqe_link_speed, acqe_link)));
4996 /* Fake the following irrelevant fields */
4997 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
4998 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
4999 bf_set(lpfc_mbx_read_top_il, la, 0);
5000 bf_set(lpfc_mbx_read_top_pb, la, 0);
5001 bf_set(lpfc_mbx_read_top_fa, la, 0);
5002 bf_set(lpfc_mbx_read_top_mm, la, 0);
5004 /* Invoke the lpfc_handle_latt mailbox command callback function */
5005 lpfc_mbx_cmpl_read_topology(phba, pmb);
5012 mempool_free(pmb, phba->mbox_mem_pool);
5016 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read topology link speed
5018 * @phba: pointer to lpfc hba data structure.
5020 * @speed_code: asynchronous event link speed code.
5022 * This routine parses the given SLI4 async event link speed code into the
5023 * corresponding READ_TOPOLOGY link speed encoding.
5025 * Return: link speed in terms of the READ_TOPOLOGY encoding.
5028 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5032 switch (speed_code) {
5033 case LPFC_FC_LA_SPEED_1G:
5034 port_speed = LPFC_LINK_SPEED_1GHZ;
5036 case LPFC_FC_LA_SPEED_2G:
5037 port_speed = LPFC_LINK_SPEED_2GHZ;
5039 case LPFC_FC_LA_SPEED_4G:
5040 port_speed = LPFC_LINK_SPEED_4GHZ;
5042 case LPFC_FC_LA_SPEED_8G:
5043 port_speed = LPFC_LINK_SPEED_8GHZ;
5045 case LPFC_FC_LA_SPEED_16G:
5046 port_speed = LPFC_LINK_SPEED_16GHZ;
5048 case LPFC_FC_LA_SPEED_32G:
5049 port_speed = LPFC_LINK_SPEED_32GHZ;
5051 case LPFC_FC_LA_SPEED_64G:
5052 port_speed = LPFC_LINK_SPEED_64GHZ;
5054 case LPFC_FC_LA_SPEED_128G:
5055 port_speed = LPFC_LINK_SPEED_128GHZ;
5057 case LPFC_FC_LA_SPEED_256G:
5058 port_speed = LPFC_LINK_SPEED_256GHZ;
5068 #define trunk_link_status(__idx)\
5069 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5070 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
5071 "Link up" : "Link down") : "NA"
5072 /* Did port __idx report an error? */
5073 #define trunk_port_fault(__idx)\
5074 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5075 (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
5078 lpfc_update_trunk_link_status(struct lpfc_hba *phba,
5079 struct lpfc_acqe_fc_la *acqe_fc)
5081 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
5082 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
5084 phba->sli4_hba.link_state.speed =
5085 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
5086 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5088 phba->sli4_hba.link_state.logical_speed =
5089 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
5090 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
5091 phba->fc_linkspeed =
5092 lpfc_async_link_speed_to_read_top(
5094 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5096 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
5097 phba->trunk_link.link0.state =
5098 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
5099 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5100 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
5102 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
5103 phba->trunk_link.link1.state =
5104 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
5105 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5106 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
5108 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
5109 phba->trunk_link.link2.state =
5110 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
5111 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5112 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
5114 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
5115 phba->trunk_link.link3.state =
5116 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
5117 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5118 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
5121 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5122 "2910 Async FC Trunking Event - Speed:%d\n"
5123 "\tLogical speed:%d "
5124 "port0: %s port1: %s port2: %s port3: %s\n",
5125 phba->sli4_hba.link_state.speed,
5126 phba->sli4_hba.link_state.logical_speed,
5127 trunk_link_status(0), trunk_link_status(1),
5128 trunk_link_status(2), trunk_link_status(3));
5131 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5132 "3202 trunk error:0x%x (%s) seen on port0:%s "
5134 * SLI-4: We have only 0xA error codes
5135 * defined as of now. Print an appropriate
5136 * message in case the driver needs to be updated.
5138 "port1:%s port2:%s port3:%s\n", err, err > 0xA ?
5139 "UNDEFINED. update driver." : trunk_errmsg[err],
5140 trunk_port_fault(0), trunk_port_fault(1),
5141 trunk_port_fault(2), trunk_port_fault(3));
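/*
 * Worked example for the trunk decode above (hypothetical values):
 * port_fault is a per-port bitmask and err is a single shared fault code,
 * so with port_fault = 0x5 and err = 0x2:
 *
 *	link0.fault = 0x5 & 0x1 ? 0x2 : 0;	yields 0x2
 *	link1.fault = 0x5 & 0x2 ? 0x2 : 0;	yields 0
 *	link2.fault = 0x5 & 0x4 ? 0x2 : 0;	yields 0x2
 *	link3.fault = 0x5 & 0x8 ? 0x2 : 0;	yields 0
 */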
5146 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
5147 * @phba: pointer to lpfc hba data structure.
5148 * @acqe_fc: pointer to the async fc completion queue entry.
5150 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
5151 * that the event was received and then issue a read_topology mailbox command so
5152 * that the rest of the driver will treat it the same as SLI3.
5155 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
5157 struct lpfc_dmabuf *mp;
5160 struct lpfc_mbx_read_top *la;
5163 if (bf_get(lpfc_trailer_type, acqe_fc) !=
5164 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
5165 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5166 "2895 Non FC link Event detected.(%d)\n",
5167 bf_get(lpfc_trailer_type, acqe_fc));
5171 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
5172 LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
5173 lpfc_update_trunk_link_status(phba, acqe_fc);
5177 /* Keep the link status for extra SLI4 state machine reference */
5178 phba->sli4_hba.link_state.speed =
5179 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
5180 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5181 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
5182 phba->sli4_hba.link_state.topology =
5183 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
5184 phba->sli4_hba.link_state.status =
5185 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
5186 phba->sli4_hba.link_state.type =
5187 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
5188 phba->sli4_hba.link_state.number =
5189 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
5190 phba->sli4_hba.link_state.fault =
5191 bf_get(lpfc_acqe_link_fault, acqe_fc);
5193 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
5194 LPFC_FC_LA_TYPE_LINK_DOWN)
5195 phba->sli4_hba.link_state.logical_speed = 0;
5196 else if (!phba->sli4_hba.conf_trunk)
5197 phba->sli4_hba.link_state.logical_speed =
5198 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
5200 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5201 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
5202 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
5203 "%dMbps Fault:%d\n",
5204 phba->sli4_hba.link_state.speed,
5205 phba->sli4_hba.link_state.topology,
5206 phba->sli4_hba.link_state.status,
5207 phba->sli4_hba.link_state.type,
5208 phba->sli4_hba.link_state.number,
5209 phba->sli4_hba.link_state.logical_speed,
5210 phba->sli4_hba.link_state.fault);
5211 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5213 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5214 "2897 The mboxq allocation failed\n");
5217 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5219 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5220 "2898 The lpfc_dmabuf allocation failed\n");
5223 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
5225 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5226 "2899 The mbuf allocation failed\n");
5227 goto out_free_dmabuf;
5230 /* Cleanup any outstanding ELS commands */
5231 lpfc_els_flush_all_cmd(phba);
5233 /* Block ELS IOCBs until we have done process link event */
5234 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5236 /* Update link event statistics */
5237 phba->sli.slistat.link_event++;
5239 /* Create lpfc_handle_latt mailbox command from link ACQE */
5240 lpfc_read_topology(phba, pmb, mp);
5241 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5242 pmb->vport = phba->pport;
5244 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
5245 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
5247 switch (phba->sli4_hba.link_state.status) {
5248 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
5249 phba->link_flag |= LS_MDS_LINK_DOWN;
5251 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
5252 phba->link_flag |= LS_MDS_LOOPBACK;
5258 /* Initialize completion status */
5260 mb->mbxStatus = MBX_SUCCESS;
5262 /* Parse port fault information field */
5263 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
5265 /* Parse and translate link attention fields */
5266 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
5267 la->eventTag = acqe_fc->event_tag;
5269 if (phba->sli4_hba.link_state.status ==
5270 LPFC_FC_LA_TYPE_UNEXP_WWPN) {
5271 bf_set(lpfc_mbx_read_top_att_type, la,
5272 LPFC_FC_LA_TYPE_UNEXP_WWPN);
5274 bf_set(lpfc_mbx_read_top_att_type, la,
5275 LPFC_FC_LA_TYPE_LINK_DOWN);
5277 /* Invoke the mailbox command callback function */
5278 lpfc_mbx_cmpl_read_topology(phba, pmb);
5283 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5284 if (rc == MBX_NOT_FINISHED)
5285 goto out_free_dmabuf;
5291 mempool_free(pmb, phba->mbox_mem_pool);
5295 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
5296 * @phba: pointer to lpfc hba data structure.
5297 * @acqe_sli: pointer to the async SLI completion queue entry.
5299 * This routine is to handle the SLI4 asynchronous SLI events.
5302 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
5308 uint8_t operational = 0;
5309 struct temp_event temp_event_data;
5310 struct lpfc_acqe_misconfigured_event *misconfigured;
5311 struct Scsi_Host *shost;
5312 struct lpfc_vport **vports;
5315 evt_type = bf_get(lpfc_trailer_type, acqe_sli);
5317 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5318 "2901 Async SLI event - Type:%d, Event Data: x%08x "
5319 "x%08x x%08x x%08x\n", evt_type,
5320 acqe_sli->event_data1, acqe_sli->event_data2,
5321 acqe_sli->reserved, acqe_sli->trailer);
5323 port_name = phba->Port[0];
5324 if (port_name == 0x00)
5325 port_name = '?'; /* port name is empty */
5328 case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
5329 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
5330 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
5331 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
5333 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5334 "3190 Over Temperature:%d Celsius- Port Name %c\n",
5335 acqe_sli->event_data1, port_name);
5337 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
5338 shost = lpfc_shost_from_vport(phba->pport);
5339 fc_host_post_vendor_event(shost, fc_get_event_number(),
5340 sizeof(temp_event_data),
5341 (char *)&temp_event_data,
5342 SCSI_NL_VID_TYPE_PCI
5343 | PCI_VENDOR_ID_EMULEX);
5345 case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
5346 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
5347 temp_event_data.event_code = LPFC_NORMAL_TEMP;
5348 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
5350 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5351 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
5352 acqe_sli->event_data1, port_name);
5354 shost = lpfc_shost_from_vport(phba->pport);
5355 fc_host_post_vendor_event(shost, fc_get_event_number(),
5356 sizeof(temp_event_data),
5357 (char *)&temp_event_data,
5358 SCSI_NL_VID_TYPE_PCI
5359 | PCI_VENDOR_ID_EMULEX);
5361 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
5362 misconfigured = (struct lpfc_acqe_misconfigured_event *)
5363 &acqe_sli->event_data1;
5365 /* fetch the status for this port */
5366 switch (phba->sli4_hba.lnk_info.lnk_no) {
5367 case LPFC_LINK_NUMBER_0:
5368 status = bf_get(lpfc_sli_misconfigured_port0_state,
5369 &misconfigured->theEvent);
5370 operational = bf_get(lpfc_sli_misconfigured_port0_op,
5371 &misconfigured->theEvent);
5373 case LPFC_LINK_NUMBER_1:
5374 status = bf_get(lpfc_sli_misconfigured_port1_state,
5375 &misconfigured->theEvent);
5376 operational = bf_get(lpfc_sli_misconfigured_port1_op,
5377 &misconfigured->theEvent);
5379 case LPFC_LINK_NUMBER_2:
5380 status = bf_get(lpfc_sli_misconfigured_port2_state,
5381 &misconfigured->theEvent);
5382 operational = bf_get(lpfc_sli_misconfigured_port2_op,
5383 &misconfigured->theEvent);
5385 case LPFC_LINK_NUMBER_3:
5386 status = bf_get(lpfc_sli_misconfigured_port3_state,
5387 &misconfigured->theEvent);
5388 operational = bf_get(lpfc_sli_misconfigured_port3_op,
5389 &misconfigured->theEvent);
5392 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5394 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
5395 "event: Invalid link %d",
5396 phba->sli4_hba.lnk_info.lnk_no);
5400 /* Skip if optic state unchanged */
5401 if (phba->sli4_hba.lnk_info.optic_state == status)
5405 case LPFC_SLI_EVENT_STATUS_VALID:
5406 sprintf(message, "Physical Link is functional");
5408 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
5409 sprintf(message, "Optics faulted/incorrectly "
5410 "installed/not installed - Reseat optics, "
5411 "if issue not resolved, replace.");
5413 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
5415 "Optics of two types installed - Remove one "
5416 "optic or install matching pair of optics.");
5418 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
5419 sprintf(message, "Incompatible optics - Replace with "
5420 "compatible optics for card to function.");
5422 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
5423 sprintf(message, "Unqualified optics - Replace with "
5424 "Avago optics for Warranty and Technical "
5425 "Support - Link is%s operational",
5426 (operational) ? " not" : "");
5428 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
5429 sprintf(message, "Uncertified optics - Replace with "
5430 "Avago-certified optics to enable link "
5431 "operation - Link is%s operational",
5432 (operational) ? " not" : "");
5435 /* firmware is reporting a status we don't know about */
5436 sprintf(message, "Unknown event status x%02x", status);
5440 /* Issue READ_CONFIG mbox command to refresh supported speeds */
5441 rc = lpfc_sli4_read_config(phba);
5444 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5445 "3194 Unable to retrieve supported "
5446 "speeds, rc = 0x%x\n", rc);
5448 vports = lpfc_create_vport_work_array(phba);
5449 if (vports != NULL) {
5450 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
5452 shost = lpfc_shost_from_vport(vports[i]);
5453 lpfc_host_supported_speeds_set(shost);
5456 lpfc_destroy_vport_work_array(phba, vports);
5458 phba->sli4_hba.lnk_info.optic_state = status;
5459 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5460 "3176 Port Name %c %s\n", port_name, message);
5462 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
5463 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5464 "3192 Remote DPort Test Initiated - "
5465 "Event Data1:x%08x Event Data2: x%08x\n",
5466 acqe_sli->event_data1, acqe_sli->event_data2);
5468 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
5469 /* Misconfigured WWN. Reports that the SLI Port is configured
5470 * to use FA-WWN, but the attached device doesn't support it.
5471 * No driver action is required.
5472 * Event Data1 - N.A, Event Data2 - N.A
5474 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
5475 "2699 Misconfigured FA-WWN - Attached device does "
5476 "not support FA-WWN\n");
5478 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
5479 /* EEPROM failure. No driver action is required */
5480 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5481 "2518 EEPROM failure - "
5482 "Event Data1: x%08x Event Data2: x%08x\n",
5483 acqe_sli->event_data1, acqe_sli->event_data2);
5486 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5487 "3193 Unrecognized SLI event, type: 0x%x",
5494 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
5495 * @vport: pointer to vport data structure.
5497 * This routine is to perform Clear Virtual Link (CVL) on a vport in
5498 * response to a CVL event.
5500 * Return the pointer to the ndlp with the vport if successful, otherwise NULL.
5503 static struct lpfc_nodelist *
5504 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
5506 struct lpfc_nodelist *ndlp;
5507 struct Scsi_Host *shost;
5508 struct lpfc_hba *phba;
5515 ndlp = lpfc_findnode_did(vport, Fabric_DID);
5517 /* Cannot find existing Fabric ndlp, so allocate a new one */
5518 ndlp = lpfc_nlp_init(vport, Fabric_DID);
5521 /* Set the node type */
5522 ndlp->nlp_type |= NLP_FABRIC;
5523 /* Put ndlp onto node list */
5524 lpfc_enqueue_node(vport, ndlp);
5525 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
5526 /* re-setup ndlp without removing from node list */
5527 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
5531 if ((phba->pport->port_state < LPFC_FLOGI) &&
5532 (phba->pport->port_state != LPFC_VPORT_FAILED))
5534 /* If virtual link is not yet instantiated ignore CVL */
5535 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
5536 && (vport->port_state != LPFC_VPORT_FAILED))
5538 shost = lpfc_shost_from_vport(vport);
5541 lpfc_linkdown_port(vport);
5542 lpfc_cleanup_pending_mbox(vport);
5543 spin_lock_irq(shost->host_lock);
5544 vport->fc_flag |= FC_VPORT_CVL_RCVD;
5545 spin_unlock_irq(shost->host_lock);
5551 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
5552 * @phba: pointer to lpfc hba data structure.
5554 * This routine is to perform Clear Virtual Link (CVL) on all vports in
5555 * response to a FCF dead event.
5558 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
5560 struct lpfc_vport **vports;
5563 vports = lpfc_create_vport_work_array(phba);
5565 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
5566 lpfc_sli4_perform_vport_cvl(vports[i]);
5567 lpfc_destroy_vport_work_array(phba, vports);
5571 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
5572 * @phba: pointer to lpfc hba data structure.
5573 * @acqe_fip: pointer to the async FCoE FIP completion queue entry.
5575 * This routine is to handle the SLI4 asynchronous FCoE FIP event.
5578 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
5579 struct lpfc_acqe_fip *acqe_fip)
5581 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
5583 struct lpfc_vport *vport;
5584 struct lpfc_nodelist *ndlp;
5585 struct Scsi_Host *shost;
5586 int active_vlink_present;
5587 struct lpfc_vport **vports;
5590 phba->fc_eventTag = acqe_fip->event_tag;
5591 phba->fcoe_eventtag = acqe_fip->event_tag;
5592 switch (event_type) {
5593 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
5594 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
5595 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
5596 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5598 "2546 New FCF event, evt_tag:x%x, "
5600 acqe_fip->event_tag,
5603 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
5605 "2788 FCF param modified event, "
5606 "evt_tag:x%x, index:x%x\n",
5607 acqe_fip->event_tag,
5609 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
5611 * During the FCF discovery period, read the FCF
5612 * table record indexed by the event to update the
5613 * roundrobin failover eligible FCF bmask.
5615 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
5617 "2779 Read FCF (x%x) for updating "
5618 "roundrobin FCF failover bmask\n",
5620 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
5623 /* If the FCF discovery is in progress, do nothing. */
5624 spin_lock_irq(&phba->hbalock);
5625 if (phba->hba_flag & FCF_TS_INPROG) {
5626 spin_unlock_irq(&phba->hbalock);
5629 /* If fast FCF failover rescan event is pending, do nothing */
5630 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
5631 spin_unlock_irq(&phba->hbalock);
5635 /* If the FCF has been in discovered state, do nothing. */
5636 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
5637 spin_unlock_irq(&phba->hbalock);
5640 spin_unlock_irq(&phba->hbalock);
5642 /* Otherwise, scan the entire FCF table and re-discover SAN */
5643 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5644 "2770 Start FCF table scan per async FCF "
5645 "event, evt_tag:x%x, index:x%x\n",
5646 acqe_fip->event_tag, acqe_fip->index);
5647 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
5648 LPFC_FCOE_FCF_GET_FIRST);
5650 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5651 "2547 Issue FCF scan read FCF mailbox "
5652 "command failed (x%x)\n", rc);
5655 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
5656 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5657 "2548 FCF Table full count 0x%x tag 0x%x\n",
5658 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
5659 acqe_fip->event_tag);
5662 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
5663 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
5664 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5665 "2549 FCF (x%x) disconnected from network, "
5666 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
5668 * If we are in the middle of FCF failover process, clear
5669 * the corresponding FCF bit in the roundrobin bitmap.
5671 spin_lock_irq(&phba->hbalock);
5672 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
5673 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
5674 spin_unlock_irq(&phba->hbalock);
5675 /* Update FLOGI FCF failover eligible FCF bmask */
5676 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
5679 spin_unlock_irq(&phba->hbalock);
5681 /* If the event is not for currently used fcf do nothing */
5682 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
5686 * Otherwise, request the port to rediscover the entire FCF
5687 * table for a fast recovery in case the current FCF
5688 * is no longer valid, since we are not already in the
5689 * middle of the FCF failover process.
5691 spin_lock_irq(&phba->hbalock);
5692 /* Mark the fast failover process in progress */
5693 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
5694 spin_unlock_irq(&phba->hbalock);
5696 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5697 "2771 Start FCF fast failover process due to "
5698 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
5699 "\n", acqe_fip->event_tag, acqe_fip->index);
5700 rc = lpfc_sli4_redisc_fcf_table(phba);
5702 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5704 "2772 Issue FCF rediscover mailbox "
5705 "command failed, fail through to FCF "
5707 spin_lock_irq(&phba->hbalock);
5708 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
5709 spin_unlock_irq(&phba->hbalock);
5711 * As a last resort, fail over by treating this
5712 * as a link down for FCF registration.
5714 lpfc_sli4_fcf_dead_failthrough(phba);
5716 /* Reset FCF roundrobin bmask for new discovery */
5717 lpfc_sli4_clear_fcf_rr_bmask(phba);
5719 * Handling fast FCF failover to a DEAD FCF event is
5720 * considered equivalent to receiving a CVL on all vports.
5722 lpfc_sli4_perform_all_vport_cvl(phba);
5725 case LPFC_FIP_EVENT_TYPE_CVL:
5726 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
5727 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5728 "2718 Clear Virtual Link Received for VPI 0x%x"
5729 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
5731 vport = lpfc_find_vport_by_vpid(phba,
5733 ndlp = lpfc_sli4_perform_vport_cvl(vport);
5736 active_vlink_present = 0;
5738 vports = lpfc_create_vport_work_array(phba);
5740 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
5742 if ((!(vports[i]->fc_flag &
5743 FC_VPORT_CVL_RCVD)) &&
5744 (vports[i]->port_state > LPFC_FDISC)) {
5745 active_vlink_present = 1;
5749 lpfc_destroy_vport_work_array(phba, vports);
5753 * Don't re-instantiate if vport is marked for deletion.
5754 * If we are here first then vport_delete is going to wait
5755 * for discovery to complete.
5757 if (!(vport->load_flag & FC_UNLOADING) &&
5758 active_vlink_present) {
5760 * If there are other active VLinks present,
5761 * re-instantiate the Vlink using FDISC.
5763 mod_timer(&ndlp->nlp_delayfunc,
5764 jiffies + msecs_to_jiffies(1000));
5765 shost = lpfc_shost_from_vport(vport);
5766 spin_lock_irq(shost->host_lock);
5767 ndlp->nlp_flag |= NLP_DELAY_TMO;
5768 spin_unlock_irq(shost->host_lock);
5769 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
5770 vport->port_state = LPFC_FDISC;
5773 * Otherwise, we request the port to rediscover
5774 * the entire FCF table for a fast recovery,
5775 * covering the case that the current FCF
5776 * is no longer valid, provided we are not
5777 * already in the FCF failover process.
5779 spin_lock_irq(&phba->hbalock);
5780 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
5781 spin_unlock_irq(&phba->hbalock);
5784 /* Mark the fast failover process in progress */
5785 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
5786 spin_unlock_irq(&phba->hbalock);
5787 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
5789 "2773 Start FCF failover per CVL, "
5790 "evt_tag:x%x\n", acqe_fip->event_tag);
5791 rc = lpfc_sli4_redisc_fcf_table(phba);
5793 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5795 "2774 Issue FCF rediscover "
5796 "mailbox command failed, "
5797 "through to CVL event\n");
5798 spin_lock_irq(&phba->hbalock);
5799 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
5800 spin_unlock_irq(&phba->hbalock);
5802 * As a last resort, retry on the
5803 * currently registered FCF entry.
5805 lpfc_retry_pport_discovery(phba);
5808 * Reset FCF roundrobin bmask for new discovery.
5811 lpfc_sli4_clear_fcf_rr_bmask(phba);
5815 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5816 "0288 Unknown FCoE event type 0x%x event tag "
5817 "0x%x\n", event_type, acqe_fip->event_tag);
5823 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
5824 * @phba: pointer to lpfc hba data structure.
5825 * @acqe_dcbx: pointer to the async DCBX completion queue entry.
5827 * This routine is to handle the SLI4 asynchronous dcbx event.
5830 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
5831 struct lpfc_acqe_dcbx *acqe_dcbx)
5833 phba->fc_eventTag = acqe_dcbx->event_tag;
5834 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5835 "0290 The SLI4 DCBX asynchronous event is not "
5840 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
5841 * @phba: pointer to lpfc hba data structure.
5842 * @acqe_grp5: pointer to the async grp5 completion queue entry.
5844 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
5845 * is an asynchronous notification of a logical link speed change. The Port
5846 * reports the logical link speed in units of 10Mbps.
5849 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
5850 struct lpfc_acqe_grp5 *acqe_grp5)
5852 uint16_t prev_ll_spd;
5854 phba->fc_eventTag = acqe_grp5->event_tag;
5855 phba->fcoe_eventtag = acqe_grp5->event_tag;
5856 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
5857 phba->sli4_hba.link_state.logical_speed =
5858 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
5859 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5860 "2789 GRP5 Async Event: Updating logical link speed "
5861 "from %dMbps to %dMbps\n", prev_ll_spd,
5862 phba->sli4_hba.link_state.logical_speed);
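/*
 * Worked example for the unit conversion above: the port reports logical
 * link speed in units of 10 Mbps, so a raw ACQE field value of 1000
 * becomes 1000 * 10 = 10000 Mbps (10 Gbps) in link_state.logical_speed.
 */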
5866 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
5867 * @phba: pointer to lpfc hba data structure.
5869 * This routine is invoked by the worker thread to process all the pending
5870 * SLI4 asynchronous events.
5872 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
5874 struct lpfc_cq_event *cq_event;
5876 /* First, declare the async event has been handled */
5877 spin_lock_irq(&phba->hbalock);
5878 phba->hba_flag &= ~ASYNC_EVENT;
5879 spin_unlock_irq(&phba->hbalock);
5880 /* Now, handle all the async events */
5881 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
5882 /* Get the first event from the head of the event queue */
5883 spin_lock_irq(&phba->hbalock);
5884 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
5885 cq_event, struct lpfc_cq_event, list);
5886 spin_unlock_irq(&phba->hbalock);
5887 /* Process the asynchronous event */
5888 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
5889 case LPFC_TRAILER_CODE_LINK:
5890 lpfc_sli4_async_link_evt(phba,
5891 &cq_event->cqe.acqe_link);
5893 case LPFC_TRAILER_CODE_FCOE:
5894 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
5896 case LPFC_TRAILER_CODE_DCBX:
5897 lpfc_sli4_async_dcbx_evt(phba,
5898 &cq_event->cqe.acqe_dcbx);
5900 case LPFC_TRAILER_CODE_GRP5:
5901 lpfc_sli4_async_grp5_evt(phba,
5902 &cq_event->cqe.acqe_grp5);
5904 case LPFC_TRAILER_CODE_FC:
5905 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
5907 case LPFC_TRAILER_CODE_SLI:
5908 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
5911 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5912 "1804 Invalid asynchronous event code: "
5913 "x%x\n", bf_get(lpfc_trailer_code,
5914 &cq_event->cqe.mcqe_cmpl));
5917 /* Free the completion event processed to the free pool */
5918 lpfc_sli4_cq_event_release(phba, cq_event);
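/*
 * Illustrative sketch (hypothetical, self-contained): the drain loop
 * above follows a single-consumer pattern: hold the lock only long enough
 * to pop one node, then dispatch with the lock dropped so the handler may
 * block. Assumes this thread is the only remover:
 *
 *	while (!list_empty(&example_q)) {
 *		spin_lock_irq(&example_lock);
 *		ev = list_first_entry(&example_q,
 *				      struct example_ev, list);
 *		list_del_init(&ev->list);
 *		spin_unlock_irq(&example_lock);
 *		example_handle(ev);	(lock not held here)
 *	}
 */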
5923 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
5924 * @phba: pointer to lpfc hba data structure.
5926 * This routine is invoked by the worker thread to process FCF table
5927 * rediscovery pending completion event.
5929 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
5933 spin_lock_irq(&phba->hbalock);
5934 /* Clear FCF rediscovery timeout event */
5935 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
5936 /* Clear driver fast failover FCF record flag */
5937 phba->fcf.failover_rec.flag = 0;
5938 /* Set state for FCF fast failover */
5939 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
5940 spin_unlock_irq(&phba->hbalock);
5942 /* Scan FCF table from the first entry to re-discover SAN */
5943 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5944 "2777 Start post-quiescent FCF table scan\n");
5945 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
5947 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5948 "2747 Issue FCF scan read FCF mailbox "
5949 "command failed 0x%x\n", rc);
5953 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
5954 * @phba: pointer to lpfc hba data structure.
5955 * @dev_grp: The HBA PCI-Device group number.
5957 * This routine is invoked to set up the per HBA PCI-Device group function
5958 * API jump table entries.
5960 * Return: 0 if success, otherwise -ENODEV
5963 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5967 /* Set up lpfc PCI-device group */
5968 phba->pci_dev_grp = dev_grp;
5970 /* The LPFC_PCI_DEV_OC uses SLI4 */
5971 if (dev_grp == LPFC_PCI_DEV_OC)
5972 phba->sli_rev = LPFC_SLI_REV4;
5974 /* Set up device INIT API function jump table */
5975 rc = lpfc_init_api_table_setup(phba, dev_grp);
5978 /* Set up SCSI API function jump table */
5979 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
5982 /* Set up SLI API function jump table */
5983 rc = lpfc_sli_api_table_setup(phba, dev_grp);
5986 /* Set up MBOX API function jump table */
5987 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
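/*
 * Illustrative sketch (not from the driver source): the four calls above
 * could equally be expressed as a table walk where the first failing
 * setup aborts the sequence; the name example_setups is hypothetical:
 *
 *	static int (* const example_setups[])(struct lpfc_hba *, uint8_t) = {
 *		lpfc_init_api_table_setup,
 *		lpfc_scsi_api_table_setup,
 *		lpfc_sli_api_table_setup,
 *		lpfc_mbox_api_table_setup,
 *	};
 *	int i;
 *
 *	for (i = 0; i < ARRAY_SIZE(example_setups); i++)
 *		if (example_setups[i](phba, dev_grp))
 *			return -ENODEV;
 */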
5995 * lpfc_log_intr_mode - Log the active interrupt mode
5996 * @phba: pointer to lpfc hba data structure.
5997 * @intr_mode: active interrupt mode adopted.
5999 * This routine is invoked to log the currently used active interrupt mode
6002 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
6004 switch (intr_mode) {
6006 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6007 "0470 Enable INTx interrupt mode.\n");
6010 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6011 "0481 Enabled MSI interrupt mode.\n");
6014 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6015 "0480 Enabled MSI-X interrupt mode.\n");
6018 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6019 "0482 Illegal interrupt mode.\n");
6026 * lpfc_cpumask_of_node_init - initializes cpumask of phba's NUMA node
6027 * @phba: Pointer to HBA context object.
6031 lpfc_cpumask_of_node_init(struct lpfc_hba *phba)
6033 unsigned int cpu, numa_node;
6034 struct cpumask *numa_mask = &phba->sli4_hba.numa_mask;
6036 cpumask_clear(numa_mask);
6038 /* Check if we're a NUMA architecture */
6039 numa_node = dev_to_node(&phba->pcidev->dev);
6040 if (numa_node == NUMA_NO_NODE)
6043 for_each_possible_cpu(cpu)
6044 if (cpu_to_node(cpu) == numa_node)
6045 cpumask_set_cpu(cpu, numa_mask);
6049 * lpfc_enable_pci_dev - Enable a generic PCI device.
6050 * @phba: pointer to lpfc hba data structure.
6052 * This routine is invoked to enable the PCI device that is common to all PCI devices.
6057 * other values - error
6060 lpfc_enable_pci_dev(struct lpfc_hba *phba)
6062 struct pci_dev *pdev;
6064 /* Obtain PCI device reference */
6068 pdev = phba->pcidev;
6069 /* Enable PCI device */
6070 if (pci_enable_device_mem(pdev))
6072 /* Request PCI resource for the device */
6073 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
6074 goto out_disable_device;
6075 /* Set up device as PCI master and save state for EEH */
6076 pci_set_master(pdev);
6077 pci_try_set_mwi(pdev);
6078 pci_save_state(pdev);
6080 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
6081 if (pci_is_pcie(pdev))
6082 pdev->needs_freset = 1;
6087 pci_disable_device(pdev);
6089 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1401 Failed to enable pci device\n");
	return -ENODEV;
6095 * lpfc_disable_pci_dev - Disable a generic PCI device.
6096 * @phba: pointer to lpfc hba data structure.
 * This routine is invoked to disable the PCI device that is common to all
 * PCI devices.
6102 lpfc_disable_pci_dev(struct lpfc_hba *phba)
6104 struct pci_dev *pdev;
6106 /* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	pdev = phba->pcidev;
6111 /* Release PCI resource and disable PCI device */
6112 pci_release_mem_regions(pdev);
6113 pci_disable_device(pdev);
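/*
 * Illustrative pairing (sketch): probe and remove paths are expected to
 * bracket the PCI resources with these two helpers:
 *
 *	if (lpfc_enable_pci_dev(phba))
 *		return -ENODEV;
 *	...
 *	lpfc_disable_pci_dev(phba);
 */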
6119 * lpfc_reset_hba - Reset a hba
6120 * @phba: pointer to lpfc hba data structure.
6122 * This routine is invoked to reset a hba device. It brings the HBA
6123 * offline, performs a board restart, and then brings the board back
 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
 * all outstanding mailbox commands.
6128 lpfc_reset_hba(struct lpfc_hba *phba)
6130 /* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	else
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);
6146 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
6147 * @phba: pointer to lpfc hba data structure.
 * This function reads the PCI SR-IOV extended capability of the physical
 * function to determine how many virtual functions the device supports.
 *
 * Return: the number of virtual functions the device supports, or 0 if
 * the device has no SR-IOV capability.
6156 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
6158 struct pci_dev *pdev = phba->pcidev;
	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos == 0)
		return 0;

	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
	return nr_virtfn;
6171 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
6172 * @phba: pointer to lpfc hba data structure.
6173 * @nr_vfn: number of virtual functions to be enabled.
 * This function enables the PCI SR-IOV virtual functions on a physical
 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
 * enable that number of virtual functions on the physical function. As
 * not all devices support SR-IOV, a failure from the pci_enable_sriov()
 * API call is not treated as an error condition for most devices.
6182 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
6184 struct pci_dev *pdev = phba->pcidev;
6185 uint16_t max_nr_vfn;
6188 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
6189 if (nr_vfn > max_nr_vfn) {
6190 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6191 "3057 Requested vfs (%d) greater than "
				"supported vfs (%d)", nr_vfn, max_nr_vfn);
		return -EINVAL;
	}
	rc = pci_enable_sriov(pdev, nr_vfn);
	if (rc)
6198 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6199 "2806 Failed to enable sriov on this device "
				"with vfn number nr_vf:%d, rc:%d\n",
				nr_vfn, rc);
	else
6203 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6204 "2807 Successful enable sriov on this device "
				"with vfn number nr_vf:%d\n", nr_vfn);
	return rc;
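/*
 * Illustrative usage (sketch) mirroring how the resource-setup paths gate
 * this call on the lpfc_sriov_nr_virtfn module parameter:
 *
 *	if (phba->cfg_sriov_nr_virtfn > 0 &&
 *	    lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn))
 *		phba->cfg_sriov_nr_virtfn = 0;
 */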
 * lpfc_setup_driver_resource_phase1 - Phase1 setup of driver internal resources.
6211 * @phba: pointer to lpfc hba data structure.
6213 * This routine is invoked to set up the driver internal resources before the
 * device specific resource setup to support the HBA device it is attached to.
6218 * other values - error
6221 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
6223 struct lpfc_sli *psli = &phba->sli;
6226 * Driver resources common to all SLI revisions
6228 atomic_set(&phba->fast_event_count, 0);
6229 spin_lock_init(&phba->hbalock);
6231 /* Initialize ndlp management spinlock */
6232 spin_lock_init(&phba->ndlp_lock);
6234 /* Initialize port_list spinlock */
6235 spin_lock_init(&phba->port_list_lock);
6236 INIT_LIST_HEAD(&phba->port_list);
6238 INIT_LIST_HEAD(&phba->work_list);
6239 init_waitqueue_head(&phba->wait_4_mlo_m_q);
6241 /* Initialize the wait queue head for the kernel thread */
6242 init_waitqueue_head(&phba->work_waitq);
6244 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6245 "1403 Protocols supported %s %s %s\n",
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
				"FCP" : " "),
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
				"NVME" : " "),
			(phba->nvmet_support ? "NVMET" : " "));
6252 /* Initialize the IO buffer list used by driver for SLI3 SCSI */
6253 spin_lock_init(&phba->scsi_buf_list_get_lock);
6254 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
6255 spin_lock_init(&phba->scsi_buf_list_put_lock);
6256 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
6258 /* Initialize the fabric iocb list */
6259 INIT_LIST_HEAD(&phba->fabric_iocb_list);
6261 /* Initialize list to save ELS buffers */
6262 INIT_LIST_HEAD(&phba->elsbuf);
6264 /* Initialize FCF connection rec list */
6265 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
6267 /* Initialize OAS configuration list */
6268 spin_lock_init(&phba->devicelock);
6269 INIT_LIST_HEAD(&phba->luns);
6271 /* MBOX heartbeat timer */
6272 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
6273 /* Fabric block timer */
6274 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
6275 /* EA polling mode timer */
6276 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
6277 /* Heartbeat timer */
6278 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
	INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);

	return 0;
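/*
 * Editorial note with a sketch: each timer_setup() call above binds a
 * callback that recovers the owning phba via from_timer(). A heartbeat
 * callback, for example, has this general shape (illustrative only):
 *
 *	static void lpfc_hb_timeout(struct timer_list *t)
 *	{
 *		struct lpfc_hba *phba = from_timer(phba, t, hb_tmofunc);
 *		// flag the heartbeat event and wake the worker thread
 *	}
 */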
6286 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
6287 * @phba: pointer to lpfc hba data structure.
6289 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-3 HBA device it is attached to.
6294 * other values - error
6297 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
6302 * Initialize timers used by driver
6305 /* FCP polling mode timer */
6306 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
6308 /* Host attention work mask setup */
6309 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
6310 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
6312 /* Get all the module params for configuring this host */
6313 lpfc_get_cfgparam(phba);
6314 /* Set up phase-1 common device driver resources */
	rc = lpfc_setup_driver_resource_phase1(phba);
	if (rc)
		return rc;
6320 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
6321 phba->menlo_flag |= HBA_MENLO_SUPPORT;
6322 /* check for menlo minimum sg count */
6323 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
6324 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
6327 if (!phba->sli.sli3_ring)
6328 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
					      sizeof(struct lpfc_sli_ring),
					      GFP_KERNEL);
	if (!phba->sli.sli3_ring)
		return -ENOMEM;
6335 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
6336 * used to create the sg_dma_buf_pool must be dynamically calculated.
6339 if (phba->sli_rev == LPFC_SLI_REV4)
6340 entry_sz = sizeof(struct sli4_sge);
	else
		entry_sz = sizeof(struct ulp_bde64);
6344 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
6345 if (phba->cfg_enable_bg) {
6347 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
		 * the FCP rsp, and a BDE for each. Since we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (i.e. there could be one for every block
		 * in the IO), we just allocate enough BDEs to accommodate
6352 * our max amount and we need to limit lpfc_sg_seg_cnt to
6353 * minimize the risk of running out.
6355 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6356 sizeof(struct fcp_rsp) +
6357 (LPFC_MAX_SG_SEG_CNT * entry_sz);
6359 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
6360 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
6362 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
6363 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
6366 * The scsi_buf for a regular I/O will hold the FCP cmnd,
6367 * the FCP rsp, a BDE for each, and a BDE for up to
6368 * cfg_sg_seg_cnt data segments.
6370 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6371 sizeof(struct fcp_rsp) +
6372 ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
6374 /* Total BDEs in BPL for scsi_sg_list */
6375 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
6378 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
6379 "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
6380 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
6381 phba->cfg_total_seg_cnt);
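/*
 * Worked example (illustrative; struct sizes depend on the build): with
 * the default lpfc_sg_seg_cnt of 64 on an SLI-3 port and BlockGuard off,
 * the non-DIF branch above reserves
 *
 *	cfg_total_seg_cnt   = 64 + 2 = 66 BDEs
 *	cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
 *			      sizeof(struct fcp_rsp) +
 *			      66 * sizeof(struct ulp_bde64)
 *
 * i.e. one BDE each for the FCP command and response plus one per data
 * segment.
 */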
6383 phba->max_vpi = LPFC_MAX_VPI;
6384 /* This will be set to correct value after config_port mbox */
6385 phba->max_vports = 0;
6388 * Initialize the SLI Layer to run with lpfc HBAs.
6390 lpfc_sli_setup(phba);
6391 lpfc_sli_queue_init(phba);
6393 /* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;
6397 phba->lpfc_sg_dma_buf_pool =
6398 dma_pool_create("lpfc_sg_dma_buf_pool",
				&phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
				BPL_ALIGN_SZ, 0);
	if (!phba->lpfc_sg_dma_buf_pool)
		goto fail_free_mem;
6405 phba->lpfc_cmd_rsp_buf_pool =
6406 dma_pool_create("lpfc_cmd_rsp_buf_pool",
6408 sizeof(struct fcp_cmnd) +
				sizeof(struct fcp_rsp),
				BPL_ALIGN_SZ, 0);
	if (!phba->lpfc_cmd_rsp_buf_pool)
		goto fail_free_dma_buf_pool;
6416 * Enable sr-iov virtual functions if supported and configured
6417 * through the module parameter.
6419 if (phba->cfg_sriov_nr_virtfn > 0) {
6420 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
6421 phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6424 "2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;

fail_free_dma_buf_pool:
6435 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
6436 phba->lpfc_sg_dma_buf_pool = NULL;
fail_free_mem:
	lpfc_mem_free(phba);
	return -ENOMEM;
6443 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
6444 * @phba: pointer to lpfc hba data structure.
6446 * This routine is invoked to unset the driver internal resources set up
6447 * specific for supporting the SLI-3 HBA device it attached to.
6450 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
6452 /* Free device driver memory allocated */
6453 lpfc_mem_free_all(phba);
6459 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
6460 * @phba: pointer to lpfc hba data structure.
6462 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-4 HBA device it is attached to.
6467 * other values - error
6470 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
6472 LPFC_MBOXQ_t *mboxq;
6474 int rc, i, max_buf_size;
6475 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
6476 struct lpfc_mqe *mqe;
6483 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
6484 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
6485 phba->sli4_hba.curr_disp_cpu = 0;
6486 lpfc_cpumask_of_node_init(phba);
6488 /* Get all the module params for configuring this host */
6489 lpfc_get_cfgparam(phba);
6491 /* Set up phase-1 common device driver resources */
	rc = lpfc_setup_driver_resource_phase1(phba);
	if (rc)
		return rc;
6496 /* Before proceed, wait for POST done and device ready */
	rc = lpfc_sli4_post_status_check(phba);
	if (rc)
		return -ENODEV;
6501 /* Allocate all driver workqueues here */
6503 /* The lpfc_wq workqueue for deferred irq use */
	phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
	if (!phba->wq)
		return -ENOMEM;
6507 * Initialize timers used by driver
6510 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
6512 /* FCF rediscover timer */
6513 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
6516 * Control structure for handling external multi-buffer mailbox
6517 * command pass-through.
6519 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
6520 sizeof(struct lpfc_mbox_ext_buf_ctx));
6521 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
6523 phba->max_vpi = LPFC_MAX_VPI;
6525 /* This will be set to correct value after the read_config mbox */
6526 phba->max_vports = 0;
6528 /* Program the default value of vlan_id and fc_map */
6529 phba->valid_vlan = 0;
6530 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
6531 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
6532 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
6535 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
6536 * we will associate a new ring, for each EQ/CQ/WQ tuple.
6537 * The WQ create will allocate the ring.
6540 /* Initialize buffer queue management fields */
6541 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
6542 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
6543 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
6546 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
6548 /* Initialize the Abort buffer list used by driver */
6549 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
6550 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
6552 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6553 /* Initialize the Abort nvme buffer list used by driver */
6554 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
6555 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
6556 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
6557 spin_lock_init(&phba->sli4_hba.t_active_list_lock);
6558 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
6561 /* This abort list used by worker thread */
6562 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
6563 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
6566 * Initialize driver internal slow-path work queues
	/* Driver internal slow-path CQ Event pool */
6570 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
6571 /* Response IOCB work queue list */
6572 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
6573 /* Asynchronous event CQ Event work queue list */
6574 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
6575 /* Fast-path XRI aborted CQ Event work queue list */
6576 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
6577 /* Slow-path XRI aborted CQ Event work queue list */
6578 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
6579 /* Receive queue CQ Event work queue list */
6580 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
6582 /* Initialize extent block lists. */
6583 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
6584 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
6585 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
6586 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
6588 /* Initialize mboxq lists. If the early init routines fail
6589 * these lists need to be correctly initialized.
6591 INIT_LIST_HEAD(&phba->sli.mboxq);
6592 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
6594 /* initialize optic_state to 0xFF */
6595 phba->sli4_hba.lnk_info.optic_state = 0xff;
6597 /* Allocate device driver memory */
	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
	if (rc)
		return -ENOMEM;
	/* IF Type 2 ports get initialized now. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc))
			return -ENODEV;
		phba->temp_sensor_support = 1;
	}

	/* Create the bootstrap mailbox command */
	rc = lpfc_create_bootstrap_mbox(phba);
	if (unlikely(rc))
		goto out_free_mem;
	/* Set up the host's endian order with the device. */
	rc = lpfc_setup_endian_order(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* Set up the hba's configuration parameters. */
	rc = lpfc_sli4_read_config(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;
	rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
	if (rc)
		goto out_free_bsmbx;
	/* IF Type 0 ports get initialized now. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_0) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc))
			goto out_free_bsmbx;
	}

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
					       GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto out_free_bsmbx;
	}
6646 /* Check for NVMET being configured */
6647 phba->nvmet_support = 0;
6648 if (lpfc_enable_nvmet_cnt) {
6650 /* First get WWN of HBA instance */
6651 lpfc_read_nv(phba, mboxq);
6652 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6653 if (rc != MBX_SUCCESS) {
6654 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"6016 Mailbox failed, mbxCmd x%x "
6656 "READ_NV, mbxStatus x%x\n",
6657 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6658 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
		mb = &mboxq->u.mb;
		memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(uint64_t));
6666 wwn = cpu_to_be64(wwn);
6667 phba->sli4_hba.wwnn.u.name = wwn;
		memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
		       sizeof(uint64_t));
6670 /* wwn is WWPN of HBA instance */
6671 wwn = cpu_to_be64(wwn);
6672 phba->sli4_hba.wwpn.u.name = wwn;
6674 /* Check to see if it matches any module parameter */
6675 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
6676 if (wwn == lpfc_enable_nvmet[i]) {
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
				if (lpfc_nvmet_mem_alloc(phba))
					break;

				phba->nvmet_support = 1; /* a match */

				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6017 NVME Target %016llx\n",
						wwn);
#else
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6021 Can't enable NVME Target."
						" NVME_TARGET_FC infrastructure"
						" is not in kernel\n");
#endif
				/* Not supported for NVMET */
				phba->cfg_xri_rebalancing = 0;
				break;
			}
		}
	}
6699 lpfc_nvme_mod_param_dep(phba);
6701 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
6702 lpfc_supported_pages(mboxq);
6703 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6705 mqe = &mboxq->u.mqe;
6706 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
6707 LPFC_MAX_SUPPORTED_PAGES);
6708 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
6709 switch (pn_page[i]) {
6710 case LPFC_SLI4_PARAMETERS:
				phba->sli4_hba.pc_sli4_params.supported = 1;
				break;
			default:
				break;
			}
		}
6717 /* Read the port's SLI4 Parameters capabilities if supported. */
	if (phba->sli4_hba.pc_sli4_params.supported)
		rc = lpfc_pc_sli4_params_get(phba, mboxq);
	if (rc) {
		mempool_free(mboxq, phba->mbox_mem_pool);
		rc = -EIO;
		goto out_free_bsmbx;
	}
6728 * Get sli4 parameters that override parameters from Port capabilities.
	 * If this call fails, it isn't critical unless the SLI4 parameters come
	 * back in conflict.
	rc = lpfc_get_sli4_parameters(phba, mboxq);
	if (rc) {
6734 if_type = bf_get(lpfc_sli_intf_if_type,
6735 &phba->sli4_hba.sli_intf);
6736 if_fam = bf_get(lpfc_sli_intf_sli_family,
6737 &phba->sli4_hba.sli_intf);
6738 if (phba->sli4_hba.extents_in_use &&
6739 phba->sli4_hba.rpi_hdrs_in_use) {
6740 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6741 "2999 Unsupported SLI4 Parameters "
6742 "Extents and RPI headers enabled.\n");
6743 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6744 if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
				mempool_free(mboxq, phba->mbox_mem_pool);
				rc = -EIO;
				goto out_free_bsmbx;
			}
		}
6750 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6751 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
	}
6759 * 1 for cmd, 1 for rsp, NVME adds an extra one
6760 * for boundary conditions in its max_sgl_segment template.
	extra = 2;
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		extra++;
6767 * It doesn't matter what family our adapter is in, we are
6768 * limited to 2 Pages, 512 SGEs, for our SGL.
6769 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
6771 max_buf_size = (2 * SLI4_PAGE_SIZE);
6774 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
6775 * used to create the sg_dma_buf_pool must be calculated.
6777 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
6778 /* Both cfg_enable_bg and cfg_external_dif code paths */
6781 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
		 * the FCP rsp, and a SGE. Since we have no control
		 * over how many protection segments the SCSI Layer
		 * will hand us (i.e. there could be one for every block
		 * in the IO), just allocate enough SGEs to accommodate
6786 * our max amount and we need to limit lpfc_sg_seg_cnt
6787 * to minimize the risk of running out.
6789 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6790 sizeof(struct fcp_rsp) + max_buf_size;
6792 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
6793 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
6796 * If supporting DIF, reduce the seg count for scsi to
6797 * allow room for the DIF sges.
6799 if (phba->cfg_enable_bg &&
6800 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
6801 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
		else
			phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6807 * The scsi_buf for a regular I/O holds the FCP cmnd,
6808 * the FCP rsp, a SGE for each, and a SGE for up to
6809 * cfg_sg_seg_cnt data segments.
6811 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6812 sizeof(struct fcp_rsp) +
6813 ((phba->cfg_sg_seg_cnt + extra) *
6814 sizeof(struct sli4_sge));
6816 /* Total SGEs for scsi_sg_list */
6817 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
6818 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6821 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
6822 * need to post 1 page for the SGL.
6826 if (phba->cfg_xpsgl && !phba->nvmet_support)
6827 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
6828 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
6829 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
	else
		phba->cfg_sg_dma_buf_size =
6832 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
6834 phba->border_sge_num = phba->cfg_sg_dma_buf_size /
6835 sizeof(struct sli4_sge);
6837 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
6838 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6839 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
6840 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
6841 "6300 Reducing NVME sg segment "
					"cnt to %d\n",
					LPFC_MAX_NVME_SEG_CNT);
6844 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
		} else
			phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
	}
6849 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
6850 "9087 sg_seg_cnt:%d dmabuf_size:%d "
6851 "total:%d scsi:%d nvme:%d\n",
6852 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
6853 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
6854 phba->cfg_nvme_seg_cnt);
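/*
 * Worked example (illustrative): with lpfc_sg_seg_cnt = 64, no DIF and
 * NVME enabled, extra = 3, so the size computed above is
 * sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
 * (64 + 3) * sizeof(struct sli4_sge), which SLI4_PAGE_ALIGN() rounds up
 * to a page multiple before the DMA pools are created below.
 */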
	if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
		i = phba->cfg_sg_dma_buf_size;
	else
		i = SLI4_PAGE_SIZE;
6861 phba->lpfc_sg_dma_buf_pool =
6862 dma_pool_create("lpfc_sg_dma_buf_pool",
				phba->cfg_sg_dma_buf_size,
				i, 0);
6866 if (!phba->lpfc_sg_dma_buf_pool)
6867 goto out_free_bsmbx;
6869 phba->lpfc_cmd_rsp_buf_pool =
6870 dma_pool_create("lpfc_cmd_rsp_buf_pool",
6872 sizeof(struct fcp_cmnd) +
				sizeof(struct fcp_rsp),
				i, 0);
6875 if (!phba->lpfc_cmd_rsp_buf_pool)
6876 goto out_free_sg_dma_buf;
6878 mempool_free(mboxq, phba->mbox_mem_pool);
6880 /* Verify OAS is supported */
6881 lpfc_sli4_oas_verify(phba);
6883 /* Verify RAS support on adapter */
6884 lpfc_sli4_ras_init(phba);
6886 /* Verify all the SLI4 queues */
6887 rc = lpfc_sli4_queue_verify(phba);
	if (rc)
		goto out_free_cmd_rsp_buf;
6891 /* Create driver internal CQE event pool */
6892 rc = lpfc_sli4_cq_event_pool_create(phba);
	if (rc)
		goto out_free_cmd_rsp_buf;
6896 /* Initialize sgl lists per host */
6897 lpfc_init_sgl_list(phba);
6899 /* Allocate and initialize active sgl array */
6900 rc = lpfc_init_active_sgl_array(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6903 "1430 Failed to initialize sgl list.\n");
		goto out_destroy_cq_event_pool;
	}
6906 rc = lpfc_sli4_init_rpi_hdrs(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6909 "1432 Failed to initialize rpi headers.\n");
		goto out_free_active_sgl;
	}
6913 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
6914 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
	phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
					 GFP_KERNEL);
6917 if (!phba->fcf.fcf_rr_bmask) {
6918 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6919 "2759 Failed allocate memory for FCF round "
6920 "robin failover bmask\n");
		rc = -ENOMEM;
		goto out_remove_rpi_hdrs;
	}
6925 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
					    sizeof(struct lpfc_hba_eq_hdl),
					    GFP_KERNEL);
6928 if (!phba->sli4_hba.hba_eq_hdl) {
6929 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6930 "2572 Failed allocate memory for "
6931 "fast-path per-EQ handle array\n");
		rc = -ENOMEM;
		goto out_free_fcf_rr_bmask;
	}
6936 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
					sizeof(struct lpfc_vector_map_info),
					GFP_KERNEL);
6939 if (!phba->sli4_hba.cpu_map) {
6940 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6941 "3327 Failed allocate memory for msi-x "
6942 "interrupt vector mapping\n");
		rc = -ENOMEM;
		goto out_free_hba_eq_hdl;
	}
6947 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
6948 if (!phba->sli4_hba.eq_info) {
6949 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6950 "3321 Failed allocation for per_cpu stats\n");
		rc = -ENOMEM;
		goto out_free_hba_cpu_map;
	}
6955 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
6956 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
6957 if (!phba->sli4_hba.c_stat) {
6958 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6959 "3332 Failed allocating per cpu hdwq stats\n");
		rc = -ENOMEM;
		goto out_free_hba_eq_info;
	}
#endif
6966 * Enable sr-iov virtual functions if supported and configured
6967 * through the module parameter.
6969 if (phba->cfg_sriov_nr_virtfn > 0) {
6970 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
6971 phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6974 "3020 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
6977 phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;
6984 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
6985 out_free_hba_eq_info:
6986 free_percpu(phba->sli4_hba.eq_info);
6988 out_free_hba_cpu_map:
6989 kfree(phba->sli4_hba.cpu_map);
6990 out_free_hba_eq_hdl:
6991 kfree(phba->sli4_hba.hba_eq_hdl);
6992 out_free_fcf_rr_bmask:
6993 kfree(phba->fcf.fcf_rr_bmask);
6994 out_remove_rpi_hdrs:
6995 lpfc_sli4_remove_rpi_hdrs(phba);
6996 out_free_active_sgl:
6997 lpfc_free_active_sgl(phba);
6998 out_destroy_cq_event_pool:
6999 lpfc_sli4_cq_event_pool_destroy(phba);
7000 out_free_cmd_rsp_buf:
7001 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
7002 phba->lpfc_cmd_rsp_buf_pool = NULL;
7003 out_free_sg_dma_buf:
7004 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7005 phba->lpfc_sg_dma_buf_pool = NULL;
out_free_bsmbx:
	lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
	lpfc_mem_free(phba);
	return rc;
7014 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
7015 * @phba: pointer to lpfc hba data structure.
7017 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-4 HBA device it is attached to.
7021 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
7023 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
7025 free_percpu(phba->sli4_hba.eq_info);
7026 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	free_percpu(phba->sli4_hba.c_stat);
#endif
7030 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
7031 kfree(phba->sli4_hba.cpu_map);
7032 phba->sli4_hba.num_possible_cpu = 0;
7033 phba->sli4_hba.num_present_cpu = 0;
7034 phba->sli4_hba.curr_disp_cpu = 0;
7035 cpumask_clear(&phba->sli4_hba.numa_mask);
7037 /* Free memory allocated for fast-path work queue handles */
7038 kfree(phba->sli4_hba.hba_eq_hdl);
7040 /* Free the allocated rpi headers. */
7041 lpfc_sli4_remove_rpi_hdrs(phba);
7042 lpfc_sli4_remove_rpis(phba);
7044 /* Free eligible FCF index bmask */
7045 kfree(phba->fcf.fcf_rr_bmask);
7047 /* Free the ELS sgl list */
7048 lpfc_free_active_sgl(phba);
7049 lpfc_free_els_sgl_list(phba);
7050 lpfc_free_nvmet_sgl_list(phba);
7052 /* Free the completion queue EQ event pool */
7053 lpfc_sli4_cq_event_release_all(phba);
7054 lpfc_sli4_cq_event_pool_destroy(phba);
7056 /* Release resource identifiers. */
7057 lpfc_sli4_dealloc_resource_identifiers(phba);
7059 /* Free the bsmbx region. */
7060 lpfc_destroy_bootstrap_mbox(phba);
7062 /* Free the SLI Layer memory with SLI4 HBAs */
7063 lpfc_mem_free_all(phba);
7065 /* Free the current connect table */
7066 list_for_each_entry_safe(conn_entry, next_conn_entry,
7067 &phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}
7076 * lpfc_init_api_table_setup - Set up init api function jump table
7077 * @phba: The hba struct for which this call is being executed.
7078 * @dev_grp: The HBA PCI-Device group number.
7080 * This routine sets up the device INIT interface API function jump table
7083 * Returns: 0 - success, -ENODEV - failure.
7086 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7088 phba->lpfc_hba_init_link = lpfc_hba_init_link;
7089 phba->lpfc_hba_down_link = lpfc_hba_down_link;
	phba->lpfc_selective_reset = lpfc_selective_reset;
	switch (dev_grp) {
7092 case LPFC_PCI_DEV_LP:
7093 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
7094 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
		phba->lpfc_stop_port = lpfc_stop_port_s3;
		break;
7097 case LPFC_PCI_DEV_OC:
7098 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
7099 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
		phba->lpfc_stop_port = lpfc_stop_port_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1431 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
7113 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
7114 * @phba: pointer to lpfc hba data structure.
7116 * This routine is invoked to set up the driver internal resources after the
 * device specific resource setup to support the HBA device it is attached to.
7121 * other values - error
7124 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
7128 /* Startup the kernel thread for this host adapter. */
7129 phba->worker_thread = kthread_run(lpfc_do_work, phba,
7130 "lpfc_worker_%d", phba->brd_no);
7131 if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		return error;
	}

	return 0;
7140 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
7141 * @phba: pointer to lpfc hba data structure.
7143 * This routine is invoked to unset the driver internal resources set up after
 * the device specific resource setup for supporting the HBA device it is
 * attached to.
7148 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
	if (phba->wq) {
		flush_workqueue(phba->wq);
		destroy_workqueue(phba->wq);
		phba->wq = NULL;
	}
7156 /* Stop kernel worker thread */
7157 if (phba->worker_thread)
7158 kthread_stop(phba->worker_thread);
7162 * lpfc_free_iocb_list - Free iocb list.
7163 * @phba: pointer to lpfc hba data structure.
7165 * This routine is invoked to free the driver's IOCB list and memory.
7168 lpfc_free_iocb_list(struct lpfc_hba *phba)
7170 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
7172 spin_lock_irq(&phba->hbalock);
7173 list_for_each_entry_safe(iocbq_entry, iocbq_next,
7174 &phba->lpfc_iocb_list, list) {
		list_del(&iocbq_entry->list);
		kfree(iocbq_entry);
		phba->total_iocbq_bufs--;
7179 spin_unlock_irq(&phba->hbalock);
7185 * lpfc_init_iocb_list - Allocate and initialize iocb list.
7186 * @phba: pointer to lpfc hba data structure.
 * This routine is invoked to allocate and initialize the driver's IOCB
7189 * list and set up the IOCB tag array accordingly.
7193 * other values - error
7196 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
7198 struct lpfc_iocbq *iocbq_entry = NULL;
7202 /* Initialize and populate the iocb list per host. */
7203 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
7204 for (i = 0; i < iocb_count; i++) {
7205 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
7206 if (iocbq_entry == NULL) {
7207 printk(KERN_ERR "%s: only allocated %d iocbs of "
7208 "expected %d count. Unloading driver.\n",
7209 __func__, i, iocb_count);
7210 goto out_free_iocbq;
7213 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
		if (iotag == 0) {
			kfree(iocbq_entry);
			printk(KERN_ERR "%s: failed to allocate IOTAG. "
7217 "Unloading driver.\n", __func__);
			goto out_free_iocbq;
		}
7220 iocbq_entry->sli4_lxritag = NO_XRI;
7221 iocbq_entry->sli4_xritag = NO_XRI;
7223 spin_lock_irq(&phba->hbalock);
7224 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
7225 phba->total_iocbq_bufs++;
7226 spin_unlock_irq(&phba->hbalock);
	}

	return 0;

out_free_iocbq:
	lpfc_free_iocb_list(phba);

	return -ENOMEM;
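/*
 * Illustrative consumer (sketch): the pool built here backs the normal
 * iocb allocation path elsewhere in the driver, e.g.:
 *
 *	struct lpfc_iocbq *iocbq = lpfc_sli_get_iocbq(phba);
 *	if (iocbq) {
 *		// build and issue the command ...
 *		lpfc_sli_release_iocbq(phba, iocbq);
 *	}
 */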
7238 * lpfc_free_sgl_list - Free a given sgl list.
7239 * @phba: pointer to lpfc hba data structure.
7240 * @sglq_list: pointer to the head of sgl list.
 * This routine is invoked to free a given sgl list and memory.
7245 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
7247 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7249 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
7250 list_del(&sglq_entry->list);
		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}
7257 * lpfc_free_els_sgl_list - Free els sgl list.
7258 * @phba: pointer to lpfc hba data structure.
7260 * This routine is invoked to free the driver's els sgl list and memory.
7263 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
7265 LIST_HEAD(sglq_list);
7267 /* Retrieve all els sgls from driver list */
7268 spin_lock_irq(&phba->hbalock);
7269 spin_lock(&phba->sli4_hba.sgl_list_lock);
7270 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
7271 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7272 spin_unlock_irq(&phba->hbalock);
7274 /* Now free the sgl list */
7275 lpfc_free_sgl_list(phba, &sglq_list);
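/*
 * Editorial note (sketch): the nesting above (hbalock outer,
 * sgl_list_lock inner) is the lock order to follow for any code touching
 * the sgl lists:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	spin_lock(&phba->sli4_hba.sgl_list_lock);
 *	...
 *	spin_unlock(&phba->sli4_hba.sgl_list_lock);
 *	spin_unlock_irq(&phba->hbalock);
 */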
7279 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
7280 * @phba: pointer to lpfc hba data structure.
7282 * This routine is invoked to free the driver's nvmet sgl list and memory.
7285 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
7287 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7288 LIST_HEAD(sglq_list);
7290 /* Retrieve all nvmet sgls from driver list */
7291 spin_lock_irq(&phba->hbalock);
7292 spin_lock(&phba->sli4_hba.sgl_list_lock);
7293 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
7294 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7295 spin_unlock_irq(&phba->hbalock);
7297 /* Now free the sgl list */
7298 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
7299 list_del(&sglq_entry->list);
		lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}
7304 /* Update the nvmet_xri_cnt to reflect no current sgls.
7305 * The next initialization cycle sets the count and allocates
7306 * the sgls over again.
7308 phba->sli4_hba.nvmet_xri_cnt = 0;
7312 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
7313 * @phba: pointer to lpfc hba data structure.
7315 * This routine is invoked to allocate the driver's active sgl memory.
7316 * This array will hold the sglq_entry's for active IOs.
7319 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
7322 size = sizeof(struct lpfc_sglq *);
7323 size *= phba->sli4_hba.max_cfg_param.max_xri;
7325 phba->sli4_hba.lpfc_sglq_active_list =
7326 kzalloc(size, GFP_KERNEL);
	if (!phba->sli4_hba.lpfc_sglq_active_list)
		return -ENOMEM;

	return 0;
7333 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
7334 * @phba: pointer to lpfc hba data structure.
7336 * This routine is invoked to walk through the array of active sglq entries
7337 * and free all of the resources.
7338 * This is just a place holder for now.
7341 lpfc_free_active_sgl(struct lpfc_hba *phba)
7343 kfree(phba->sli4_hba.lpfc_sglq_active_list);
7347 * lpfc_init_sgl_list - Allocate and initialize sgl list.
7348 * @phba: pointer to lpfc hba data structure.
 * This routine is invoked to allocate and initialize the driver's sgl
7351 * list and set up the sgl xritag tag array accordingly.
7355 lpfc_init_sgl_list(struct lpfc_hba *phba)
7357 /* Initialize and populate the sglq list per host/VF. */
7358 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
7359 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7360 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
7361 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
7363 /* els xri-sgl book keeping */
7364 phba->sli4_hba.els_xri_cnt = 0;
7366 /* nvme xri-buffer book keeping */
7367 phba->sli4_hba.io_xri_cnt = 0;
7371 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
7372 * @phba: pointer to lpfc hba data structure.
7374 * This routine is invoked to post rpi header templates to the
7375 * port for those SLI4 ports that do not support extents. This routine
7376 * posts a PAGE_SIZE memory region to the port to hold up to
 * 64 rpi context headers. This is an initialization routine
7378 * and should be called only when interrupts are disabled.
7382 * -ERROR - otherwise.
7385 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
7388 struct lpfc_rpi_hdr *rpi_hdr;
7390 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return rc;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;
7396 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
	if (!rpi_hdr) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7399 "0391 Error during rpi post operation\n");
		lpfc_sli4_remove_rpis(phba);
		rc = -ENODEV;
	}

	return rc;
7408 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
7409 * @phba: pointer to lpfc hba data structure.
7411 * This routine is invoked to allocate a single 4KB memory region to
7412 * support rpis and stores them in the phba. This single region
7413 * provides support for up to 64 rpis. The region is used globally
7417 * A valid rpi hdr on success.
7418 * A NULL pointer on any failure.
7420 struct lpfc_rpi_hdr *
7421 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
7423 uint16_t rpi_limit, curr_rpi_range;
7424 struct lpfc_dmabuf *dmabuf;
7425 struct lpfc_rpi_hdr *rpi_hdr;
7428 * If the SLI4 port supports extents, posting the rpi header isn't
7429 * required. Set the expected maximum count and let the actual value
7430 * get set when extents are fully allocated.
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return NULL;
	if (phba->sli4_hba.extents_in_use)
		return NULL;
7437 /* The limit on the logical index is just the max_rpi count. */
7438 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
7440 spin_lock_irq(&phba->hbalock);
7442 * Establish the starting RPI in this header block. The starting
	 * rpi is normalized to a zero base because the physical rpi is
	 * port based.
	 */
7446 curr_rpi_range = phba->sli4_hba.next_rpi;
7447 spin_unlock_irq(&phba->hbalock);
7449 /* Reached full RPI range */
	if (curr_rpi_range == rpi_limit)
		return NULL;
7454 * First allocate the protocol header region for the port. The
7455 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;
7461 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
7462 LPFC_HDR_TEMPLATE_SIZE,
7463 &dmabuf->phys, GFP_KERNEL);
7464 if (!dmabuf->virt) {
		goto err_free_dmabuf;
	}
7469 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
		goto err_free_coherent;
	}
7474 /* Save the rpi header data for cleanup later. */
7475 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
	if (!rpi_hdr)
		goto err_free_coherent;
7479 rpi_hdr->dmabuf = dmabuf;
7480 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
7481 rpi_hdr->page_count = 1;
7482 spin_lock_irq(&phba->hbalock);
7484 /* The rpi_hdr stores the logical index only. */
7485 rpi_hdr->start_rpi = curr_rpi_range;
7486 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
7487 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
	spin_unlock_irq(&phba->hbalock);
	return rpi_hdr;

err_free_coherent:
	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
			  dmabuf->virt, dmabuf->phys);
err_free_dmabuf:
	kfree(dmabuf);
	return NULL;
7501 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
7502 * @phba: pointer to lpfc hba data structure.
7504 * This routine is invoked to remove all memory resources allocated
7505 * to support rpis for SLI4 ports not supporting extents. This routine
7506 * presumes the caller has released all rpis consumed by fabric or port
7507 * logins and is prepared to have the header pages removed.
7510 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
7512 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		goto exit;
7517 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
7518 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
7519 list_del(&rpi_hdr->list);
7520 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
7521 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
		kfree(rpi_hdr->dmabuf);
		kfree(rpi_hdr);
	}
exit:
	/* There are no rpis available to the port now. */
7527 phba->sli4_hba.next_rpi = 0;
7531 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
7532 * @pdev: pointer to pci device data structure.
7534 * This routine is invoked to allocate the driver hba data structure for an
7535 * HBA device. If the allocation is successful, the phba reference to the
7536 * PCI device data structure is set.
7539 * pointer to @phba - successful
7542 static struct lpfc_hba *
7543 lpfc_hba_alloc(struct pci_dev *pdev)
7545 struct lpfc_hba *phba;
7547 /* Allocate memory for HBA structure */
7548 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
	if (!phba) {
		dev_err(&pdev->dev, "failed to allocate hba struct\n");
		return NULL;
	}
7554 /* Set reference to PCI device in HBA structure */
7555 phba->pcidev = pdev;
7557 /* Assign an unused board number */
7558 phba->brd_no = lpfc_get_instance();
	if (phba->brd_no < 0) {
		kfree(phba);
		return NULL;
	}
	phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
7565 spin_lock_init(&phba->ct_ev_lock);
	INIT_LIST_HEAD(&phba->ct_ev_waiters);
	return phba;
7572 * lpfc_hba_free - Free driver hba data structure with a device.
7573 * @phba: pointer to lpfc hba data structure.
 * This routine is invoked to free the driver hba data structure with an
 * HBA device.
7579 lpfc_hba_free(struct lpfc_hba *phba)
7581 if (phba->sli_rev == LPFC_SLI_REV4)
7582 kfree(phba->sli4_hba.hdwq);
7584 /* Release the driver assigned board number */
7585 idr_remove(&lpfc_hba_index, phba->brd_no);
7587 /* Free memory allocated with sli3 rings */
7588 kfree(phba->sli.sli3_ring);
	phba->sli.sli3_ring = NULL;

	kfree(phba);
7596 * lpfc_create_shost - Create hba physical port with associated scsi host.
7597 * @phba: pointer to lpfc hba data structure.
 * This routine is invoked to create HBA physical port and associate a SCSI
 * host with it.
7604 * other values - error
7607 lpfc_create_shost(struct lpfc_hba *phba)
7609 struct lpfc_vport *vport;
7610 struct Scsi_Host *shost;
7612 /* Initialize HBA FC structure */
7613 phba->fc_edtov = FF_DEF_EDTOV;
7614 phba->fc_ratov = FF_DEF_RATOV;
7615 phba->fc_altov = FF_DEF_ALTOV;
7616 phba->fc_arbtov = FF_DEF_ARBTOV;
7618 atomic_set(&phba->sdev_cnt, 0);
	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
	if (!vport)
		return -ENODEV;
7623 shost = lpfc_shost_from_vport(vport);
7624 phba->pport = vport;
7626 if (phba->nvmet_support) {
7627 /* Only 1 vport (pport) will support NVME target */
7628 phba->targetport = NULL;
7629 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
7630 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
7631 "6076 NVME Target Found\n");
7634 lpfc_debugfs_initialize(vport);
7635 /* Put reference to SCSI host to driver's device private data */
7636 pci_set_drvdata(phba->pcidev, shost);
7639 * At this point we are fully registered with PSA. In addition,
7640 * any initial discovery should be completed.
7642 vport->load_flag |= FC_ALLOW_FDMI;
7643 if (phba->cfg_enable_SmartSAN ||
7644 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
7646 /* Setup appropriate attribute masks */
7647 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
7648 if (phba->cfg_enable_SmartSAN)
7649 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
		else
			vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
	}
	return 0;
7657 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
7658 * @phba: pointer to lpfc hba data structure.
 * This routine is invoked to destroy HBA physical port and the associated
 * SCSI host.
7664 lpfc_destroy_shost(struct lpfc_hba *phba)
7666 struct lpfc_vport *vport = phba->pport;
7668 /* Destroy physical port that associated with the SCSI host */
7669 destroy_port(vport);
7675 * lpfc_setup_bg - Setup Block guard structures and debug areas.
7676 * @phba: pointer to lpfc hba data structure.
7677 * @shost: the shost to be used to detect Block guard settings.
7679 * This routine sets up the local Block guard protocol settings for @shost.
7680 * This routine also allocates memory for debugging bg buffers.
7683 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
7688 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7689 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1478 Registering BlockGuard with the "
				"SCSI layer\n");
7693 old_mask = phba->cfg_prot_mask;
7694 old_guard = phba->cfg_prot_guard;
7696 /* Only allow supported values */
7697 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
7698 SHOST_DIX_TYPE0_PROTECTION |
7699 SHOST_DIX_TYPE1_PROTECTION);
7700 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
7701 SHOST_DIX_GUARD_CRC);
7703 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
7704 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
7705 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
7707 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7708 if ((old_mask != phba->cfg_prot_mask) ||
7709 (old_guard != phba->cfg_prot_guard))
7710 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7711 "1475 Registering BlockGuard with the "
7712 "SCSI layer: mask %d guard %d\n",
7713 phba->cfg_prot_mask,
7714 phba->cfg_prot_guard);
7716 scsi_host_set_prot(shost, phba->cfg_prot_mask);
7717 scsi_host_set_guard(shost, phba->cfg_prot_guard);
		} else
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7720 "1479 Not Registering BlockGuard with the SCSI "
7721 "layer, Bad protection parameters: %d %d\n",
					old_mask, old_guard);
	}
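/*
 * Illustrative configuration (sketch): a host enabling DIF Type 1 with
 * both guard types would arrive here with values equivalent to
 *
 *	cfg_prot_mask  = SHOST_DIF_TYPE1_PROTECTION |
 *			 SHOST_DIX_TYPE0_PROTECTION |
 *			 SHOST_DIX_TYPE1_PROTECTION;
 *	cfg_prot_guard = SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC;
 *
 * and the filtering above would pass them through unchanged.
 */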
7727 * lpfc_post_init_setup - Perform necessary device post initialization setup.
7728 * @phba: pointer to lpfc hba data structure.
7730 * This routine is invoked to perform all the necessary post initialization
7731 * setup for the device.
7734 lpfc_post_init_setup(struct lpfc_hba *phba)
7736 struct Scsi_Host *shost;
7737 struct lpfc_adapter_event_header adapter_event;
7739 /* Get the default values for Model Name and Description */
7740 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
7743 * hba setup may have changed the hba_queue_depth so we need to
7744 * adjust the value of can_queue.
7746 shost = pci_get_drvdata(phba->pcidev);
7747 shost->can_queue = phba->cfg_hba_queue_depth - 10;
7749 lpfc_host_attrib_init(shost);
7751 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
7752 spin_lock_irq(shost->host_lock);
7753 lpfc_poll_start_timer(phba);
7754 spin_unlock_irq(shost->host_lock);
7757 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7758 "0428 Perform SCSI scan\n");
7759 /* Send board arrival event to upper layer */
7760 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
7761 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
7762 fc_host_post_vendor_event(shost, fc_get_event_number(),
7763 sizeof(adapter_event),
				  (char *) &adapter_event,
				  LPFC_NL_VENDOR_ID);
7770 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
7771 * @phba: pointer to lpfc hba data structure.
7773 * This routine is invoked to set up the PCI device memory space for device
7774 * with SLI-3 interface spec.
7778 * other values - error
7781 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
7783 struct pci_dev *pdev = phba->pcidev;
7784 unsigned long bar0map_len, bar2map_len;
7792 /* Set the device DMA mask size */
7793 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (error)
		error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error)
		return error;
	error = -ENODEV;
7800 /* Get the bus address of Bar0 and Bar2 and the number of bytes
7801 * required by each mapping.
7803 phba->pci_bar0_map = pci_resource_start(pdev, 0);
7804 bar0map_len = pci_resource_len(pdev, 0);
7806 phba->pci_bar2_map = pci_resource_start(pdev, 2);
7807 bar2map_len = pci_resource_len(pdev, 2);
7809 /* Map HBA SLIM to a kernel virtual address. */
7810 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
7811 if (!phba->slim_memmap_p) {
7812 dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out;
	}
7817 /* Map HBA Control Registers to a kernel virtual address. */
7818 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
7819 if (!phba->ctrl_regs_memmap_p) {
7820 dev_printk(KERN_ERR, &pdev->dev,
7821 "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}
7825 /* Allocate memory for SLI-2 structures */
7826 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7827 &phba->slim2p.phys, GFP_KERNEL);
	if (!phba->slim2p.virt)
		goto out_iounmap;
7831 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
7832 phba->mbox_ext = (phba->slim2p.virt +
7833 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
7834 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
7835 phba->IOCBs = (phba->slim2p.virt +
7836 offsetof(struct lpfc_sli2_slim, IOCBs));
7838 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
7839 lpfc_sli_hbq_size(),
7840 &phba->hbqslimp.phys,
	if (!phba->hbqslimp.virt)
		goto out_free_slim;
7845 hbq_count = lpfc_sli_hbq_count();
7846 ptr = phba->hbqslimp.virt;
7847 for (i = 0; i < hbq_count; ++i) {
7848 phba->hbqs[i].hbq_virt = ptr;
7849 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
7850 ptr += (lpfc_hbq_defs[i]->entry_count *
7851 sizeof(struct lpfc_hbq_entry));
7853 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
7854 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
7856 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
7858 phba->MBslimaddr = phba->slim_memmap_p;
7859 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
7860 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
7861 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	return 0;

out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7868 phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out:
	return error;
7878 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
7879 * @phba: pointer to lpfc hba data structure.
7881 * This routine is invoked to unset the PCI device memory space for device
7882 * with SLI-3 interface spec.
7885 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
7887 struct pci_dev *pdev;
7889 /* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	pdev = phba->pcidev;
7895 /* Free coherent DMA memory allocated */
7896 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
7897 phba->hbqslimp.virt, phba->hbqslimp.phys);
7898 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7899 phba->slim2p.virt, phba->slim2p.phys);
7901 /* I/O memory unmap */
7902 iounmap(phba->ctrl_regs_memmap_p);
7903 iounmap(phba->slim_memmap_p);
7909 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
7910 * @phba: pointer to lpfc hba data structure.
7912 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
7913 * done and check status.
7915 * Return 0 if successful, otherwise -ENODEV.
7918 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
7920 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
7921 struct lpfc_register reg_data;
7922 int i, port_error = 0;
7925 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
	if (!phba->sli4_hba.PSMPHRregaddr)
		return -ENODEV;
7930 /* Wait up to 30 seconds for the SLI Port POST done and ready */
7931 for (i = 0; i < 3000; i++) {
7932 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
7933 &portsmphr_reg.word0) ||
7934 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
7935 /* Port has a fatal POST error, break out */
			port_error = -ENODEV;
			break;
		}
7939 if (LPFC_POST_STAGE_PORT_READY ==
		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
			break;
		msleep(10);
	}
7946 * If there was a port error during POST, then don't proceed with
7947 * other register reads as the data may not be valid. Just exit.
	if (port_error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7951 "1408 Port Failed POST - portsmphr=0x%x, "
7952 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
7953 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
7954 portsmphr_reg.word0,
7955 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
7956 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
7957 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
7958 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
7959 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
7960 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
7961 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7965 "2534 Device Info: SLIFamily=0x%x, "
7966 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
7967 "SLIHint_2=0x%x, FT=0x%x\n",
7968 bf_get(lpfc_sli_intf_sli_family,
7969 &phba->sli4_hba.sli_intf),
7970 bf_get(lpfc_sli_intf_slirev,
7971 &phba->sli4_hba.sli_intf),
7972 bf_get(lpfc_sli_intf_if_type,
7973 &phba->sli4_hba.sli_intf),
7974 bf_get(lpfc_sli_intf_sli_hint1,
7975 &phba->sli4_hba.sli_intf),
7976 bf_get(lpfc_sli_intf_sli_hint2,
7977 &phba->sli4_hba.sli_intf),
7978 bf_get(lpfc_sli_intf_func_type,
				&phba->sli4_hba.sli_intf));
	}
	/*
7981 * Check for other Port errors during the initialization
7982 * process. Fail the load if the port did not come up
7985 if_type = bf_get(lpfc_sli_intf_if_type,
			  &phba->sli4_hba.sli_intf);
	switch (if_type) {
7988 case LPFC_SLI_INTF_IF_TYPE_0:
7989 phba->sli4_hba.ue_mask_lo =
7990 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
7991 phba->sli4_hba.ue_mask_hi =
7992 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
		uerrlo_reg.word0 =
			readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
		uerrhi_reg.word0 =
			readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
7997 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
7998 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
7999 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8000 "1422 Unrecoverable Error "
8001 "Detected during POST "
8002 "uerr_lo_reg=0x%x, "
8003 "uerr_hi_reg=0x%x, "
8004 "ue_mask_lo_reg=0x%x, "
8005 "ue_mask_hi_reg=0x%x\n",
					uerrlo_reg.word0,
					uerrhi_reg.word0,
					phba->sli4_hba.ue_mask_lo,
8009 phba->sli4_hba.ue_mask_hi);
			port_error = -ENODEV;
		}
		break;
8013 case LPFC_SLI_INTF_IF_TYPE_2:
8014 case LPFC_SLI_INTF_IF_TYPE_6:
8015 /* Final checks. The port status should be clean. */
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			       &reg_data.word0) ||
		    (bf_get(lpfc_sliport_status_err, &reg_data) &&
		     !bf_get(lpfc_sliport_status_rn, &reg_data))) {
8020 phba->work_status[0] =
8021 readl(phba->sli4_hba.u.if_type2.
8023 phba->work_status[1] =
8024 readl(phba->sli4_hba.u.if_type2.
8026 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8027 "2888 Unrecoverable port error "
8028 "following POST: port status reg "
8029 "0x%x, port_smphr reg 0x%x, "
8030 "error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					portsmphr_reg.word0,
8033 phba->work_status[0],
8034 phba->work_status[1]);
			port_error = -ENODEV;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}

	return port_error;
8047 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
8048 * @phba: pointer to lpfc hba data structure.
8049 * @if_type: The SLI4 interface type getting configured.
8051 * This routine is invoked to set up SLI4 BAR0 PCI config space register
8055 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
8059 phba->sli4_hba.u.if_type0.UERRLOregaddr =
8060 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
8061 phba->sli4_hba.u.if_type0.UERRHIregaddr =
8062 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
8063 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
8064 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
8065 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
8066 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
8067 phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		break;
8070 case LPFC_SLI_INTF_IF_TYPE_2:
8071 phba->sli4_hba.u.if_type2.EQDregaddr =
8072 phba->sli4_hba.conf_regs_memmap_p +
8073 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
8074 phba->sli4_hba.u.if_type2.ERR1regaddr =
8075 phba->sli4_hba.conf_regs_memmap_p +
8076 LPFC_CTL_PORT_ER1_OFFSET;
8077 phba->sli4_hba.u.if_type2.ERR2regaddr =
8078 phba->sli4_hba.conf_regs_memmap_p +
8079 LPFC_CTL_PORT_ER2_OFFSET;
8080 phba->sli4_hba.u.if_type2.CTRLregaddr =
8081 phba->sli4_hba.conf_regs_memmap_p +
8082 LPFC_CTL_PORT_CTL_OFFSET;
8083 phba->sli4_hba.u.if_type2.STATUSregaddr =
8084 phba->sli4_hba.conf_regs_memmap_p +
8085 LPFC_CTL_PORT_STA_OFFSET;
8086 phba->sli4_hba.SLIINTFregaddr =
8087 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
8088 phba->sli4_hba.PSMPHRregaddr =
8089 phba->sli4_hba.conf_regs_memmap_p +
8090 LPFC_CTL_PORT_SEM_OFFSET;
8091 phba->sli4_hba.RQDBregaddr =
8092 phba->sli4_hba.conf_regs_memmap_p +
8093 LPFC_ULP0_RQ_DOORBELL;
8094 phba->sli4_hba.WQDBregaddr =
8095 phba->sli4_hba.conf_regs_memmap_p +
8096 LPFC_ULP0_WQ_DOORBELL;
8097 phba->sli4_hba.CQDBregaddr =
8098 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
8099 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8100 phba->sli4_hba.MQDBregaddr =
8101 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
8102 phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
8105 case LPFC_SLI_INTF_IF_TYPE_6:
8106 phba->sli4_hba.u.if_type2.EQDregaddr =
8107 phba->sli4_hba.conf_regs_memmap_p +
8108 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
8109 phba->sli4_hba.u.if_type2.ERR1regaddr =
8110 phba->sli4_hba.conf_regs_memmap_p +
8111 LPFC_CTL_PORT_ER1_OFFSET;
8112 phba->sli4_hba.u.if_type2.ERR2regaddr =
8113 phba->sli4_hba.conf_regs_memmap_p +
8114 LPFC_CTL_PORT_ER2_OFFSET;
8115 phba->sli4_hba.u.if_type2.CTRLregaddr =
8116 phba->sli4_hba.conf_regs_memmap_p +
8117 LPFC_CTL_PORT_CTL_OFFSET;
8118 phba->sli4_hba.u.if_type2.STATUSregaddr =
8119 phba->sli4_hba.conf_regs_memmap_p +
8120 LPFC_CTL_PORT_STA_OFFSET;
8121 phba->sli4_hba.PSMPHRregaddr =
8122 phba->sli4_hba.conf_regs_memmap_p +
8123 LPFC_CTL_PORT_SEM_OFFSET;
8124 phba->sli4_hba.BMBXregaddr =
8125 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8127 case LPFC_SLI_INTF_IF_TYPE_1:
8129 dev_printk(KERN_ERR, &phba->pcidev->dev,
8130 "FATAL - unsupported SLI4 interface type - %d\n",
8137 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
8138 * @phba: pointer to lpfc hba data structure.
8139 * @if_type: The SLI4 interface type getting configured.
8140 * This routine is invoked to set up SLI4 BAR1 register memory map.
8143 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
8146 case LPFC_SLI_INTF_IF_TYPE_0:
8147 phba->sli4_hba.PSMPHRregaddr =
8148 phba->sli4_hba.ctrl_regs_memmap_p +
8149 LPFC_SLIPORT_IF0_SMPHR;
8150 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8152 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8154 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8157 case LPFC_SLI_INTF_IF_TYPE_6:
8158 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8159 LPFC_IF6_RQ_DOORBELL;
8160 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8161 LPFC_IF6_WQ_DOORBELL;
8162 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8163 LPFC_IF6_CQ_DOORBELL;
8164 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8165 LPFC_IF6_EQ_DOORBELL;
8166 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8167 LPFC_IF6_MQ_DOORBELL;
8169 case LPFC_SLI_INTF_IF_TYPE_2:
8170 case LPFC_SLI_INTF_IF_TYPE_1:
8172 dev_err(&phba->pcidev->dev,
8173 "FATAL - unsupported SLI4 interface type - %d\n",
8180 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
8181 * @phba: pointer to lpfc hba data structure.
8182 * @vf: virtual function number
8184 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
8185 * based on the given virtual function number, @vf.
8187 * Return 0 if successful, otherwise -ENODEV.
8190 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
8192 if (vf > LPFC_VIR_FUNC_MAX)
8195 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8196 vf * LPFC_VFR_PAGE_SIZE +
8197 LPFC_ULP0_RQ_DOORBELL);
8198 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8199 vf * LPFC_VFR_PAGE_SIZE +
8200 LPFC_ULP0_WQ_DOORBELL);
8201 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8202 vf * LPFC_VFR_PAGE_SIZE +
8203 LPFC_EQCQ_DOORBELL);
8204 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8205 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8206 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
8207 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8208 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
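/*
 * A minimal sketch of the per-VF doorbell arithmetic above (the helper
 * name is invented; only the base + vf * page_size + offset pattern
 * mirrors the assignments in lpfc_sli4_bar2_register_memmap).
 */
static inline void __iomem *lpfc_example_vf_doorbell(void __iomem *base,
						     u32 vf,
						     unsigned long page_size,
						     unsigned long reg_offset)
{
	/* each VF owns one page-sized window of doorbell registers */
	return base + vf * page_size + reg_offset;
}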
8213 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
8214 * @phba: pointer to lpfc hba data structure.
8216 * This routine is invoked to create the bootstrap mailbox
8217 * region consistent with the SLI-4 interface spec. This
8218 * routine allocates all memory necessary to communicate
8219 * mailbox commands to the port and sets up all alignment
8220 * needs. No locks are expected to be held when calling this routine.
8225 * -ENOMEM - could not allocate memory.
8228 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
8231 struct lpfc_dmabuf *dmabuf;
8232 struct dma_address *dma_address;
8236 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8241 * The bootstrap mailbox region consists of 2 parts
8242 * plus an alignment restriction of 16 bytes.
8244 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
8245 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
8246 &dmabuf->phys, GFP_KERNEL);
8247 if (!dmabuf->virt) {
8253 * Initialize the bootstrap mailbox pointers now so that the register
8254 * operations are simple later. The mailbox dma address is required
8255 * to be 16-byte aligned. Also align the virtual memory as each
8256 * mailbox is copied into the bmbx mailbox region before issuing the
8257 * command to the port.
8259 phba->sli4_hba.bmbx.dmabuf = dmabuf;
8260 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
8262 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
8263 LPFC_ALIGN_16_BYTE);
8264 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
8265 LPFC_ALIGN_16_BYTE);
8268 * Set the high and low physical addresses now. The SLI4 alignment
8269 * requirement is 16 bytes and the mailbox is posted to the port
8270 * as two 30-bit addresses. The remaining low-order bits form a flag
8271 * marking whether the 30-bit address is the high or the low address.
8272 * Upcast bmbx aphys to 64bits so shift instruction compiles
8273 * clean on 32 bit machines.
8275 dma_address = &phba->sli4_hba.bmbx.dma_address;
8276 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
8277 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
8278 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
8279 LPFC_BMBX_BIT1_ADDR_HI);
8281 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
8282 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
8283 LPFC_BMBX_BIT1_ADDR_LO);
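/*
 * Worked sketch of the 30-bit split above (the function name is
 * invented; this is not driver code): bits 63:34 of the aligned bus
 * address become the addr_hi payload, bits 33:4 the addr_lo payload,
 * and bits 1:0 of each posted word mark which half it carries.
 */
static void lpfc_example_bmbx_split(u64 aphys)
{
	u32 hi = (u32)((aphys >> 34) & 0x3fffffff);
	u32 lo = (u32)((aphys >> 4) & 0x3fffffff);
	u32 addr_hi = (hi << 2) | LPFC_BMBX_BIT1_ADDR_HI;
	u32 addr_lo = (lo << 2) | LPFC_BMBX_BIT1_ADDR_LO;

	/* addr_hi/addr_lo match what is stored in dma_address above */
	(void)addr_hi;
	(void)addr_lo;
}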
8288 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
8289 * @phba: pointer to lpfc hba data structure.
8291 * This routine is invoked to teardown the bootstrap mailbox
8292 * region and release all host resources. This routine requires
8293 * the caller to ensure all mailbox commands have been recovered, no
8294 * additional mailbox commands are sent, and interrupts are disabled
8295 * before calling this routine.
8299 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
8301 dma_free_coherent(&phba->pcidev->dev,
8302 phba->sli4_hba.bmbx.bmbx_size,
8303 phba->sli4_hba.bmbx.dmabuf->virt,
8304 phba->sli4_hba.bmbx.dmabuf->phys);
8306 kfree(phba->sli4_hba.bmbx.dmabuf);
8307 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
8310 static const char * const lpfc_topo_to_str[] = {
8321 * lpfc_map_topology - Map the topology read from READ_CONFIG
8322 * @phba: pointer to lpfc hba data structure.
8323 * @rd_config: pointer to read config data
8325 * This routine is invoked to map the topology values as read
8326 * from the read config mailbox command. If the persistent
8327 * topology feature is supported, the firmware will provide the
8328 * saved topology information to be used in INIT_LINK.
8331 #define LINK_FLAGS_DEF 0x0
8332 #define LINK_FLAGS_P2P 0x1
8333 #define LINK_FLAGS_LOOP 0x2
8335 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
8339 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
8340 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
8341 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
8343 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8344 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
8347 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8348 "2019 FW does not support persistent topology "
8349 "Using driver parameter defined value [%s]",
8350 lpfc_topo_to_str[phba->cfg_topology]);
8353 /* FW supports persistent topology - override module parameter value */
8354 phba->hba_flag |= HBA_PERSISTENT_TOPO;
8355 switch (phba->pcidev->device) {
8356 case PCI_DEVICE_ID_LANCER_G7_FC:
8357 case PCI_DEVICE_ID_LANCER_G6_FC:
8359 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
8360 ? FLAGS_TOPOLOGY_MODE_LOOP
8361 : FLAGS_TOPOLOGY_MODE_PT_PT);
8363 phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
8368 /* If topology failover set - pt is '0' or '1' */
8369 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
8370 FLAGS_TOPOLOGY_MODE_LOOP_PT);
8372 phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
8373 ? FLAGS_TOPOLOGY_MODE_PT_PT
8374 : FLAGS_TOPOLOGY_MODE_LOOP);
8378 if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
8379 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8380 "2020 Using persistent topology value [%s]",
8381 lpfc_topo_to_str[phba->cfg_topology]);
8383 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8384 "2021 Invalid topology values from FW "
8385 "Using driver parameter defined value [%s]",
8386 lpfc_topo_to_str[phba->cfg_topology]);
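/*
 * Decode summary for the mapping above (a sketch; branch details for
 * elided lines are inferred, not verbatim driver logic):
 *   ptv == 0           keep the module-parameter topology
 *   ptv == 1, tf == 0  pt picks a fixed mode (PT_PT or LOOP)
 *   ptv == 1, tf == 1  pt picks which mode is attempted first
 *                      (PT_LOOP vs LOOP_PT failover)
 * G6/G7 parts additionally reject persistent values they cannot honor.
 */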
8391 * lpfc_sli4_read_config - Get the config parameters.
8392 * @phba: pointer to lpfc hba data structure.
8394 * This routine is invoked to read the configuration parameters from the HBA.
8395 * The configuration parameters are used to set the base and maximum values
8396 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
8397 * allocation for the port.
8401 * -ENOMEM - No available memory
8402 * -EIO - The mailbox failed to complete successfully.
8405 lpfc_sli4_read_config(struct lpfc_hba *phba)
8408 struct lpfc_mbx_read_config *rd_config;
8409 union lpfc_sli4_cfg_shdr *shdr;
8410 uint32_t shdr_status, shdr_add_status;
8411 struct lpfc_mbx_get_func_cfg *get_func_cfg;
8412 struct lpfc_rsrc_desc_fcfcoe *desc;
8414 uint16_t forced_link_speed;
8415 uint32_t if_type, qmin;
8416 int length, i, rc = 0, rc2;
8418 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8420 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8421 "2011 Unable to allocate memory for issuing "
8422 "SLI_CONFIG_SPECIAL mailbox command\n");
8426 lpfc_read_config(phba, pmb);
8428 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8429 if (rc != MBX_SUCCESS) {
8430 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8431 "2012 Mailbox failed , mbxCmd x%x "
8432 "READ_CONFIG, mbxStatus x%x\n",
8433 bf_get(lpfc_mqe_command, &pmb->u.mqe),
8434 bf_get(lpfc_mqe_status, &pmb->u.mqe));
8437 rd_config = &pmb->u.mqe.un.rd_config;
8438 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
8439 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
8440 phba->sli4_hba.lnk_info.lnk_tp =
8441 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
8442 phba->sli4_hba.lnk_info.lnk_no =
8443 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
8444 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8445 "3081 lnk_type:%d, lnk_numb:%d\n",
8446 phba->sli4_hba.lnk_info.lnk_tp,
8447 phba->sli4_hba.lnk_info.lnk_no);
8449 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8450 "3082 Mailbox (x%x) returned ldv:x0\n",
8451 bf_get(lpfc_mqe_command, &pmb->u.mqe));
8452 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
8453 phba->bbcredit_support = 1;
8454 phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
8457 phba->sli4_hba.conf_trunk =
8458 bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
8459 phba->sli4_hba.extents_in_use =
8460 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
8461 phba->sli4_hba.max_cfg_param.max_xri =
8462 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
8463 /* Reduce resource usage in kdump environment */
8464 if (is_kdump_kernel() &&
8465 phba->sli4_hba.max_cfg_param.max_xri > 512)
8466 phba->sli4_hba.max_cfg_param.max_xri = 512;
8467 phba->sli4_hba.max_cfg_param.xri_base =
8468 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
8469 phba->sli4_hba.max_cfg_param.max_vpi =
8470 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
8471 /* Limit the max we support */
8472 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
8473 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
8474 phba->sli4_hba.max_cfg_param.vpi_base =
8475 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
8476 phba->sli4_hba.max_cfg_param.max_rpi =
8477 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
8478 phba->sli4_hba.max_cfg_param.rpi_base =
8479 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
8480 phba->sli4_hba.max_cfg_param.max_vfi =
8481 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
8482 phba->sli4_hba.max_cfg_param.vfi_base =
8483 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
8484 phba->sli4_hba.max_cfg_param.max_fcfi =
8485 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
8486 phba->sli4_hba.max_cfg_param.max_eq =
8487 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
8488 phba->sli4_hba.max_cfg_param.max_rq =
8489 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
8490 phba->sli4_hba.max_cfg_param.max_wq =
8491 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
8492 phba->sli4_hba.max_cfg_param.max_cq =
8493 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
8494 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
8495 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
8496 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
8497 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
8498 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
8499 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
8500 phba->max_vports = phba->max_vpi;
8501 lpfc_map_topology(phba, rd_config);
8502 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8503 "2003 cfg params Extents? %d "
8508 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
8509 phba->sli4_hba.extents_in_use,
8510 phba->sli4_hba.max_cfg_param.xri_base,
8511 phba->sli4_hba.max_cfg_param.max_xri,
8512 phba->sli4_hba.max_cfg_param.vpi_base,
8513 phba->sli4_hba.max_cfg_param.max_vpi,
8514 phba->sli4_hba.max_cfg_param.vfi_base,
8515 phba->sli4_hba.max_cfg_param.max_vfi,
8516 phba->sli4_hba.max_cfg_param.rpi_base,
8517 phba->sli4_hba.max_cfg_param.max_rpi,
8518 phba->sli4_hba.max_cfg_param.max_fcfi,
8519 phba->sli4_hba.max_cfg_param.max_eq,
8520 phba->sli4_hba.max_cfg_param.max_cq,
8521 phba->sli4_hba.max_cfg_param.max_wq,
8522 phba->sli4_hba.max_cfg_param.max_rq);
8525 * Calculate queue resources based on how
8526 * many WQ/CQ/EQs are available.
8528 qmin = phba->sli4_hba.max_cfg_param.max_wq;
8529 if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
8530 qmin = phba->sli4_hba.max_cfg_param.max_cq;
8531 if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
8532 qmin = phba->sli4_hba.max_cfg_param.max_eq;
8534 * What's left after this can go toward NVME / FCP.
8535 * The minus 4 accounts for ELS, NVME LS, MBOX
8536 * plus one extra. When configured for
8537 * NVMET, FCP io channel WQs are not created.
8541 /* Check to see if there is enough for NVME */
8542 if ((phba->cfg_irq_chann > qmin) ||
8543 (phba->cfg_hdw_queue > qmin)) {
8544 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8545 "2005 Reducing Queues: "
8546 "WQ %d CQ %d EQ %d: min %d: "
8548 phba->sli4_hba.max_cfg_param.max_wq,
8549 phba->sli4_hba.max_cfg_param.max_cq,
8550 phba->sli4_hba.max_cfg_param.max_eq,
8551 qmin, phba->cfg_irq_chann,
8552 phba->cfg_hdw_queue);
8554 if (phba->cfg_irq_chann > qmin)
8555 phba->cfg_irq_chann = qmin;
8556 if (phba->cfg_hdw_queue > qmin)
8557 phba->cfg_hdw_queue = qmin;
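/*
 * Worked example of the reduction above (numbers invented): with
 * max_wq = 128, max_cq = 96 and max_eq = 64, qmin starts at
 * min(128, 96, 64) = 64; reserving 4 queues for ELS, NVME LS, MBOX
 * plus one extra leaves 60, so cfg_irq_chann and cfg_hdw_queue are
 * each clamped to 60 when they exceed it.
 */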
8564 /* Update link speed if forced link speed is supported */
8565 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8566 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
8568 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
8569 if (forced_link_speed) {
8570 phba->hba_flag |= HBA_FORCED_LINK_SPEED;
8572 switch (forced_link_speed) {
8574 phba->cfg_link_speed =
8575 LPFC_USER_LINK_SPEED_1G;
8578 phba->cfg_link_speed =
8579 LPFC_USER_LINK_SPEED_2G;
8582 phba->cfg_link_speed =
8583 LPFC_USER_LINK_SPEED_4G;
8586 phba->cfg_link_speed =
8587 LPFC_USER_LINK_SPEED_8G;
8589 case LINK_SPEED_10G:
8590 phba->cfg_link_speed =
8591 LPFC_USER_LINK_SPEED_10G;
8593 case LINK_SPEED_16G:
8594 phba->cfg_link_speed =
8595 LPFC_USER_LINK_SPEED_16G;
8597 case LINK_SPEED_32G:
8598 phba->cfg_link_speed =
8599 LPFC_USER_LINK_SPEED_32G;
8601 case LINK_SPEED_64G:
8602 phba->cfg_link_speed =
8603 LPFC_USER_LINK_SPEED_64G;
8606 phba->cfg_link_speed =
8607 LPFC_USER_LINK_SPEED_AUTO;
8610 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8611 "0047 Unrecognized link "
8614 phba->cfg_link_speed =
8615 LPFC_USER_LINK_SPEED_AUTO;
8620 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
8621 length = phba->sli4_hba.max_cfg_param.max_xri -
8622 lpfc_sli4_get_els_iocb_cnt(phba);
8623 if (phba->cfg_hba_queue_depth > length) {
8624 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8625 "3361 HBA queue depth changed from %d to %d\n",
8626 phba->cfg_hba_queue_depth, length);
8627 phba->cfg_hba_queue_depth = length;
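/*
 * Example of the clamp above (invented numbers): with max_xri = 2048
 * and lpfc_sli4_get_els_iocb_cnt() reserving 256 XRIs, the effective
 * ceiling is 1792, so a configured hba_queue_depth of 4096 would be
 * reduced to 1792.
 */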
8630 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
8631 LPFC_SLI_INTF_IF_TYPE_2)
8634 /* get the pf# and vf# for SLI4 if_type 2 port */
8635 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
8636 sizeof(struct lpfc_sli4_cfg_mhdr));
8637 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
8638 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
8639 length, LPFC_SLI4_MBX_EMBED);
8641 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8642 shdr = (union lpfc_sli4_cfg_shdr *)
8643 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
8644 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
8645 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
8646 if (rc2 || shdr_status || shdr_add_status) {
8647 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8648 "3026 Mailbox failed , mbxCmd x%x "
8649 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
8650 bf_get(lpfc_mqe_command, &pmb->u.mqe),
8651 bf_get(lpfc_mqe_status, &pmb->u.mqe));
8655 /* search for fc_fcoe resource descriptor */
8656 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
8658 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
8659 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
8660 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
8661 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
8662 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
8663 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
8666 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
8667 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
8668 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
8669 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
8670 phba->sli4_hba.iov.pf_number =
8671 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
8672 phba->sli4_hba.iov.vf_number =
8673 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
8678 if (i < LPFC_RSRC_DESC_MAX_NUM)
8679 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8680 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
8681 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
8682 phba->sli4_hba.iov.vf_number);
8684 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8685 "3028 GET_FUNCTION_CONFIG: failed to find "
8686 "Resource Descriptor:x%x\n",
8687 LPFC_RSRC_DESC_TYPE_FCFCOE);
8690 mempool_free(pmb, phba->mbox_mem_pool);
8695 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
8696 * @phba: pointer to lpfc hba data structure.
8698 * This routine is invoked to setup the port-side endian order when
8699 * the port if_type is 0. This routine has no function for other if_types.
8704 * -ENOMEM - No available memory
8705 * -EIO - The mailbox failed to complete successfully.
8708 lpfc_setup_endian_order(struct lpfc_hba *phba)
8710 LPFC_MBOXQ_t *mboxq;
8711 uint32_t if_type, rc = 0;
8712 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
8713 HOST_ENDIAN_HIGH_WORD1};
8715 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8717 case LPFC_SLI_INTF_IF_TYPE_0:
8718 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8721 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8722 "0492 Unable to allocate memory for "
8723 "issuing SLI_CONFIG_SPECIAL mailbox "
8729 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
8730 * two words to contain special data values and no other data.
8732 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
8733 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
8734 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8735 if (rc != MBX_SUCCESS) {
8736 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8737 "0493 SLI_CONFIG_SPECIAL mailbox "
8738 "failed with status x%x\n",
8742 mempool_free(mboxq, phba->mbox_mem_pool);
8744 case LPFC_SLI_INTF_IF_TYPE_6:
8745 case LPFC_SLI_INTF_IF_TYPE_2:
8746 case LPFC_SLI_INTF_IF_TYPE_1:
8754 * lpfc_sli4_queue_verify - Verify and update EQ counts
8755 * @phba: pointer to lpfc hba data structure.
8757 * This routine is invoked to check the user settable queue counts for EQs.
8758 * After this routine is called the counts will be set to valid values that
8759 * adhere to the constraints of the system's interrupt vectors and the port's queue resources.
8764 * -ENOMEM - No available memory
8767 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
8770 * Sanity check for configured queue parameters against the run-time hardware queue parameters.
8774 if (phba->nvmet_support) {
8775 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
8776 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
8777 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
8778 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
8781 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8782 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
8783 phba->cfg_hdw_queue, phba->cfg_irq_chann,
8784 phba->cfg_nvmet_mrq);
8786 /* Get EQ depth from module parameter, fake the default for now */
8787 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8788 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
8790 /* Get CQ depth from module parameter, fake the default for now */
8791 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8792 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
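/*
 * The NVMET MRQ bounds check above is a simple clamp; a generic sketch
 * (helper name invented):
 */
static inline u32 lpfc_example_clamp(u32 requested, u32 dependent_max,
				     u32 hard_max)
{
	if (requested > dependent_max)
		requested = dependent_max;
	if (requested > hard_max)
		requested = hard_max;
	return requested;
}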
8797 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
8799 struct lpfc_queue *qdesc;
8803 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
8804 /* Create Fast Path IO CQs */
8805 if (phba->enab_exp_wqcq_pages)
8806 /* Increase the CQ size when WQEs contain an embedded cdb */
8807 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8808 phba->sli4_hba.cq_esize,
8809 LPFC_CQE_EXP_COUNT, cpu);
8812 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8813 phba->sli4_hba.cq_esize,
8814 phba->sli4_hba.cq_ecount, cpu);
8816 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8817 "0499 Failed allocate fast-path IO CQ (%d)\n", idx);
8820 qdesc->qe_valid = 1;
8823 phba->sli4_hba.hdwq[idx].io_cq = qdesc;
8825 /* Create Fast Path IO WQs */
8826 if (phba->enab_exp_wqcq_pages) {
8827 /* Increase the WQ size when WQEs contain an embedded cdb */
8828 wqesize = (phba->fcp_embed_io) ?
8829 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
8830 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8832 LPFC_WQE_EXP_COUNT, cpu);
8834 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8835 phba->sli4_hba.wq_esize,
8836 phba->sli4_hba.wq_ecount, cpu);
8839 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8840 "0503 Failed allocate fast-path IO WQ (%d)\n",
8846 phba->sli4_hba.hdwq[idx].io_wq = qdesc;
8847 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8852 * lpfc_sli4_queue_create - Create all the SLI4 queues
8853 * @phba: pointer to lpfc hba data structure.
8855 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
8856 * operation. For each SLI4 queue type, the parameters such as queue entry
8857 * count (queue depth) shall be taken from the module parameter. For now,
8858 * we just use some constant number as a placeholder.
8862 * -ENOMEM - No available memory
8863 * -EIO - The mailbox failed to complete successfully.
8866 lpfc_sli4_queue_create(struct lpfc_hba *phba)
8868 struct lpfc_queue *qdesc;
8869 int idx, cpu, eqcpu;
8870 struct lpfc_sli4_hdw_queue *qp;
8871 struct lpfc_vector_map_info *cpup;
8872 struct lpfc_vector_map_info *eqcpup;
8873 struct lpfc_eq_intr_info *eqi;
8876 * Create HBA Record arrays.
8877 * Both NVME and FCP will share the same vectors / EQs
8879 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
8880 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
8881 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
8882 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
8883 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
8884 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
8885 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8886 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
8887 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8888 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
8890 if (!phba->sli4_hba.hdwq) {
8891 phba->sli4_hba.hdwq = kcalloc(
8892 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
8894 if (!phba->sli4_hba.hdwq) {
8895 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8896 "6427 Failed allocate memory for "
8897 "fast-path Hardware Queue array\n");
8900 /* Prepare hardware queues to take IO buffers */
8901 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
8902 qp = &phba->sli4_hba.hdwq[idx];
8903 spin_lock_init(&qp->io_buf_list_get_lock);
8904 spin_lock_init(&qp->io_buf_list_put_lock);
8905 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
8906 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
8907 qp->get_io_bufs = 0;
8908 qp->put_io_bufs = 0;
8909 qp->total_io_bufs = 0;
8910 spin_lock_init(&qp->abts_io_buf_list_lock);
8911 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
8912 qp->abts_scsi_io_bufs = 0;
8913 qp->abts_nvme_io_bufs = 0;
8914 INIT_LIST_HEAD(&qp->sgl_list);
8915 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
8916 spin_lock_init(&qp->hdwq_lock);
8920 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8921 if (phba->nvmet_support) {
8922 phba->sli4_hba.nvmet_cqset = kcalloc(
8923 phba->cfg_nvmet_mrq,
8924 sizeof(struct lpfc_queue *),
8926 if (!phba->sli4_hba.nvmet_cqset) {
8927 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8928 "3121 Fail allocate memory for "
8929 "fast-path CQ set array\n");
8932 phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
8933 phba->cfg_nvmet_mrq,
8934 sizeof(struct lpfc_queue *),
8936 if (!phba->sli4_hba.nvmet_mrq_hdr) {
8937 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8938 "3122 Fail allocate memory for "
8939 "fast-path RQ set hdr array\n");
8942 phba->sli4_hba.nvmet_mrq_data = kcalloc(
8943 phba->cfg_nvmet_mrq,
8944 sizeof(struct lpfc_queue *),
8946 if (!phba->sli4_hba.nvmet_mrq_data) {
8947 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8948 "3124 Fail allocate memory for "
8949 "fast-path RQ set data array\n");
8955 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
8957 /* Create HBA Event Queues (EQs) */
8958 for_each_present_cpu(cpu) {
8959 /* We only want to create 1 EQ per vector, even though
8960 * multiple CPUs might be using that vector, so we only
8961 * select the CPUs that are marked LPFC_CPU_FIRST_IRQ.
8963 cpup = &phba->sli4_hba.cpu_map[cpu];
8964 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
8967 /* Get a ptr to the Hardware Queue associated with this CPU */
8968 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
8970 /* Allocate an EQ */
8971 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8972 phba->sli4_hba.eq_esize,
8973 phba->sli4_hba.eq_ecount, cpu);
8975 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8976 "0497 Failed allocate EQ (%d)\n",
8980 qdesc->qe_valid = 1;
8981 qdesc->hdwq = cpup->hdwq;
8982 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
8983 qdesc->last_cpu = qdesc->chann;
8985 /* Save the allocated EQ in the Hardware Queue */
8988 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
8989 list_add(&qdesc->cpu_list, &eqi->list);
8992 /* Now we need to populate the other Hardware Queues that share
8993 * an IRQ vector with the associated EQ ptr.
8995 for_each_present_cpu(cpu) {
8996 cpup = &phba->sli4_hba.cpu_map[cpu];
8998 /* Check for EQ already allocated in previous loop */
8999 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
9002 /* Check for multiple CPUs per hdwq */
9003 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
9007 /* We need to share an EQ for this hdwq */
9008 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
9009 eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
9010 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
9013 /* Allocate IO Path SLI4 CQ/WQs */
9014 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9015 if (lpfc_alloc_io_wq_cq(phba, idx))
9019 if (phba->nvmet_support) {
9020 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
9021 cpu = lpfc_find_cpu_handle(phba, idx,
9023 qdesc = lpfc_sli4_queue_alloc(phba,
9024 LPFC_DEFAULT_PAGE_SIZE,
9025 phba->sli4_hba.cq_esize,
9026 phba->sli4_hba.cq_ecount,
9029 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9030 "3142 Failed allocate NVME "
9031 "CQ Set (%d)\n", idx);
9034 qdesc->qe_valid = 1;
9037 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
9042 * Create Slow Path Completion Queues (CQs)
9045 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
9046 /* Create slow-path Mailbox Command Complete Queue */
9047 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9048 phba->sli4_hba.cq_esize,
9049 phba->sli4_hba.cq_ecount, cpu);
9051 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9052 "0500 Failed allocate slow-path mailbox CQ\n");
9055 qdesc->qe_valid = 1;
9056 phba->sli4_hba.mbx_cq = qdesc;
9058 /* Create slow-path ELS Complete Queue */
9059 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9060 phba->sli4_hba.cq_esize,
9061 phba->sli4_hba.cq_ecount, cpu);
9063 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9064 "0501 Failed allocate slow-path ELS CQ\n");
9067 qdesc->qe_valid = 1;
9069 phba->sli4_hba.els_cq = qdesc;
9073 * Create Slow Path Work Queues (WQs)
9076 /* Create Mailbox Command Queue */
9078 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9079 phba->sli4_hba.mq_esize,
9080 phba->sli4_hba.mq_ecount, cpu);
9082 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9083 "0505 Failed allocate slow-path MQ\n");
9087 phba->sli4_hba.mbx_wq = qdesc;
9090 * Create ELS Work Queues
9093 /* Create slow-path ELS Work Queue */
9094 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9095 phba->sli4_hba.wq_esize,
9096 phba->sli4_hba.wq_ecount, cpu);
9098 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9099 "0504 Failed allocate slow-path ELS WQ\n");
9103 phba->sli4_hba.els_wq = qdesc;
9104 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9106 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9107 /* Create NVME LS Complete Queue */
9108 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9109 phba->sli4_hba.cq_esize,
9110 phba->sli4_hba.cq_ecount, cpu);
9112 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9113 "6079 Failed allocate NVME LS CQ\n");
9117 qdesc->qe_valid = 1;
9118 phba->sli4_hba.nvmels_cq = qdesc;
9120 /* Create NVME LS Work Queue */
9121 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9122 phba->sli4_hba.wq_esize,
9123 phba->sli4_hba.wq_ecount, cpu);
9125 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9126 "6080 Failed allocate NVME LS WQ\n");
9130 phba->sli4_hba.nvmels_wq = qdesc;
9131 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9135 * Create Receive Queue (RQ)
9138 /* Create Receive Queue for header */
9139 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9140 phba->sli4_hba.rq_esize,
9141 phba->sli4_hba.rq_ecount, cpu);
9143 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9144 "0506 Failed allocate receive HRQ\n");
9147 phba->sli4_hba.hdr_rq = qdesc;
9149 /* Create Receive Queue for data */
9150 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9151 phba->sli4_hba.rq_esize,
9152 phba->sli4_hba.rq_ecount, cpu);
9154 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9155 "0507 Failed allocate receive DRQ\n");
9158 phba->sli4_hba.dat_rq = qdesc;
9160 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
9161 phba->nvmet_support) {
9162 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
9163 cpu = lpfc_find_cpu_handle(phba, idx,
9165 /* Create NVMET Receive Queue for header */
9166 qdesc = lpfc_sli4_queue_alloc(phba,
9167 LPFC_DEFAULT_PAGE_SIZE,
9168 phba->sli4_hba.rq_esize,
9169 LPFC_NVMET_RQE_DEF_COUNT,
9172 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9173 "3146 Failed allocate "
9178 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
9180 /* Only needed for header of RQ pair */
9181 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
9184 if (qdesc->rqbp == NULL) {
9185 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9186 "6131 Failed allocate "
9191 /* Put list in known state in case driver load fails. */
9192 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
9194 /* Create NVMET Receive Queue for data */
9195 qdesc = lpfc_sli4_queue_alloc(phba,
9196 LPFC_DEFAULT_PAGE_SIZE,
9197 phba->sli4_hba.rq_esize,
9198 LPFC_NVMET_RQE_DEF_COUNT,
9201 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9202 "3156 Failed allocate "
9207 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
9211 /* Clear NVME stats */
9212 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9213 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9214 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
9215 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
9219 /* Clear SCSI stats */
9220 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
9221 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9222 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
9223 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
9230 lpfc_sli4_queue_destroy(phba);
9235 __lpfc_sli4_release_queue(struct lpfc_queue **qp)
9238 lpfc_sli4_queue_free(*qp);
9244 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
9251 for (idx = 0; idx < max; idx++)
9252 __lpfc_sli4_release_queue(&(*qs)[idx]);
9259 lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
9261 struct lpfc_sli4_hdw_queue *hdwq;
9262 struct lpfc_queue *eq;
9265 hdwq = phba->sli4_hba.hdwq;
9267 /* Loop thru all Hardware Queues */
9268 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9269 /* Free the CQ/WQ corresponding to the Hardware Queue */
9270 lpfc_sli4_queue_free(hdwq[idx].io_cq);
9271 lpfc_sli4_queue_free(hdwq[idx].io_wq);
9272 hdwq[idx].hba_eq = NULL;
9273 hdwq[idx].io_cq = NULL;
9274 hdwq[idx].io_wq = NULL;
9275 if (phba->cfg_xpsgl && !phba->nvmet_support)
9276 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
9277 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
9279 /* Loop thru all IRQ vectors */
9280 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
9281 /* Free the EQ corresponding to the IRQ vector */
9282 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
9283 lpfc_sli4_queue_free(eq);
9284 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
9289 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
9290 * @phba: pointer to lpfc hba data structure.
9292 * This routine is invoked to release all the SLI4 queues with the FCoE HBA operation.
9297 * -ENOMEM - No available memory
9298 * -EIO - The mailbox failed to complete successfully.
9301 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
9304 * Set FREE_INIT before beginning to free the queues.
9305 * Wait until all users of the queues have acknowledged the
9306 * release by clearing FREE_WAIT.
9308 spin_lock_irq(&phba->hbalock);
9309 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
9310 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
9311 spin_unlock_irq(&phba->hbalock);
9313 spin_lock_irq(&phba->hbalock);
9315 spin_unlock_irq(&phba->hbalock);
9317 lpfc_sli4_cleanup_poll_list(phba);
9319 /* Release HBA eqs */
9320 if (phba->sli4_hba.hdwq)
9321 lpfc_sli4_release_hdwq(phba);
9323 if (phba->nvmet_support) {
9324 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
9325 phba->cfg_nvmet_mrq);
9327 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
9328 phba->cfg_nvmet_mrq);
9329 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
9330 phba->cfg_nvmet_mrq);
9333 /* Release mailbox command work queue */
9334 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
9336 /* Release ELS work queue */
9337 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
9339 /* Release NVME LS work queue */
9340 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
9342 /* Release unsolicited receive queue */
9343 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
9344 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
9346 /* Release ELS complete queue */
9347 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
9349 /* Release NVME LS complete queue */
9350 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
9352 /* Release mailbox command complete queue */
9353 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
9355 /* Everything on this list has been freed */
9356 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
9358 /* Done with freeing the queues */
9359 spin_lock_irq(&phba->hbalock);
9360 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
9361 spin_unlock_irq(&phba->hbalock);
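/*
 * Sketch of the FREE_INIT/FREE_WAIT handshake used above (the reader
 * side is inferred, not shown in this section): a queue user sets
 * FREE_WAIT under hbalock before walking the queues and clears it when
 * done, so lpfc_sli4_queue_destroy() spins, dropping the lock between
 * checks, until no user remains before tearing anything down.
 */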
9365 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
9367 struct lpfc_rqb *rqbp;
9368 struct lpfc_dmabuf *h_buf;
9369 struct rqb_dmabuf *rqb_buffer;
9372 while (!list_empty(&rqbp->rqb_buffer_list)) {
9373 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
9374 struct lpfc_dmabuf, list);
9376 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
9377 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
9378 rqbp->buffer_count--;
9384 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
9385 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
9386 int qidx, uint32_t qtype)
9388 struct lpfc_sli_ring *pring;
9391 if (!eq || !cq || !wq) {
9392 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9393 "6085 Fast-path %s (%d) not allocated\n",
9394 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
9398 /* Create the CQ first */
9399 rc = lpfc_cq_create(phba, cq, eq,
9400 (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
9402 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9403 "6086 Failed setup of CQ (%d), rc = 0x%x\n",
9404 qidx, (uint32_t)rc);
9408 if (qtype != LPFC_MBOX) {
9409 /* Setup cq_map for fast lookup */
9411 *cq_map = cq->queue_id;
9413 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9414 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
9415 qidx, cq->queue_id, qidx, eq->queue_id);
9418 rc = lpfc_wq_create(phba, wq, cq, qtype);
9420 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9421 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
9422 qidx, (uint32_t)rc);
9423 /* no need to tear down cq - caller will do so */
9427 /* Bind this CQ/WQ to the NVME ring */
9429 pring->sli.sli4.wqp = (void *)wq;
9432 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9433 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
9434 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
9436 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
9438 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9439 "0539 Failed setup of slow-path MQ: "
9441 /* no need to tear down cq - caller will do so */
9445 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9446 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
9447 phba->sli4_hba.mbx_wq->queue_id,
9448 phba->sli4_hba.mbx_cq->queue_id);
9455 * lpfc_setup_cq_lookup - Setup the CQ lookup table
9456 * @phba: pointer to lpfc hba data structure.
9458 * This routine will populate the cq_lookup table with all
9459 * available CQ queue_id's.
9462 lpfc_setup_cq_lookup(struct lpfc_hba *phba)
9464 struct lpfc_queue *eq, *childq;
9467 memset(phba->sli4_hba.cq_lookup, 0,
9468 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
9469 /* Loop thru all IRQ vectors */
9470 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9471 /* Get the EQ corresponding to the IRQ vector */
9472 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
9475 /* Loop through all CQs associated with that EQ */
9476 list_for_each_entry(childq, &eq->child_list, list) {
9477 if (childq->queue_id > phba->sli4_hba.cq_max)
9479 if (childq->subtype == LPFC_IO)
9480 phba->sli4_hba.cq_lookup[childq->queue_id] = childq;
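/*
 * Hypothetical fast-path use of the table populated above (the helper
 * name is invented; the fields are the driver's own):
 */
static inline struct lpfc_queue *
lpfc_example_cq_from_id(struct lpfc_hba *phba, uint16_t cqid)
{
	if (!phba->sli4_hba.cq_lookup || cqid > phba->sli4_hba.cq_max)
		return NULL;
	return phba->sli4_hba.cq_lookup[cqid];	/* O(1) CQ-id lookup */
}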
9487 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
9488 * @phba: pointer to lpfc hba data structure.
9490 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA operation.
9495 * -ENOMEM - No available memory
9496 * -EIO - The mailbox failed to complete successfully.
9499 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
9501 uint32_t shdr_status, shdr_add_status;
9502 union lpfc_sli4_cfg_shdr *shdr;
9503 struct lpfc_vector_map_info *cpup;
9504 struct lpfc_sli4_hdw_queue *qp;
9505 LPFC_MBOXQ_t *mboxq;
9507 uint32_t length, usdelay;
9510 /* Check for dual-ULP support */
9511 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9513 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9514 "3249 Unable to allocate memory for "
9515 "QUERY_FW_CFG mailbox command\n");
9518 length = (sizeof(struct lpfc_mbx_query_fw_config) -
9519 sizeof(struct lpfc_sli4_cfg_mhdr));
9520 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9521 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
9522 length, LPFC_SLI4_MBX_EMBED);
9524 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9526 shdr = (union lpfc_sli4_cfg_shdr *)
9527 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
9528 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9529 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9530 if (shdr_status || shdr_add_status || rc) {
9531 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9532 "3250 QUERY_FW_CFG mailbox failed with status "
9533 "x%x add_status x%x, mbx status x%x\n",
9534 shdr_status, shdr_add_status, rc);
9535 if (rc != MBX_TIMEOUT)
9536 mempool_free(mboxq, phba->mbox_mem_pool);
9541 phba->sli4_hba.fw_func_mode =
9542 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
9543 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
9544 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
9545 phba->sli4_hba.physical_port =
9546 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
9547 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9548 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
9549 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
9550 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
9552 if (rc != MBX_TIMEOUT)
9553 mempool_free(mboxq, phba->mbox_mem_pool);
9556 * Set up HBA Event Queues (EQs)
9558 qp = phba->sli4_hba.hdwq;
9560 /* Set up HBA event queue */
9562 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9563 "3147 Fast-path EQs not allocated\n");
9568 /* Loop thru all IRQ vectors */
9569 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9570 /* Create HBA Event Queues (EQs) in order */
9571 for_each_present_cpu(cpu) {
9572 cpup = &phba->sli4_hba.cpu_map[cpu];
9574 /* Look for the CPU that's using that vector with
9575 * LPFC_CPU_FIRST_IRQ set.
9577 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
9579 if (qidx != cpup->eq)
9582 /* Create an EQ for that vector */
9583 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
9584 phba->cfg_fcp_imax);
9586 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9587 "0523 Failed setup of fast-path"
9588 " EQ (%d), rc = 0x%x\n",
9589 cpup->eq, (uint32_t)rc);
9593 /* Save the EQ for that vector in the hba_eq_hdl */
9594 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
9595 qp[cpup->hdwq].hba_eq;
9597 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9598 "2584 HBA EQ setup: queue[%d]-id=%d\n",
9600 qp[cpup->hdwq].hba_eq->queue_id);
9604 /* Loop thru all Hardware Queues */
9605 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
9606 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
9607 cpup = &phba->sli4_hba.cpu_map[cpu];
9609 /* Create the CQ/WQ corresponding to the Hardware Queue */
9610 rc = lpfc_create_wq_cq(phba,
9611 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
9614 &phba->sli4_hba.hdwq[qidx].io_cq_map,
9618 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9619 "0535 Failed to setup fastpath "
9620 "IO WQ/CQ (%d), rc = 0x%x\n",
9621 qidx, (uint32_t)rc);
9627 * Set up Slow Path Complete Queues (CQs)
9630 /* Set up slow-path MBOX CQ/MQ */
9632 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
9633 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9634 "0528 %s not allocated\n",
9635 phba->sli4_hba.mbx_cq ?
9636 "Mailbox WQ" : "Mailbox CQ");
9641 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9642 phba->sli4_hba.mbx_cq,
9643 phba->sli4_hba.mbx_wq,
9644 NULL, 0, LPFC_MBOX);
9646 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9647 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
9651 if (phba->nvmet_support) {
9652 if (!phba->sli4_hba.nvmet_cqset) {
9653 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9654 "3165 Fast-path NVME CQ Set "
9655 "array not allocated\n");
9659 if (phba->cfg_nvmet_mrq > 1) {
9660 rc = lpfc_cq_create_set(phba,
9661 phba->sli4_hba.nvmet_cqset,
9663 LPFC_WCQ, LPFC_NVMET);
9665 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9666 "3164 Failed setup of NVME CQ "
9672 /* Set up NVMET Receive Complete Queue */
9673 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
9675 LPFC_WCQ, LPFC_NVMET);
9677 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9678 "6089 Failed setup NVMET CQ: "
9679 "rc = 0x%x\n", (uint32_t)rc);
9682 phba->sli4_hba.nvmet_cqset[0]->chann = 0;
9684 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9685 "6090 NVMET CQ setup: cq-id=%d, "
9686 "parent eq-id=%d\n",
9687 phba->sli4_hba.nvmet_cqset[0]->queue_id,
9688 qp[0].hba_eq->queue_id);
9692 /* Set up slow-path ELS WQ/CQ */
9693 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
9694 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9695 "0530 ELS %s not allocated\n",
9696 phba->sli4_hba.els_cq ? "WQ" : "CQ");
9700 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9701 phba->sli4_hba.els_cq,
9702 phba->sli4_hba.els_wq,
9705 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9706 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
9710 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9711 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
9712 phba->sli4_hba.els_wq->queue_id,
9713 phba->sli4_hba.els_cq->queue_id);
9715 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9716 /* Set up NVME LS Complete Queue */
9717 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
9718 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9719 "6091 LS %s not allocated\n",
9720 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
9724 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9725 phba->sli4_hba.nvmels_cq,
9726 phba->sli4_hba.nvmels_wq,
9727 NULL, 0, LPFC_NVME_LS);
9729 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9730 "0526 Failed setup of NVVME LS WQ/CQ: "
9731 "rc = 0x%x\n", (uint32_t)rc);
9735 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9736 "6096 ELS WQ setup: wq-id=%d, "
9737 "parent cq-id=%d\n",
9738 phba->sli4_hba.nvmels_wq->queue_id,
9739 phba->sli4_hba.nvmels_cq->queue_id);
9743 * Create NVMET Receive Queue (RQ)
9745 if (phba->nvmet_support) {
9746 if ((!phba->sli4_hba.nvmet_cqset) ||
9747 (!phba->sli4_hba.nvmet_mrq_hdr) ||
9748 (!phba->sli4_hba.nvmet_mrq_data)) {
9749 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9750 "6130 MRQ CQ Queues not "
9755 if (phba->cfg_nvmet_mrq > 1) {
9756 rc = lpfc_mrq_create(phba,
9757 phba->sli4_hba.nvmet_mrq_hdr,
9758 phba->sli4_hba.nvmet_mrq_data,
9759 phba->sli4_hba.nvmet_cqset,
9762 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9763 "6098 Failed setup of NVMET "
9770 rc = lpfc_rq_create(phba,
9771 phba->sli4_hba.nvmet_mrq_hdr[0],
9772 phba->sli4_hba.nvmet_mrq_data[0],
9773 phba->sli4_hba.nvmet_cqset[0],
9776 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9777 "6057 Failed setup of NVMET "
9778 "Receive Queue: rc = 0x%x\n",
9784 phba, KERN_INFO, LOG_INIT,
9785 "6099 NVMET RQ setup: hdr-rq-id=%d, "
9786 "dat-rq-id=%d parent cq-id=%d\n",
9787 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
9788 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
9789 phba->sli4_hba.nvmet_cqset[0]->queue_id);
9794 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
9795 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9796 "0540 Receive Queue not allocated\n");
9801 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
9802 phba->sli4_hba.els_cq, LPFC_USOL);
9804 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9805 "0541 Failed setup of Receive Queue: "
9806 "rc = 0x%x\n", (uint32_t)rc);
9810 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9811 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
9812 "parent cq-id=%d\n",
9813 phba->sli4_hba.hdr_rq->queue_id,
9814 phba->sli4_hba.dat_rq->queue_id,
9815 phba->sli4_hba.els_cq->queue_id);
9817 if (phba->cfg_fcp_imax)
9818 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
9822 for (qidx = 0; qidx < phba->cfg_irq_chann;
9823 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
9824 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
9827 if (phba->sli4_hba.cq_max) {
9828 kfree(phba->sli4_hba.cq_lookup);
9829 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
9830 sizeof(struct lpfc_queue *), GFP_KERNEL);
9831 if (!phba->sli4_hba.cq_lookup) {
9832 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9833 "0549 Failed setup of CQ Lookup table: "
9834 "size 0x%x\n", phba->sli4_hba.cq_max);
9838 lpfc_setup_cq_lookup(phba);
9843 lpfc_sli4_queue_unset(phba);
9849 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
9850 * @phba: pointer to lpfc hba data structure.
9852 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA operation.
9857 * -ENOMEM - No available memory
9858 * -EIO - The mailbox failed to complete successfully.
9861 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
9863 struct lpfc_sli4_hdw_queue *qp;
9864 struct lpfc_queue *eq;
9867 /* Unset mailbox command work queue */
9868 if (phba->sli4_hba.mbx_wq)
9869 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
9871 /* Unset NVME LS work queue */
9872 if (phba->sli4_hba.nvmels_wq)
9873 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
9875 /* Unset ELS work queue */
9876 if (phba->sli4_hba.els_wq)
9877 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
9879 /* Unset unsolicited receive queue */
9880 if (phba->sli4_hba.hdr_rq)
9881 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
9882 phba->sli4_hba.dat_rq);
9884 /* Unset mailbox command complete queue */
9885 if (phba->sli4_hba.mbx_cq)
9886 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
9888 /* Unset ELS complete queue */
9889 if (phba->sli4_hba.els_cq)
9890 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
9892 /* Unset NVME LS complete queue */
9893 if (phba->sli4_hba.nvmels_cq)
9894 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
9896 if (phba->nvmet_support) {
9897 /* Unset NVMET MRQ queue */
9898 if (phba->sli4_hba.nvmet_mrq_hdr) {
9899 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
9902 phba->sli4_hba.nvmet_mrq_hdr[qidx],
9903 phba->sli4_hba.nvmet_mrq_data[qidx]);
9906 /* Unset NVMET CQ Set complete queue */
9907 if (phba->sli4_hba.nvmet_cqset) {
9908 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
9910 phba, phba->sli4_hba.nvmet_cqset[qidx]);
9914 /* Unset fast-path SLI4 queues */
9915 if (phba->sli4_hba.hdwq) {
9916 /* Loop thru all Hardware Queues */
9917 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
9918 /* Destroy the CQ/WQ corresponding to Hardware Queue */
9919 qp = &phba->sli4_hba.hdwq[qidx];
9920 lpfc_wq_destroy(phba, qp->io_wq);
9921 lpfc_cq_destroy(phba, qp->io_cq);
9923 /* Loop thru all IRQ vectors */
9924 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9925 /* Destroy the EQ corresponding to the IRQ vector */
9926 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
9927 lpfc_eq_destroy(phba, eq);
9931 kfree(phba->sli4_hba.cq_lookup);
9932 phba->sli4_hba.cq_lookup = NULL;
9933 phba->sli4_hba.cq_max = 0;
9937 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
9938 * @phba: pointer to lpfc hba data structure.
9940 * This routine is invoked to allocate and set up a pool of completion queue
9941 * events. The body of the completion queue event is a completion queue entry
9942 * (CQE). For now, this pool is used for the interrupt service routine to queue
9943 * the following HBA completion queue events for the worker thread to process:
9944 * - Mailbox asynchronous events
9945 * - Receive queue completion unsolicited events
9946 * Later, this can be used for all the slow-path events.
9950 * -ENOMEM - No available memory
9953 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
9955 struct lpfc_cq_event *cq_event;
9958 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
9959 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
9961 goto out_pool_create_fail;
9962 list_add_tail(&cq_event->list,
9963 &phba->sli4_hba.sp_cqe_event_pool);
9967 out_pool_create_fail:
9968 lpfc_sli4_cq_event_pool_destroy(phba);
9973 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
9974 * @phba: pointer to lpfc hba data structure.
9976 * This routine is invoked to free the pool of completion queue events at
9977 * driver unload time. Note that it is the responsibility of the driver
9978 * cleanup routine to free all the outstanding completion-queue events
9979 * allocated from this pool back into the pool before invoking this routine
9980 * to destroy the pool.
9983 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
9985 struct lpfc_cq_event *cq_event, *next_cq_event;
9987 list_for_each_entry_safe(cq_event, next_cq_event,
9988 &phba->sli4_hba.sp_cqe_event_pool, list) {
9989 list_del(&cq_event->list);
9995 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
9996 * @phba: pointer to lpfc hba data structure.
9998 * This routine is the lock free version of the API invoked to allocate a
9999 * completion-queue event from the free pool.
10001 * Return: Pointer to the newly allocated completion-queue event if successful
10004 struct lpfc_cq_event *
10005 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
10007 struct lpfc_cq_event *cq_event = NULL;
10009 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
10010 struct lpfc_cq_event, list);
10015 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
10016 * @phba: pointer to lpfc hba data structure.
10018 * This routine is the lock version of the API invoked to allocate a
10019 * completion-queue event from the free pool.
10021 * Return: Pointer to the newly allocated completion-queue event if successful
10024 struct lpfc_cq_event *
10025 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
10027 struct lpfc_cq_event *cq_event;
10028 unsigned long iflags;
10030 spin_lock_irqsave(&phba->hbalock, iflags);
10031 cq_event = __lpfc_sli4_cq_event_alloc(phba);
10032 spin_unlock_irqrestore(&phba->hbalock, iflags);
10037 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
10038 * @phba: pointer to lpfc hba data structure.
10039 * @cq_event: pointer to the completion queue event to be freed.
10041 * This routine is the lock free version of the API invoked to release a
10042 * completion-queue event back into the free pool.
10045 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
10046 struct lpfc_cq_event *cq_event)
10048 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
10052 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
10053 * @phba: pointer to lpfc hba data structure.
10054 * @cq_event: pointer to the completion queue event to be freed.
10056 * This routine is the lock version of the API invoked to release a
10057 * completion-queue event back into the free pool.
10060 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
10061 struct lpfc_cq_event *cq_event)
10063 unsigned long iflags;
10064 spin_lock_irqsave(&phba->hbalock, iflags);
10065 __lpfc_sli4_cq_event_release(phba, cq_event);
10066 spin_unlock_irqrestore(&phba->hbalock, iflags);
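/*
 * Hypothetical caller showing how the locked alloc/release pair above
 * is intended to be used (a sketch only; the function name is
 * invented):
 */
static void lpfc_example_cq_event_cycle(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	cq_event = lpfc_sli4_cq_event_alloc(phba);	/* takes hbalock */
	if (!cq_event)
		return;					/* pool exhausted */
	/* ... stash the CQE payload and queue it to the worker ... */
	lpfc_sli4_cq_event_release(phba, cq_event);	/* back to pool */
}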
10070 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
10071 * @phba: pointer to lpfc hba data structure.
10073 * This routine frees all the pending completion-queue events back
10074 * into the free pool for device reset.
10077 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
10079 LIST_HEAD(cqelist);
10080 struct lpfc_cq_event *cqe;
10081 unsigned long iflags;
10083 /* Retrieve all the pending WCQEs from pending WCQE lists */
10084 spin_lock_irqsave(&phba->hbalock, iflags);
10085 /* Pending FCP XRI abort events */
10086 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
10088 /* Pending ELS XRI abort events */
10089 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
10091 /* Pending async events */
10092 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
10094 spin_unlock_irqrestore(&phba->hbalock, iflags);
10096 while (!list_empty(&cqelist)) {
10097 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
10098 lpfc_sli4_cq_event_release(phba, cqe);
10103 * lpfc_pci_function_reset - Reset pci function.
10104 * @phba: pointer to lpfc hba data structure.
10106 * This routine is invoked to request a PCI function reset. It destroys
10107 * all resources assigned to the PCI function which originates this request.
10111 * -ENOMEM - No available memory
10112 * -EIO - The mailbox failed to complete successfully.
10115 lpfc_pci_function_reset(struct lpfc_hba *phba)
10117 LPFC_MBOXQ_t *mboxq;
10118 uint32_t rc = 0, if_type;
10119 uint32_t shdr_status, shdr_add_status;
10121 uint32_t port_reset = 0;
10122 union lpfc_sli4_cfg_shdr *shdr;
10123 struct lpfc_register reg_data;
10126 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10128 case LPFC_SLI_INTF_IF_TYPE_0:
10129 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10132 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10133 "0494 Unable to allocate memory for "
10134 "issuing SLI_FUNCTION_RESET mailbox "
10139 /* Setup PCI function reset mailbox-ioctl command */
10140 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
10141 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
10142 LPFC_SLI4_MBX_EMBED);
10143 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10144 shdr = (union lpfc_sli4_cfg_shdr *)
10145 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
10146 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10147 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
10149 if (rc != MBX_TIMEOUT)
10150 mempool_free(mboxq, phba->mbox_mem_pool);
10151 if (shdr_status || shdr_add_status || rc) {
10152 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10153 "0495 SLI_FUNCTION_RESET mailbox "
10154 "failed with status x%x add_status x%x,"
10155 " mbx status x%x\n",
10156 shdr_status, shdr_add_status, rc);
10160 case LPFC_SLI_INTF_IF_TYPE_2:
10161 case LPFC_SLI_INTF_IF_TYPE_6:
10164 * Poll the Port Status Register and wait for RDY for
10165 * up to 30 seconds. If the port doesn't respond, treat
10168 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
10169 if (lpfc_readl(phba->sli4_hba.u.if_type2.
10170 STATUSregaddr, &reg_data.word0)) {
10174 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
10179 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
10180 phba->work_status[0] = readl(
10181 phba->sli4_hba.u.if_type2.ERR1regaddr);
10182 phba->work_status[1] = readl(
10183 phba->sli4_hba.u.if_type2.ERR2regaddr);
10184 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10185 "2890 Port not ready, port status reg "
10186 "0x%x error 1=0x%x, error 2=0x%x\n",
10188 phba->work_status[0],
10189 phba->work_status[1]);
10196 * Reset the port now
10198 reg_data.word0 = 0;
10199 bf_set(lpfc_sliport_ctrl_end, &reg_data,
10200 LPFC_SLIPORT_LITTLE_ENDIAN);
10201 bf_set(lpfc_sliport_ctrl_ip, &reg_data,
10202 LPFC_SLIPORT_INIT_PORT);
10203 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
10206 pci_read_config_word(phba->pcidev,
10207 PCI_DEVICE_ID, &devid);
10212 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
10218 case LPFC_SLI_INTF_IF_TYPE_1:
10224 /* Catch the not-ready port failure after a port reset. */
10226 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10227 "3317 HBA not functional: IP Reset Failed "
10228 "try: echo fw_reset > board_mode\n");
10236 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
10237 * @phba: pointer to lpfc hba data structure.
10239 * This routine is invoked to set up the PCI device memory space for device
10240 * with SLI-4 interface spec.
10244 * other values - error
10247 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
10249 struct pci_dev *pdev = phba->pcidev;
10250 unsigned long bar0map_len, bar1map_len, bar2map_len;
10257 /* Set the device DMA mask size */
10258 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10260 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
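/*
 * Illustrative note, not driver code: the two calls above are the
 * standard DMA-mask fallback -- prefer 64-bit addressing, settle for
 * 32-bit if the platform rejects it, and fail the probe otherwise:
 */
#if 0
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
			return -EIO;	/* no usable DMA addressing */
	}
#endif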
10265 * The BARs and register set definitions and offset locations are
10266 * dependent on the if_type.
10268 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
10269 &phba->sli4_hba.sli_intf.word0)) {
10273 /* There is no SLI3 failback for SLI4 devices. */
10274 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
10275 LPFC_SLI_INTF_VALID) {
10276 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10277 "2894 SLI_INTF reg contents invalid "
10278 "sli_intf reg 0x%x\n",
10279 phba->sli4_hba.sli_intf.word0);
10283 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10285 * Get the bus address of the SLI4 device BAR regions and the
10286 * number of bytes required by each mapping. The mapping of the
10287 * particular PCI BAR regions is dependent on the type of
10290 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
10291 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
10292 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
10295 * Map SLI4 PCI Config Space Register base to a kernel virtual
10298 phba->sli4_hba.conf_regs_memmap_p =
10299 ioremap(phba->pci_bar0_map, bar0map_len);
10300 if (!phba->sli4_hba.conf_regs_memmap_p) {
10301 dev_printk(KERN_ERR, &pdev->dev,
10302 "ioremap failed for SLI4 PCI config "
10306 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
10307 /* Set up BAR0 PCI config space register memory map */
10308 lpfc_sli4_bar0_register_memmap(phba, if_type);
10310 phba->pci_bar0_map = pci_resource_start(pdev, 1);
10311 bar0map_len = pci_resource_len(pdev, 1);
10312 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10313 dev_printk(KERN_ERR, &pdev->dev,
10314 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
10317 phba->sli4_hba.conf_regs_memmap_p =
10318 ioremap(phba->pci_bar0_map, bar0map_len);
10319 if (!phba->sli4_hba.conf_regs_memmap_p) {
10320 dev_printk(KERN_ERR, &pdev->dev,
10321 "ioremap failed for SLI4 PCI config "
10325 lpfc_sli4_bar0_register_memmap(phba, if_type);
10328 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10329 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
10331 * Map SLI4 if type 0 HBA Control Register base to a
10332 * kernel virtual address and setup the registers.
10334 phba->pci_bar1_map = pci_resource_start(pdev,
10336 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10337 phba->sli4_hba.ctrl_regs_memmap_p =
10338 ioremap(phba->pci_bar1_map,
10340 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
10341 dev_err(&pdev->dev,
10342 "ioremap failed for SLI4 HBA "
10343 "control registers.\n");
10345 goto out_iounmap_conf;
10347 phba->pci_bar2_memmap_p =
10348 phba->sli4_hba.ctrl_regs_memmap_p;
10349 lpfc_sli4_bar1_register_memmap(phba, if_type);
10352 goto out_iounmap_conf;
10356 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
10357 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
10359 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
10360 * virtual address and setup the registers.
10362 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
10363 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10364 phba->sli4_hba.drbl_regs_memmap_p =
10365 ioremap(phba->pci_bar1_map, bar1map_len);
10366 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10367 dev_err(&pdev->dev,
10368 "ioremap failed for SLI4 HBA doorbell registers.\n");
10370 goto out_iounmap_conf;
10372 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
10373 lpfc_sli4_bar1_register_memmap(phba, if_type);
10376 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10377 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10379 * Map SLI4 if type 0 HBA Doorbell Register base to
10380 * a kernel virtual address and setup the registers.
10382 phba->pci_bar2_map = pci_resource_start(pdev,
10384 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10385 phba->sli4_hba.drbl_regs_memmap_p =
10386 ioremap(phba->pci_bar2_map,
10388 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10389 dev_err(&pdev->dev,
10390 "ioremap failed for SLI4 HBA"
10391 " doorbell registers.\n");
10393 goto out_iounmap_ctrl;
10395 phba->pci_bar4_memmap_p =
10396 phba->sli4_hba.drbl_regs_memmap_p;
10397 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
10399 goto out_iounmap_all;
10402 goto out_iounmap_all;
10406 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
10407 pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10409 * Map SLI4 if type 6 HBA DPP Register base to a kernel
10410 * virtual address and setup the registers.
10412 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
10413 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10414 phba->sli4_hba.dpp_regs_memmap_p =
10415 ioremap(phba->pci_bar2_map, bar2map_len);
10416 if (!phba->sli4_hba.dpp_regs_memmap_p) {
10417 dev_err(&pdev->dev,
10418 "ioremap failed for SLI4 HBA dpp registers.\n");
10420 goto out_iounmap_ctrl;
10422 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
10425 /* Set up the EQ/CQ register handling functions now */
10427 case LPFC_SLI_INTF_IF_TYPE_0:
10428 case LPFC_SLI_INTF_IF_TYPE_2:
10429 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
10430 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
10431 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
10433 case LPFC_SLI_INTF_IF_TYPE_6:
10434 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
10435 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
10436 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
10445 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10447 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10449 iounmap(phba->sli4_hba.conf_regs_memmap_p);
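/*
 * Illustrative sketch, not driver code: the out_iounmap_* labels above
 * form the usual goto-unwind ladder -- each failure jumps to the label
 * that undoes everything set up before it, in reverse order. Minimal
 * two-BAR example with hypothetical BAR indices:
 */
#if 0
static int map_two_bars(struct pci_dev *pdev,
			void __iomem **conf, void __iomem **ctrl)
{
	*conf = ioremap(pci_resource_start(pdev, 0),
			pci_resource_len(pdev, 0));
	if (!*conf)
		return -ENODEV;

	*ctrl = ioremap(pci_resource_start(pdev, 2),
			pci_resource_len(pdev, 2));
	if (!*ctrl)
		goto out_iounmap_conf;

	return 0;	/* mappings would be stashed in driver state */

out_iounmap_conf:
	iounmap(*conf);	/* undo in reverse order of setup */
	return -ENODEV;
}
#endif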
10455 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
10456 * @phba: pointer to lpfc hba data structure.
10458 * This routine is invoked to unset the PCI device memory space for device
10459 * with SLI-4 interface spec.
10462 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
10465 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10468 case LPFC_SLI_INTF_IF_TYPE_0:
10469 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10470 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10471 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10473 case LPFC_SLI_INTF_IF_TYPE_2:
10474 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10476 case LPFC_SLI_INTF_IF_TYPE_6:
10477 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10478 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10479 if (phba->sli4_hba.dpp_regs_memmap_p)
10480 iounmap(phba->sli4_hba.dpp_regs_memmap_p);
10482 case LPFC_SLI_INTF_IF_TYPE_1:
10484 dev_printk(KERN_ERR, &phba->pcidev->dev,
10485 "FATAL - unsupported SLI4 interface type - %d\n",
10492 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
10493 * @phba: pointer to lpfc hba data structure.
10495 * This routine is invoked to enable the MSI-X interrupt vectors to device
10496 * with SLI-3 interface specs.
10500 * other values - error
10503 lpfc_sli_enable_msix(struct lpfc_hba *phba)
10508 /* Set up MSI-X multi-message vectors */
10509 rc = pci_alloc_irq_vectors(phba->pcidev,
10510 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
10512 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10513 "0420 PCI enable MSI-X failed (%d)\n", rc);
10518 * Assign MSI-X vectors to interrupt handlers
10521 /* vector-0 is associated with the slow-path handler */
10522 rc = request_irq(pci_irq_vector(phba->pcidev, 0),
10523 &lpfc_sli_sp_intr_handler, 0,
10524 LPFC_SP_DRIVER_HANDLER_NAME, phba);
10526 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10527 "0421 MSI-X slow-path request_irq failed "
10532 /* vector-1 is associated with the fast-path handler */
10533 rc = request_irq(pci_irq_vector(phba->pcidev, 1),
10534 &lpfc_sli_fp_intr_handler, 0,
10535 LPFC_FP_DRIVER_HANDLER_NAME, phba);
10538 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10539 "0429 MSI-X fast-path request_irq failed "
10545 * Configure HBA MSI-X attention conditions to messages
10547 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10551 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10552 "0474 Unable to allocate memory for issuing "
10553 "MBOX_CONFIG_MSI command\n");
10556 rc = lpfc_config_msi(phba, pmb);
10559 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10560 if (rc != MBX_SUCCESS) {
10561 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
10562 "0351 Config MSI mailbox command failed, "
10563 "mbxCmd x%x, mbxStatus x%x\n",
10564 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
10568 /* Free memory allocated for mailbox command */
10569 mempool_free(pmb, phba->mbox_mem_pool);
10573 /* Free memory allocated for mailbox command */
10574 mempool_free(pmb, phba->mbox_mem_pool);
10577 /* free the irq already requested */
10578 free_irq(pci_irq_vector(phba->pcidev, 1), phba);
10581 /* free the irq already requested */
10582 free_irq(pci_irq_vector(phba->pcidev, 0), phba);
10585 /* Unconfigure MSI-X capability structure */
10586 pci_free_irq_vectors(phba->pcidev);
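/*
 * Illustrative sketch, not driver code: the sequence above follows the
 * generic MSI-X bring-up shape -- allocate vectors, request each irq,
 * and unwind in reverse on failure. Hypothetical two-vector example:
 */
#if 0
static int enable_two_vectors(struct pci_dev *pdev, irq_handler_t handler,
			      void *data)
{
	int rc;

	rc = pci_alloc_irq_vectors(pdev, 2, 2, PCI_IRQ_MSIX);
	if (rc < 0)
		return rc;

	rc = request_irq(pci_irq_vector(pdev, 0), handler, 0, "drv-sp", data);
	if (rc)
		goto free_vectors;

	rc = request_irq(pci_irq_vector(pdev, 1), handler, 0, "drv-fp", data);
	if (rc)
		goto free_irq0;

	return 0;

free_irq0:
	free_irq(pci_irq_vector(pdev, 0), data);
free_vectors:
	pci_free_irq_vectors(pdev);
	return rc;
}
#endif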
10593 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
10594 * @phba: pointer to lpfc hba data structure.
10596 * This routine is invoked to enable the MSI interrupt mode to device with
10597 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
10598 * enable the MSI vector. The device driver is responsible for calling
10599 * request_irq() to register the MSI vector with an interrupt handler,
10600 * which is done in this function.
10604 * other values - error
10607 lpfc_sli_enable_msi(struct lpfc_hba *phba)
10611 rc = pci_enable_msi(phba->pcidev);
10613 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10614 "0462 PCI enable MSI mode success.\n");
10616 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10617 "0471 PCI enable MSI mode failed (%d)\n", rc);
10621 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10622 0, LPFC_DRIVER_NAME, phba);
10624 pci_disable_msi(phba->pcidev);
10625 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10626 "0478 MSI request_irq failed (%d)\n", rc);
10632 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
10633 * @phba: pointer to lpfc hba data structure.
10635 * This routine is invoked to enable device interrupt and associate driver's
10636 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
10637 * spec. Depending on the interrupt mode configured for the driver, the driver
10638 * will try to fall back from the configured interrupt mode to an interrupt
10639 * mode which is supported by the platform, kernel, and device in the order
10641 * MSI-X -> MSI -> IRQ.
10645 * other values - error
10648 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
10650 uint32_t intr_mode = LPFC_INTR_ERROR;
10653 if (cfg_mode == 2) {
10654 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
10655 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
10657 /* Now, try to enable MSI-X interrupt mode */
10658 retval = lpfc_sli_enable_msix(phba);
10660 /* Indicate initialization to MSI-X mode */
10661 phba->intr_type = MSIX;
10667 /* Fall back to MSI if MSI-X initialization failed */
10668 if (cfg_mode >= 1 && phba->intr_type == NONE) {
10669 retval = lpfc_sli_enable_msi(phba);
10671 /* Indicate initialization to MSI mode */
10672 phba->intr_type = MSI;
10677 /* Fall back to INTx if both MSI-X and MSI initialization failed */
10678 if (phba->intr_type == NONE) {
10679 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10680 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
10682 /* Indicate initialization to INTx mode */
10683 phba->intr_type = INTx;
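/*
 * Illustrative sketch, not driver code: the MSI-X -> MSI -> INTx cascade
 * above reduces to trying each mode in order and stopping at the first
 * success. try_msix()/try_msi()/try_intx() below are hypothetical:
 */
#if 0
	uint32_t mode = LPFC_INTR_ERROR;

	if (cfg_mode == 2 && !try_msix(phba))
		mode = 2;			/* MSI-X */
	else if (cfg_mode >= 1 && !try_msi(phba))
		mode = 1;			/* MSI */
	else if (!try_intx(phba))
		mode = 0;			/* INTx */
#endif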
10691 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
10692 * @phba: pointer to lpfc hba data structure.
10694 * This routine is invoked to disable device interrupt and disassociate the
10695 * driver's interrupt handler(s) from interrupt vector(s) to device with
10696 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
10697 * release the interrupt vector(s) for the message signaled interrupt.
10700 lpfc_sli_disable_intr(struct lpfc_hba *phba)
10704 if (phba->intr_type == MSIX)
10705 nr_irqs = LPFC_MSIX_VECTORS;
10709 for (i = 0; i < nr_irqs; i++)
10710 free_irq(pci_irq_vector(phba->pcidev, i), phba);
10711 pci_free_irq_vectors(phba->pcidev);
10713 /* Reset interrupt management states */
10714 phba->intr_type = NONE;
10715 phba->sli.slistat.sli_intr = 0;
10719 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
10720 * @phba: pointer to lpfc hba data structure.
10721 * @id: EQ vector index or Hardware Queue index
10722 * @match: LPFC_FIND_BY_EQ = match by EQ
10723 * LPFC_FIND_BY_HDWQ = match by Hardware Queue
10724 * Return the CPU that matches the selection criteria
10727 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
10729 struct lpfc_vector_map_info *cpup;
10732 /* Loop through all CPUs */
10733 for_each_present_cpu(cpu) {
10734 cpup = &phba->sli4_hba.cpu_map[cpu];
10736 /* If we are matching by EQ, there may be multiple CPUs using
10737 * the same vector, so select the one with
10738 * LPFC_CPU_FIRST_IRQ set.
10740 if ((match == LPFC_FIND_BY_EQ) &&
10741 (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
10745 /* If matching by HDWQ, select the first CPU that matches */
10746 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
10754 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
10755 * @phba: pointer to lpfc hba data structure.
10756 * @cpu: CPU map index
10757 * @phys_id: CPU package physical id
10758 * @core_id: CPU core id
10761 lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
10762 uint16_t phys_id, uint16_t core_id)
10764 struct lpfc_vector_map_info *cpup;
10767 for_each_present_cpu(idx) {
10768 cpup = &phba->sli4_hba.cpu_map[idx];
10769 /* Does the cpup match the one we are looking for */
10770 if ((cpup->phys_id == phys_id) &&
10771 (cpup->core_id == core_id) &&
10780 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
10781 * @phba: pointer to lpfc hba data structure.
10782 * @eqidx: index for eq and irq vector
10783 * @flag: flags to set for vector_map structure
10784 * @cpu: cpu used to index vector_map structure
10786 * The routine assigns eq info into vector_map structure
10789 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
10792 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
10793 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
10796 cpup->flag |= flag;
10798 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10799 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
10800 cpu, eqhdl->irq, cpup->eq, cpup->flag);
10804 * lpfc_cpu_map_array_init - Initialize cpu_map structure
10805 * @phba: pointer to lpfc hba data structure.
10807 * The routine initializes the cpu_map array structure
10810 lpfc_cpu_map_array_init(struct lpfc_hba *phba)
10812 struct lpfc_vector_map_info *cpup;
10813 struct lpfc_eq_intr_info *eqi;
10816 for_each_possible_cpu(cpu) {
10817 cpup = &phba->sli4_hba.cpu_map[cpu];
10818 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
10819 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
10820 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
10821 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
10823 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
10824 INIT_LIST_HEAD(&eqi->list);
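/*
 * Illustrative sketch, not driver code: the loop above walks every
 * possible cpu and resets that cpu's slot; per_cpu_ptr() resolves a
 * percpu allocation to one cpu's instance. Hypothetical standalone use:
 */
#if 0
	struct my_stat { struct list_head list; };	/* hypothetical type */
	struct my_stat __percpu *stats = alloc_percpu(struct my_stat);
	int cpu;

	if (stats)
		for_each_possible_cpu(cpu)
			INIT_LIST_HEAD(&per_cpu_ptr(stats, cpu)->list);
#endif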
10830 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
10831 * @phba: pointer to lpfc hba data structure.
10833 * The routine initializes the hba_eq_hdl array structure
10836 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
10838 struct lpfc_hba_eq_hdl *eqhdl;
10841 for (i = 0; i < phba->cfg_irq_chann; i++) {
10842 eqhdl = lpfc_get_eq_hdl(i);
10843 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
10844 eqhdl->phba = phba;
10849 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
10850 * @phba: pointer to lpfc hba data structure.
10851 * @vectors: number of msix vectors allocated.
10853 * The routine will figure out the CPU affinity assignment for every
10854 * MSI-X vector allocated for the HBA.
10855 * In addition, the CPU to IO channel mapping will be calculated
10856 * and the phba->sli4_hba.cpu_map array will reflect this.
10859 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
10861 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
10862 int max_phys_id, min_phys_id;
10863 int max_core_id, min_core_id;
10864 struct lpfc_vector_map_info *cpup;
10865 struct lpfc_vector_map_info *new_cpup;
10867 struct cpuinfo_x86 *cpuinfo;
10869 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
10870 struct lpfc_hdwq_stat *c_stat;
10874 min_phys_id = LPFC_VECTOR_MAP_EMPTY;
10876 min_core_id = LPFC_VECTOR_MAP_EMPTY;
10878 /* Update CPU map with physical id and core id of each CPU */
10879 for_each_present_cpu(cpu) {
10880 cpup = &phba->sli4_hba.cpu_map[cpu];
10882 cpuinfo = &cpu_data(cpu);
10883 cpup->phys_id = cpuinfo->phys_proc_id;
10884 cpup->core_id = cpuinfo->cpu_core_id;
10885 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
10886 cpup->flag |= LPFC_CPU_MAP_HYPER;
10888 /* No distinction between CPUs for other platforms */
10890 cpup->core_id = cpu;
10893 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10894 "3328 CPU %d physid %d coreid %d flag x%x\n",
10895 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
10897 if (cpup->phys_id > max_phys_id)
10898 max_phys_id = cpup->phys_id;
10899 if (cpup->phys_id < min_phys_id)
10900 min_phys_id = cpup->phys_id;
10902 if (cpup->core_id > max_core_id)
10903 max_core_id = cpup->core_id;
10904 if (cpup->core_id < min_core_id)
10905 min_core_id = cpup->core_id;
10908 /* After looking at each irq vector assigned to this pcidev, it's
10909 * possible to see that not ALL CPUs have been accounted for.
10910 * Next we will set any unassigned (unaffinitized) cpu map
10911 * entries to an IRQ on the same phys_id.
10913 first_cpu = cpumask_first(cpu_present_mask);
10914 start_cpu = first_cpu;
10916 for_each_present_cpu(cpu) {
10917 cpup = &phba->sli4_hba.cpu_map[cpu];
10919 /* Is this CPU entry unassigned */
10920 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
10921 /* Mark CPU as IRQ not assigned by the kernel */
10922 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
10924 /* If so, find a new_cpup that's on the SAME
10925 * phys_id as cpup. start_cpu will start where we
10926 * left off so all unassigned entries don't get assigned
10927 * the IRQ of the first entry.
10929 new_cpu = start_cpu;
10930 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10931 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10932 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
10933 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
10934 (new_cpup->phys_id == cpup->phys_id))
10936 new_cpu = cpumask_next(
10937 new_cpu, cpu_present_mask);
10938 if (new_cpu == nr_cpumask_bits)
10939 new_cpu = first_cpu;
10941 /* At this point, we leave the CPU as unassigned */
10944 /* We found a matching phys_id, so copy the IRQ info */
10945 cpup->eq = new_cpup->eq;
10947 /* Bump start_cpu to the next slot to minimize the
10948 * chance of having multiple unassigned CPU entries
10949 * selecting the same IRQ.
10951 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
10952 if (start_cpu == nr_cpumask_bits)
10953 start_cpu = first_cpu;
10955 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10956 "3337 Set Affinity: CPU %d "
10957 "eq %d from peer cpu %d same "
10959 cpu, cpup->eq, new_cpu,
10964 /* Set any unassigned cpu map entries to an IRQ on any phys_id */
10965 start_cpu = first_cpu;
10967 for_each_present_cpu(cpu) {
10968 cpup = &phba->sli4_hba.cpu_map[cpu];
10970 /* Is this entry unassigned */
10971 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
10972 /* Mark it as IRQ not assigned by the kernel */
10973 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
10975 /* If so, find a new_cpup that's on ANY phys_id
10976 * as the cpup. start_cpu will start where we
10977 * left off so all unassigned entries don't get
10978 * assigned the IRQ of the first entry.
10980 new_cpu = start_cpu;
10981 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10982 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10983 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
10984 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
10986 new_cpu = cpumask_next(
10987 new_cpu, cpu_present_mask);
10988 if (new_cpu == nr_cpumask_bits)
10989 new_cpu = first_cpu;
10991 /* We should never leave an entry unassigned */
10992 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10993 "3339 Set Affinity: CPU %d "
10994 "eq %d UNASSIGNED\n",
10995 cpu, cpup->eq);
10998 /* We found an available entry, copy the IRQ info */
10999 cpup->eq = new_cpup->eq;
11001 /* Bump start_cpu to the next slot to minimize the
11002 * chance of having multiple unassigned CPU entries
11003 * selecting the same IRQ.
11005 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11006 if (start_cpu == nr_cpumask_bits)
11007 start_cpu = first_cpu;
11009 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11010 "3338 Set Affinity: CPU %d "
11011 "eq %d from peer cpu %d (%d/%d)\n",
11012 cpu, cpup->eq, new_cpu,
11013 new_cpup->phys_id, new_cpup->core_id);
11017 /* Assign hdwq indices that are unique across all cpus in the map
11018 * that are also FIRST_CPUs.
11021 for_each_present_cpu(cpu) {
11022 cpup = &phba->sli4_hba.cpu_map[cpu];
11024 /* Only FIRST IRQs get a hdwq index assignment. */
11025 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11028 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
11031 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11032 "3333 Set Affinity: CPU %d (phys %d core %d): "
11033 "hdwq %d eq %d flg x%x\n",
11034 cpu, cpup->phys_id, cpup->core_id,
11035 cpup->hdwq, cpup->eq, cpup->flag);
11037 /* Associate a hdwq with each cpu_map entry
11038 * This will be 1 to 1 - hdwq to cpu, unless there are fewer
11039 * hardware queues than CPUs. In that case we will just round-robin
11040 * the available hardware queues as they get assigned to CPUs.
11041 * The next_idx is the idx from the FIRST_CPU loop above to account
11042 * for irq_chann < hdwq. The idx is used for round-robin assignments
11043 * and needs to start at 0.
11048 for_each_present_cpu(cpu) {
11049 cpup = &phba->sli4_hba.cpu_map[cpu];
11051 /* FIRST cpus are already mapped. */
11052 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
11055 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
11056 * of the unassigned cpus to the next idx so that all
11057 * hdw queues are fully utilized.
11059 if (next_idx < phba->cfg_hdw_queue) {
11060 cpup->hdwq = next_idx;
11065 /* Not a First CPU and all hdw_queues are used. Reuse a
11066 * Hardware Queue for another CPU, so be smart about it
11067 * and pick one that has its IRQ/EQ mapped to the same phys_id
11068 * (CPU package) and core_id.
11070 new_cpu = start_cpu;
11071 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11072 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11073 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
11074 new_cpup->phys_id == cpup->phys_id &&
11075 new_cpup->core_id == cpup->core_id) {
11078 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
11079 if (new_cpu == nr_cpumask_bits)
11080 new_cpu = first_cpu;
11083 /* If we can't match both phys_id and core_id,
11084 * settle for just a phys_id match.
11086 new_cpu = start_cpu;
11087 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11088 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11089 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
11090 new_cpup->phys_id == cpup->phys_id)
11093 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
11094 if (new_cpu == nr_cpumask_bits)
11095 new_cpu = first_cpu;
11098 /* Otherwise just round robin on cfg_hdw_queue */
11099 cpup->hdwq = idx % phba->cfg_hdw_queue;
11103 /* We found an available entry, copy the IRQ info */
11104 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11105 if (start_cpu == nr_cpumask_bits)
11106 start_cpu = first_cpu;
11107 cpup->hdwq = new_cpup->hdwq;
11109 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11110 "3335 Set Affinity: CPU %d (phys %d core %d): "
11111 "hdwq %d eq %d flg x%x\n",
11112 cpu, cpup->phys_id, cpup->core_id,
11113 cpup->hdwq, cpup->eq, cpup->flag);
11117 * Initialize the cpu_map slots for not-present cpus in case
11118 * a cpu is hot-added. Perform a simple hdwq round robin assignment.
11121 for_each_possible_cpu(cpu) {
11122 cpup = &phba->sli4_hba.cpu_map[cpu];
11123 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11124 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
11125 c_stat->hdwq_no = cpup->hdwq;
11127 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
11130 cpup->hdwq = idx++ % phba->cfg_hdw_queue;
11131 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11132 c_stat->hdwq_no = cpup->hdwq;
11134 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11135 "3340 Set Affinity: not present "
11136 "CPU %d hdwq %d\n",
11140 /* The cpu_map array will be used later during initialization
11141 * when EQ / CQ / WQs are allocated and configured.
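/*
 * Illustrative sketch, not driver code: the modulo round-robin used in
 * the loops above spreads N cpus across a smaller pool of hardware
 * queues. assign_hdwq() and the queue count below are hypothetical:
 */
#if 0
	unsigned int idx = 0, nr_hdwq = 4;	/* hypothetical queue count */
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		assign_hdwq(cpu, idx++ % nr_hdwq);
#endif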
11147 * lpfc_cpuhp_get_eq
11149 * @phba: pointer to lpfc hba data structure.
11150 * @cpu: cpu going offline
11154 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
11155 struct list_head *eqlist)
11157 const struct cpumask *maskp;
11158 struct lpfc_queue *eq;
11159 struct cpumask *tmp;
11162 tmp = kzalloc(cpumask_size(), GFP_KERNEL);
11166 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11167 maskp = pci_irq_get_affinity(phba->pcidev, idx);
11171 * if the irq is not affinitized to the cpu going offline,
11172 * then we don't need to poll the eq attached to it.
11175 if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
11177 /* Get the cpus that are online and affinitized
11178 * to this irq vector. If the count is more than
11179 * one, then cpuhp is not going to shut down this
11180 * vector. Since this cpu has not gone offline
11181 * yet, we need >1.
11183 cpumask_and(tmp, maskp, cpu_online_mask);
11184 if (cpumask_weight(tmp) > 1)
11187 /* Now that we have an irq to shut down, get the eq
11188 * mapped to this irq. Note: multiple hdwq's in
11189 * the software can share an eq, but eventually
11190 * only one eq will be mapped to this vector
11192 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
11193 list_add(&eq->_poll_list, eqlist);
11199 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
11201 if (phba->sli_rev != LPFC_SLI_REV4)
11204 cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
11207 * unregistering the instance doesn't stop the polling
11208 * timer. Wait for the poll timer to retire.
11211 del_timer_sync(&phba->cpuhp_poll_timer);
11214 static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
11216 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
11219 __lpfc_cpuhp_remove(phba);
11222 static void lpfc_cpuhp_add(struct lpfc_hba *phba)
11224 if (phba->sli_rev != LPFC_SLI_REV4)
11229 if (!list_empty(&phba->poll_list))
11230 mod_timer(&phba->cpuhp_poll_timer,
11231 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
11235 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
11239 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
11241 if (phba->pport->load_flag & FC_UNLOADING) {
11246 if (phba->sli_rev != LPFC_SLI_REV4) {
11251 /* proceed with the hotplug */
11256 * lpfc_irq_set_aff - set IRQ affinity
11257 * @eqhdl: EQ handle
11258 * @cpu: cpu to set affinity
11262 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
11264 cpumask_clear(&eqhdl->aff_mask);
11265 cpumask_set_cpu(cpu, &eqhdl->aff_mask);
11266 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
11267 irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
11271 * lpfc_irq_clear_aff - clear IRQ affinity
11272 * @eqhdl: EQ handle
11276 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
11278 cpumask_clear(&eqhdl->aff_mask);
11279 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
11280 irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
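/*
 * Illustrative usage, not driver code: a typical pin/unpin sequence with
 * the two helpers above -- pin an EQ's vector to one cpu so irqbalance
 * leaves it alone, then release it back to the balancer later:
 */
#if 0
	lpfc_irq_set_aff(eqhdl, cpu);	/* pin: IRQ_NO_BALANCING set */
	/* service interrupts on that cpu for a while */
	lpfc_irq_clear_aff(eqhdl);	/* unpin: balancer may migrate it */
#endif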
11284 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
11285 * @phba: pointer to HBA context object.
11286 * @cpu: cpu going offline/online
11287 * @offline: true, cpu is going offline. false, cpu is coming online.
11289 * If cpu is going offline, we'll make a best effort to find the next
11290 * online cpu on the phba's NUMA node and migrate all offlining IRQ affinities.
11292 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
11294 * Note: Call only if cfg_irq_numa is enabled, otherwise rely on
11295 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
11299 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
11301 struct lpfc_vector_map_info *cpup;
11302 struct cpumask *aff_mask;
11303 unsigned int cpu_select, cpu_next, idx;
11304 const struct cpumask *numa_mask;
11306 if (!phba->cfg_irq_numa)
11309 numa_mask = &phba->sli4_hba.numa_mask;
11311 if (!cpumask_test_cpu(cpu, numa_mask))
11314 cpup = &phba->sli4_hba.cpu_map[cpu];
11316 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11320 /* Find next online CPU on NUMA node */
11321 cpu_next = cpumask_next_wrap(cpu, numa_mask, cpu, true);
11322 cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu_next);
11324 /* Found a valid CPU */
11325 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
11326 /* Go through each eqhdl and ensure offlining
11327 * cpu aff_mask is migrated
11329 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11330 aff_mask = lpfc_get_aff_mask(idx);
11332 /* Migrate affinity */
11333 if (cpumask_test_cpu(cpu, aff_mask))
11334 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
11338 /* Rely on irqbalance if no online CPUs left on NUMA */
11339 for (idx = 0; idx < phba->cfg_irq_chann; idx++)
11340 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
11343 /* Migrate affinity back to this CPU */
11344 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
11348 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
11350 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
11351 struct lpfc_queue *eq, *next;
11356 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
11360 if (__lpfc_cpuhp_checks(phba, &retval))
11363 lpfc_irq_rebalance(phba, cpu, true);
11365 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
11369 /* start polling on these eq's */
11370 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
11371 list_del_init(&eq->_poll_list);
11372 lpfc_sli4_start_polling(eq);
11378 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
11380 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
11381 struct lpfc_queue *eq, *next;
11386 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
11390 if (__lpfc_cpuhp_checks(phba, &retval))
11393 lpfc_irq_rebalance(phba, cpu, false);
11395 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
11396 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
11398 lpfc_sli4_stop_polling(eq);
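/*
 * Illustrative sketch, not driver code: lpfc_cpu_offline()/lpfc_cpu_online()
 * above are multi-instance cpuhp callbacks; registering a dynamic hotplug
 * state for them would be expected to look roughly like this (the state
 * name string is an assumption, not the driver's actual registration):
 */
#if 0
	lpfc_cpuhp_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
						   "scsi/lpfc:online",
						   lpfc_cpu_online,
						   lpfc_cpu_offline);
#endif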
11405 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
11406 * @phba: pointer to lpfc hba data structure.
11408 * This routine is invoked to enable the MSI-X interrupt vectors to device
11409 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them
11410 * to cpus on the system.
11412 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
11413 * the number of cpus on the same numa node as this adapter. The vectors are
11414 * allocated without requesting OS affinity mapping. A vector will be
11415 * allocated and assigned to each online and offline cpu. If the cpu is
11416 * online, then affinity will be set to that cpu. If the cpu is offline, then
11417 * affinity will be set to the nearest peer cpu within the numa node that is
11418 * online. If there are no online cpus within the numa node, affinity is not
11419 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
11420 * is consistent with the way cpu online/offline is handled when cfg_irq_numa is
11423 * If numa mode is not enabled and there is more than 1 vector allocated, then
11424 * the driver relies on the managed irq interface, where the OS assigns the
11425 * vector-to-cpu affinity. The driver will then use that affinity mapping to set up its
11426 * cpu mapping table.
11430 * other values - error
11433 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
11435 int vectors, rc, index;
11437 const struct cpumask *numa_mask = NULL;
11438 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
11439 struct lpfc_hba_eq_hdl *eqhdl;
11440 const struct cpumask *maskp;
11442 unsigned int flags = PCI_IRQ_MSIX;
11444 /* Set up MSI-X multi-message vectors */
11445 vectors = phba->cfg_irq_chann;
11447 if (phba->cfg_irq_numa) {
11448 numa_mask = &phba->sli4_hba.numa_mask;
11449 cpu_cnt = cpumask_weight(numa_mask);
11450 vectors = min(phba->cfg_irq_chann, cpu_cnt);
11452 /* cpu: iterates over numa_mask including offline or online
11453 * cpu_select: iterates over online numa_mask to set affinity
11455 cpu = cpumask_first(numa_mask);
11456 cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
11458 flags |= PCI_IRQ_AFFINITY;
11461 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
11463 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11464 "0484 PCI enable MSI-X failed (%d)\n", rc);
11469 /* Assign MSI-X vectors to interrupt handlers */
11470 for (index = 0; index < vectors; index++) {
11471 eqhdl = lpfc_get_eq_hdl(index);
11472 name = eqhdl->handler_name;
11473 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
11474 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
11475 LPFC_DRIVER_HANDLER_NAME"%d", index);
11477 eqhdl->idx = index;
11478 rc = request_irq(pci_irq_vector(phba->pcidev, index),
11479 &lpfc_sli4_hba_intr_handler, 0,
11482 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11483 "0486 MSI-X fast-path (%d) "
11484 "request_irq failed (%d)\n", index, rc);
11488 eqhdl->irq = pci_irq_vector(phba->pcidev, index);
11490 if (phba->cfg_irq_numa) {
11491 /* If found a neighboring online cpu, set affinity */
11492 if (cpu_select < nr_cpu_ids)
11493 lpfc_irq_set_aff(eqhdl, cpu_select);
11495 /* Assign EQ to cpu_map */
11496 lpfc_assign_eq_map_info(phba, index,
11497 LPFC_CPU_FIRST_IRQ,
11500 /* Iterate to next offline or online cpu in numa_mask */
11501 cpu = cpumask_next(cpu, numa_mask);
11503 /* Find next online cpu in numa_mask to set affinity */
11504 cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
11505 } else if (vectors == 1) {
11506 cpu = cpumask_first(cpu_present_mask);
11507 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
11510 maskp = pci_irq_get_affinity(phba->pcidev, index);
11513 /* Loop through all CPUs associated with vector index */
11514 for_each_cpu_and(cpu, maskp, cpu_present_mask) {
11515 /* If this is the first CPU that's assigned to
11516 * this vector, set LPFC_CPU_FIRST_IRQ.
11518 lpfc_assign_eq_map_info(phba, index,
11520 LPFC_CPU_FIRST_IRQ : 0,
11528 if (vectors != phba->cfg_irq_chann) {
11529 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11530 "3238 Reducing IO channels to match number of "
11531 "MSI-X vectors, requested %d got %d\n",
11532 phba->cfg_irq_chann, vectors);
11533 if (phba->cfg_irq_chann > vectors)
11534 phba->cfg_irq_chann = vectors;
11540 /* free the irq already requested */
11541 for (--index; index >= 0; index--) {
11542 eqhdl = lpfc_get_eq_hdl(index);
11543 lpfc_irq_clear_aff(eqhdl);
11544 irq_set_affinity_hint(eqhdl->irq, NULL);
11545 free_irq(eqhdl->irq, eqhdl);
11548 /* Unconfigure MSI-X capability structure */
11549 pci_free_irq_vectors(phba->pcidev);
11556 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
11557 * @phba: pointer to lpfc hba data structure.
11559 * This routine is invoked to enable the MSI interrupt mode to device with
11560 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
11561 * called to enable the MSI vector. The device driver is responsible for
11562 * calling request_irq() to register the MSI vector with an interrupt
11563 * handler, which is done in this function.
11567 * other values - error
11570 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
11574 struct lpfc_hba_eq_hdl *eqhdl;
11576 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
11577 PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
11579 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11580 "0487 PCI enable MSI mode success.\n");
11582 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11583 "0488 PCI enable MSI mode failed (%d)\n", rc);
11584 return rc ? rc : -1;
11587 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11588 0, LPFC_DRIVER_NAME, phba);
11590 pci_free_irq_vectors(phba->pcidev);
11591 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11592 "0490 MSI request_irq failed (%d)\n", rc);
11596 eqhdl = lpfc_get_eq_hdl(0);
11597 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
11599 cpu = cpumask_first(cpu_present_mask);
11600 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
11602 for (index = 0; index < phba->cfg_irq_chann; index++) {
11603 eqhdl = lpfc_get_eq_hdl(index);
11604 eqhdl->idx = index;
11611 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
11612 * @phba: pointer to lpfc hba data structure.
11614 * This routine is invoked to enable device interrupt and associate driver's
11615 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
11616 * interface spec. Depending on the interrupt mode configured for the driver,
11617 * the driver will try to fall back from the configured interrupt mode to an
11618 * interrupt mode which is supported by the platform, kernel, and device in
11620 * MSI-X -> MSI -> IRQ.
11624 * other values - error
11627 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
11629 uint32_t intr_mode = LPFC_INTR_ERROR;
11632 if (cfg_mode == 2) {
11633 /* Preparation before conf_msi mbox cmd */
11636 /* Now, try to enable MSI-X interrupt mode */
11637 retval = lpfc_sli4_enable_msix(phba);
11639 /* Indicate initialization to MSI-X mode */
11640 phba->intr_type = MSIX;
11646 /* Fall back to MSI if MSI-X initialization failed */
11647 if (cfg_mode >= 1 && phba->intr_type == NONE) {
11648 retval = lpfc_sli4_enable_msi(phba);
11650 /* Indicate initialization to MSI mode */
11651 phba->intr_type = MSI;
11656 /* Fall back to INTx if both MSI-X and MSI initialization failed */
11657 if (phba->intr_type == NONE) {
11658 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11659 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
11661 struct lpfc_hba_eq_hdl *eqhdl;
11664 /* Indicate initialization to INTx mode */
11665 phba->intr_type = INTx;
11668 eqhdl = lpfc_get_eq_hdl(0);
11669 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
11671 cpu = cpumask_first(cpu_present_mask);
11672 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
11674 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11675 eqhdl = lpfc_get_eq_hdl(idx);
11684 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
11685 * @phba: pointer to lpfc hba data structure.
11687 * This routine is invoked to disable device interrupt and disassociate
11688 * the driver's interrupt handler(s) from interrupt vector(s) to device
11689 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
11690 * will release the interrupt vector(s) for the message signaled interrupt.
11693 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
11695 /* Disable the currently initialized interrupt mode */
11696 if (phba->intr_type == MSIX) {
11698 struct lpfc_hba_eq_hdl *eqhdl;
11700 /* Free up MSI-X multi-message vectors */
11701 for (index = 0; index < phba->cfg_irq_chann; index++) {
11702 eqhdl = lpfc_get_eq_hdl(index);
11703 lpfc_irq_clear_aff(eqhdl);
11704 irq_set_affinity_hint(eqhdl->irq, NULL);
11705 free_irq(eqhdl->irq, eqhdl);
11708 free_irq(phba->pcidev->irq, phba);
11711 pci_free_irq_vectors(phba->pcidev);
11713 /* Reset interrupt management states */
11714 phba->intr_type = NONE;
11715 phba->sli.slistat.sli_intr = 0;
11719 * lpfc_unset_hba - Unset SLI3 hba device initialization
11720 * @phba: pointer to lpfc hba data structure.
11722 * This routine is invoked to unset the HBA device initialization steps to
11723 * a device with SLI-3 interface spec.
11726 lpfc_unset_hba(struct lpfc_hba *phba)
11728 struct lpfc_vport *vport = phba->pport;
11729 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11731 spin_lock_irq(shost->host_lock);
11732 vport->load_flag |= FC_UNLOADING;
11733 spin_unlock_irq(shost->host_lock);
11735 kfree(phba->vpi_bmask);
11736 kfree(phba->vpi_ids);
11738 lpfc_stop_hba_timers(phba);
11740 phba->pport->work_port_events = 0;
11742 lpfc_sli_hba_down(phba);
11744 lpfc_sli_brdrestart(phba);
11746 lpfc_sli_disable_intr(phba);
11752 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
11753 * @phba: Pointer to HBA context object.
11755 * This function is called in the SLI4 code path to wait for completion
11756 * of device's XRIs exchange busy. It will check the XRI exchange busy
11757 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
11758 * that, it will check the XRI exchange busy on outstanding FCP and ELS
11759 * I/Os every 30 seconds, log error message, and wait forever. Only when
11760 * all XRI exchange busy complete, the driver unload shall proceed with
11761 * invoking the function reset ioctl mailbox command to the CNA and the
11762 * rest of the driver unload resource release.
11765 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
11767 struct lpfc_sli4_hdw_queue *qp;
11770 int io_xri_cmpl = 1;
11771 int nvmet_xri_cmpl = 1;
11772 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
11774 /* Driver just aborted IOs during the hba_unset process. Pause
11775 * here to give the HBA time to complete the IO and get entries
11776 * into the abts lists.
11778 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
11780 /* Wait for NVME pending IO to flush back to transport. */
11781 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
11782 lpfc_nvme_wait_for_io_drain(phba);
11785 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
11786 qp = &phba->sli4_hba.hdwq[idx];
11787 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
11788 if (!io_xri_cmpl) /* if list is NOT empty */
11794 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11796 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
11799 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
11800 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
11801 if (!nvmet_xri_cmpl)
11802 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11803 "6424 NVMET XRI exchange busy "
11804 "wait time: %d seconds.\n",
11807 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11808 "6100 IO XRI exchange busy "
11809 "wait time: %d seconds.\n",
11812 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11813 "2878 ELS XRI exchange busy "
11814 "wait time: %d seconds.\n",
11816 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
11817 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
11819 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
11820 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
11824 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
11825 qp = &phba->sli4_hba.hdwq[idx];
11826 io_xri_cmpl = list_empty(
11827 &qp->lpfc_abts_io_buf_list);
11828 if (!io_xri_cmpl) /* if list is NOT empty */
11834 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11835 nvmet_xri_cmpl = list_empty(
11836 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
11839 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
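/*
 * Illustrative sketch, not driver code: the wait loop above polls on a
 * short interval first and then backs off to a longer one while logging,
 * waiting indefinitely. done(), FAST_MS, SLOW_MS and BUDGET_MS below are
 * hypothetical:
 */
#if 0
	unsigned int waited = 0;

	while (!done()) {
		if (waited > BUDGET_MS) {
			pr_err("exchange still busy after %u ms\n", waited);
			msleep(SLOW_MS);	/* long interval, wait forever */
			waited += SLOW_MS;
		} else {
			msleep(FAST_MS);	/* short interval at first */
			waited += FAST_MS;
		}
	}
#endif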
11845 * lpfc_sli4_hba_unset - Unset the fcoe hba
11846 * @phba: Pointer to HBA context object.
11848 * This function is called in the SLI4 code path to reset the HBA's FCoE
11849 * function. The caller is not required to hold any lock. This routine
11850 * issues PCI function reset mailbox command to reset the FCoE function.
11851 * At the end of the function, it calls lpfc_hba_down_post function to
11852 * free any pending commands.
11855 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
11858 LPFC_MBOXQ_t *mboxq;
11859 struct pci_dev *pdev = phba->pcidev;
11861 lpfc_stop_hba_timers(phba);
11863 phba->sli4_hba.intr_enable = 0;
11866 * Gracefully wait out any potentially outstanding asynchronous
11870 /* First, block any pending async mailbox command from posted */
11871 spin_lock_irq(&phba->hbalock);
11872 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
11873 spin_unlock_irq(&phba->hbalock);
11874 /* Now, try to wait it out if we can */
11875 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
11877 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
11880 /* Forcefully release the outstanding mailbox command if timed out */
11881 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
11882 spin_lock_irq(&phba->hbalock);
11883 mboxq = phba->sli.mbox_active;
11884 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
11885 __lpfc_mbox_cmpl_put(phba, mboxq);
11886 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11887 phba->sli.mbox_active = NULL;
11888 spin_unlock_irq(&phba->hbalock);
11891 /* Abort all iocbs associated with the hba */
11892 lpfc_sli_hba_iocb_abort(phba);
11894 /* Wait for completion of device XRI exchange busy */
11895 lpfc_sli4_xri_exchange_busy_wait(phba);
11897 /* per-phba callback de-registration for hotplug event */
11898 lpfc_cpuhp_remove(phba);
11900 /* Disable PCI subsystem interrupt */
11901 lpfc_sli4_disable_intr(phba);
11903 /* Disable SR-IOV if enabled */
11904 if (phba->cfg_sriov_nr_virtfn)
11905 pci_disable_sriov(pdev);
11907 /* Stop kthread signal shall trigger work_done one more time */
11908 kthread_stop(phba->worker_thread);
11910 /* Disable FW logging to host memory */
11911 lpfc_ras_stop_fwlog(phba);
11913 /* Unset the queues shared with the hardware then release all
11914 * allocated resources.
11916 lpfc_sli4_queue_unset(phba);
11917 lpfc_sli4_queue_destroy(phba);
11919 /* Reset SLI4 HBA FCoE function */
11920 lpfc_pci_function_reset(phba);
11922 /* Free RAS DMA memory */
11923 if (phba->ras_fwlog.ras_enabled)
11924 lpfc_sli4_ras_dma_free(phba);
11926 /* Stop the SLI4 device port */
11928 phba->pport->work_port_events = 0;
11932 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
11933 * @phba: Pointer to HBA context object.
11934 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
11936 * This function is called in the SLI4 code path to read the port's
11937 * sli4 capabilities.
11939 * This function may be called from any context that can block-wait
11940 * for the completion. The expectation is that this routine is called
11941 * typically from probe_one or from the online routine.
11944 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
11947 struct lpfc_mqe *mqe;
11948 struct lpfc_pc_sli4_params *sli4_params;
11952 mqe = &mboxq->u.mqe;
11954 /* Read the port's SLI4 Parameters port capabilities */
11955 lpfc_pc_sli4_params(mboxq);
11956 if (!phba->sli4_hba.intr_enable)
11957 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11959 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
11960 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11966 sli4_params = &phba->sli4_hba.pc_sli4_params;
11967 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
11968 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
11969 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
11970 sli4_params->featurelevel_1 = bf_get(featurelevel_1,
11971 &mqe->un.sli4_params);
11972 sli4_params->featurelevel_2 = bf_get(featurelevel_2,
11973 &mqe->un.sli4_params);
11974 sli4_params->proto_types = mqe->un.sli4_params.word3;
11975 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
11976 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
11977 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
11978 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
11979 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
11980 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
11981 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
11982 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
11983 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
11984 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
11985 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
11986 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
11987 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
11988 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
11989 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
11990 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
11991 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
11992 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
11993 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
11994 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
11996 /* Make sure that sge_supp_len can be handled by the driver */
11997 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
11998 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
12004 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
12005 * @phba: Pointer to HBA context object.
12006 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
12008 * This function is called in the SLI4 code path to read the port's
12009 * sli4 capabilities.
12011 * This function may be called from any context that can block-wait
12012 * for the completion. The expectation is that this routine is called
12013 * typically from probe_one or from the online routine.
12016 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
12019 struct lpfc_mqe *mqe = &mboxq->u.mqe;
12020 struct lpfc_pc_sli4_params *sli4_params;
12023 bool exp_wqcq_pages = true;
12024 struct lpfc_sli4_parameters *mbx_sli4_parameters;
12027 * By default, the driver assumes the SLI4 port requires RPI
12028 * header postings. The SLI4_PARAM response will correct this
12031 phba->sli4_hba.rpi_hdrs_in_use = 1;
12033 /* Read the port's SLI4 Config Parameters */
12034 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
12035 sizeof(struct lpfc_sli4_cfg_mhdr));
12036 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
12037 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
12038 length, LPFC_SLI4_MBX_EMBED);
12039 if (!phba->sli4_hba.intr_enable)
12040 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
12042 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
12043 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
12047 sli4_params = &phba->sli4_hba.pc_sli4_params;
12048 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
12049 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
12050 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
12051 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
12052 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
12053 mbx_sli4_parameters);
12054 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
12055 mbx_sli4_parameters);
12056 if (bf_get(cfg_phwq, mbx_sli4_parameters))
12057 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
12059 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
12060 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
12061 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
12062 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
12063 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
12064 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
12065 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
12066 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
12067 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
12068 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
12069 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
12070 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
12071 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
12072 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
12073 mbx_sli4_parameters);
12074 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
12075 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
12076 mbx_sli4_parameters);
12077 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
12078 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
12080 /* Check for Extended Pre-Registered SGL support */
12081 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
12083 /* Check for firmware nvme support */
12084 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
12085 bf_get(cfg_xib, mbx_sli4_parameters));
12088 /* Save this to indicate the Firmware supports NVME */
12089 sli4_params->nvme = 1;
12091 /* Firmware NVME support, check driver FC4 NVME support */
12092 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
12093 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
12094 "6133 Disabling NVME support: "
12095 "FC4 type not supported: x%x\n",
12096 phba->cfg_enable_fc4_type);
12100 /* No firmware NVME support, check driver FC4 NVME support */
12101 sli4_params->nvme = 0;
12102 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
12103 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
12104 "6101 Disabling NVME support: Not "
12105 "supported by firmware (%d %d) x%x\n",
12106 bf_get(cfg_nvme, mbx_sli4_parameters),
12107 bf_get(cfg_xib, mbx_sli4_parameters),
12108 phba->cfg_enable_fc4_type);
12110 phba->nvme_support = 0;
12111 phba->nvmet_support = 0;
12112 phba->cfg_nvmet_mrq = 0;
12113 phba->cfg_nvme_seg_cnt = 0;
12115 /* If no FC4 type support, move to just SCSI support */
12116 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
12118 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
12122 /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
12123 * accommodate 512K and 1M IOs in a single nvme buf.
12125 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
12126 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
12128 /* Only embed PBDE for if_type 6, PBDE support requires xib be set */
12129 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
12130 LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
12131 phba->cfg_enable_pbde = 0;
12134 * To support Suppress Response feature we must satisfy 3 conditions.
12135 * lpfc_suppress_rsp module parameter must be set (default).
12136 * In SLI4-Parameters Descriptor:
12137 * Extended Inline Buffers (XIB) must be supported.
12138 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
12139 * (double negative).
12141 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
12142 !(bf_get(cfg_nosr, mbx_sli4_parameters)))
12143 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
12145 phba->cfg_suppress_rsp = 0;
12147 if (bf_get(cfg_eqdr, mbx_sli4_parameters))
12148 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	/*
	 * Check whether the adapter supports an embedded copy of the
	 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
	 * to use this option, 128-byte WQEs must be used.
	 */
	if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
		phba->fcp_embed_io = 1;
	else
		phba->fcp_embed_io = 0;
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
			"6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
			bf_get(cfg_xib, mbx_sli4_parameters),
			phba->cfg_enable_pbde,
			phba->fcp_embed_io, phba->nvme_support,
			phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);

	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_2) &&
	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
	     LPFC_SLI_INTF_FAMILY_LNCR_A0))
		exp_wqcq_pages = false;

	if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
	    (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
	    exp_wqcq_pages &&
	    (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
		phba->enab_exp_wqcq_pages = 1;
	else
		phba->enab_exp_wqcq_pages = 0;
	/*
	 * Check if the SLI port supports MDS Diagnostics
	 */
	if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
		phba->mds_diags_support = 1;
	else
		phba->mds_diags_support = 0;

	/*
	 * Check if the SLI port supports NSLER
	 */
	if (bf_get(cfg_nsler, mbx_sli4_parameters))
		phba->nsler = 1;
	else
		phba->nsler = 0;

	return 0;
}
/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver can support this
 * kind of device. If the match is successful, the driver core invokes this
 * routine. If this routine determines it can claim the HBA, it does all the
 * initialization that it needs to do to handle the HBA properly.
 *
 * Return code:
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba   *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host  *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;
	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */
	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}
	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}
	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}
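	/* Note: each failed pass through the retry loop above steps cfg_mode
	 * down one interrupt level (MSI-X -> MSI -> INTx). Once INTx
	 * (intr_mode == 0) is reached, the active-interrupt test always
	 * accepts it, so the loop is bounded.
	 */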
	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to detach a device with SLI-3 interface
 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba   *phba = vport->phba;
	int i;
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	lpfc_cleanup(vport);
	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	lpfc_debugfs_terminate(vport);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_free_iocb_list(phba);

	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When
 * PM invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off device's interrupt and DMA,
 * and bringing the device offline. Note that because the driver implements
 * only the minimum PM requirements for a power-aware driver's suspend/resume
 * support -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the
 * suspend() method call are treated as SUSPEND and the driver fully
 * reinitializes its device during the resume() method call -- the driver sets
 * the device to PCI_D3hot state in PCI config space instead of setting it
 * according to the @msg provided by the PM.
 *
 * Return code:
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that because the
 * driver implements only the minimum PM requirements for a power-aware
 * driver's suspend/resume support -- all the possible PM messages (SUSPEND,
 * HIBERNATE, FREEZE) to the suspend() method call are treated as SUSPEND and
 * the driver fully reinitializes its device during the resume() method call
 * -- the device is set to PCI_D0 directly in PCI config space before
 * restoring the state.
 *
 * Return code:
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int
lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2723 PCI channel I/O abort preparing for recovery\n");

	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}
/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2710 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_io_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli_disable_intr(phba);
	pci_disable_device(phba->pcidev);
}
/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2711 PCI channel permanent disable for failure\n");
	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_io_rings(phba);
}
/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * device with SLI-3 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 *	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0472 Unknown PCI error state: x%x\n", state);
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to
 * device with SLI-3 interface spec. This is called after PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method
 * to recover the device. This function will initialize the HBA device,
 * enable the interrupt, but it will just put the HBA to offline state
 * without passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline, it will perform cleanup */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-3 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 **/
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring device online, it will be no-op for non-fatal error resume */
	lpfc_online(phba);
}
/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * returns the number of ELS/CT IOCBs to reserve
 **/
int
lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
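	/* The thresholds below reserve roughly 10% of the configured XRI
	 * count for ELS/CT traffic, capped at 250 entries.
	 */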
	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (max_xri <= 100)
			return 10;
		else if (max_xri <= 256)
			return 25;
		else if (max_xri <= 512)
			return 50;
		else if (max_xri <= 1024)
			return 100;
		else if (max_xri <= 1536)
			return 150;
		else if (max_xri <= 2048)
			return 200;
		else
			return 250;
	} else
		return 0;
}
/**
 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * returns the number of ELS/CT + NVMET IOCBs to reserve
 **/
int
lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);

	if (phba->nvmet_support)
		max_xri += LPFC_NVMET_BUF_POST;
	return max_xri;
}
static int
lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
	uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
	const struct firmware *fw)
{
	int rc;

	/* Three cases: (1) FW was not supported on the detected adapter.
	 * (2) FW update has been locked out administratively.
	 * (3) Some other error during FW update.
	 * In each case, an unmaskable message is written to the console
	 * for admin diagnosis.
	 */
	if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
	    (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
	     magic_number != MAGIC_NUMBER_G6) ||
	    (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
	     magic_number != MAGIC_NUMBER_G7)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3030 This firmware version is not supported on"
				" this HBA model. Device:%x Magic:%x Type:%x "
				"ID:%x Size %d %zd\n",
				phba->pcidev->device, magic_number, ftype, fid,
				fsize, fw->size);
		rc = -EINVAL;
	} else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3021 Firmware downloads have been prohibited "
				"by a system configuration setting on "
				"Device:%x Magic:%x Type:%x ID:%x Size %d "
				"%zd\n",
				phba->pcidev->device, magic_number, ftype, fid,
				fsize, fw->size);
		rc = -EACCES;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3022 FW Download failed. Add Status x%x "
				"Device:%x Magic:%x Type:%x ID:%x Size %d "
				"%zd\n",
				offset, phba->pcidev->device, magic_number,
				ftype, fid, fsize, fw->size);
		rc = -EIO;
	}
	return rc;
}
/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @fw: pointer to firmware image returned from request_firmware.
 * @context: pointer to the lpfc hba data structure, passed back as the
 *           opaque context from request_firmware_nowait().
 **/
static void
lpfc_write_firmware(const struct firmware *fw, void *context)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)context;
	char fwrev[FW_REV_STR_SIZE];
	struct lpfc_grp_hdr *image;
	struct list_head dma_buffer_list;
	int i, rc = 0;
	struct lpfc_dmabuf *dmabuf, *next;
	uint32_t offset = 0, temp_offset = 0;
	uint32_t magic_number, ftype, fid, fsize;

	/* It can be null in no-wait mode, sanity check */
	if (!fw) {
		rc = -ENXIO;
		goto out;
	}
	image = (struct lpfc_grp_hdr *)fw->data;

	magic_number = be32_to_cpu(image->magic_number);
	ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
	fid = bf_get_be32(lpfc_grp_hdr_id, image);
	fsize = be32_to_cpu(image->size);

	INIT_LIST_HEAD(&dma_buffer_list);
	lpfc_decode_firmware_rev(phba, fwrev, 1);
	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3023 Updating Firmware, Current Version:%s "
				"New Version:%s\n",
				fwrev, image->revision);
		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
					 GFP_KERNEL);
			if (!dmabuf) {
				rc = -ENOMEM;
				goto release_out;
			}
			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
							  SLI4_PAGE_SIZE,
							  &dmabuf->phys,
							  GFP_KERNEL);
			if (!dmabuf->virt) {
				kfree(dmabuf);
				rc = -ENOMEM;
				goto release_out;
			}
			list_add_tail(&dmabuf->list, &dma_buffer_list);
		}
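		/*
		 * The image is flashed in passes: each pass refills the DMA
		 * buffer list with the next SLI4_PAGE_SIZE-sized slices of
		 * the image and hands the list to lpfc_wr_object(), which
		 * advances @offset until the whole image is transferred.
		 */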
		while (offset < fw->size) {
			temp_offset = offset;
			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
					memcpy(dmabuf->virt,
					       fw->data + temp_offset,
					       fw->size - temp_offset);
					temp_offset = fw->size;
					break;
				}
				memcpy(dmabuf->virt, fw->data + temp_offset,
				       SLI4_PAGE_SIZE);
				temp_offset += SLI4_PAGE_SIZE;
			}
			rc = lpfc_wr_object(phba, &dma_buffer_list,
					    (fw->size - offset), &offset);
			if (rc) {
				rc = lpfc_log_write_firmware_error(phba, offset,
								   magic_number,
								   ftype, fid,
								   fsize, fw);
				goto release_out;
			}
		}
		rc = offset;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3029 Skipped Firmware update, Current "
				"Version:%s New Version:%s\n",
				fwrev, image->revision);
	}

release_out:
	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
		list_del(&dmabuf->list);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	release_firmware(fw);
out:
	if (rc < 0)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3062 Firmware update error, status %d.\n", rc);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3024 Firmware update success: size %d.\n", rc);
}
/**
 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
 * @phba: pointer to lpfc hba data structure.
 * @fw_upgrade: INT_FW_UPGRADE or RUN_FW_UPGRADE
 *
 * This routine is called to perform a Linux generic firmware upgrade on a
 * device that supports such a feature.
 **/
int
lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
{
	uint8_t file_name[ELX_MODEL_NAME_SIZE];
	int ret;
	const struct firmware *fw;

	/* Only supported on SLI4 interface type 2 for now */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -EPERM;

	snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
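	/*
	 * INT_FW_UPGRADE defers the flash to the request_firmware_nowait()
	 * completion callback so the caller is not blocked; RUN_FW_UPGRADE
	 * fetches the image and writes it synchronously.
	 */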
	if (fw_upgrade == INT_FW_UPGRADE) {
		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					      file_name, &phba->pcidev->dev,
					      GFP_KERNEL, (void *)phba,
					      lpfc_write_firmware);
	} else if (fw_upgrade == RUN_FW_UPGRADE) {
		ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
		if (!ret)
			lpfc_write_firmware(fw, (void *)phba);
	} else {
		ret = -EINVAL;
	}

	return ret;
}
/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is called from the kernel's PCI subsystem to device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver can support this
 * kind of device. If the match is successful, the driver core invokes this
 * routine. If this routine determines it can claim the HBA, it does all the
 * initialization that it needs to do to handle the HBA device properly.
 *
 * Return codes
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba   *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host  *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-4 Specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	INIT_LIST_HEAD(&phba->active_rrq_list);
	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_unset_driver_resource_s4;
	}
	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;

	/* Put device to a known state before enabling interrupt */
	phba->pport = NULL;
	lpfc_stop_port(phba);

	/* Init cpu_map array */
	lpfc_cpu_map_array_init(phba);

	/* Init hba_eq_hdl array */
	lpfc_hba_eq_hdl_array_init(phba);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0426 Failed to enable interrupt.\n");
		error = -ENODEV;
		goto out_unset_driver_resource;
	}
	/* Default to single EQ for non-MSI-X */
	if (phba->intr_type != MSIX) {
		phba->cfg_irq_chann = 1;
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			if (phba->nvmet_support)
				phba->cfg_nvmet_mrq = 1;
		}
	}
	lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_disable_intr;
	}
	vport = phba->pport;
	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */

	/* Configure sysfs attributes */
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	/* Set up SLI-4 HBA */
	if (lpfc_sli4_hba_setup(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1421 Failed to set up hba\n");
		error = -ENODEV;
		goto out_free_sysfs_attr;
	}

	/* Log the current active interrupt mode */
	phba->intr_mode = intr_mode;
	lpfc_log_intr_mode(phba, intr_mode);

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);
	/* NVME support in FW earlier in the driver load corrects the
	 * FC4 type making a check for nvme_support unnecessary.
	 */
	if (phba->nvmet_support == 0) {
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			/* Create NVME binding with nvme_fc_transport. This
			 * ensures the vport is initialized. If the localport
			 * create fails, it should not unload the driver to
			 * support field issues.
			 */
			error = lpfc_nvme_create_localport(vport);
			if (error) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6004 NVME registration "
						"failed, error x%x\n",
						error);
			}
		}
	}

	/* check for firmware upgrade or downgrade */
	if (phba->cfg_request_firmware_upgrade)
		lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	/* Enable RAS FW log support */
	lpfc_sli4_ras_setup(phba);

	INIT_LIST_HEAD(&phba->poll_list);
	timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
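	/* Register this HBA with the dynamic cpuhp state set up at module
	 * init so lpfc_cpu_online()/lpfc_cpu_offline() are invoked for it
	 * on CPU hotplug events.
	 */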
	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);

	return 0;

out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	/* Free the HBA sysfs attributes */
	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform ndlp cleanup on the physical port. The nvme and nvmet
	 * localports are destroyed after to cleanup all transport memory.
	 */
	lpfc_cleanup(vport);
	lpfc_nvmet_destroy_targetport(phba);
	lpfc_nvme_destroy_localport(vport);

	/* De-allocate multi-XRI pools */
	if (phba->cfg_xri_rebalancing)
		lpfc_destroy_multixri_pools(phba);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_io_free(phba);
	lpfc_free_iocb_list(phba);
	lpfc_sli4_hba_unset(phba);

	lpfc_unset_driver_resource_phase2(phba);
	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);
}
/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it quiesces the device by stopping the driver's worker
 * thread for the device, turning off device's interrupt and DMA, and bringing
 * the device offline. Note that because the driver implements only the
 * minimum PM requirements for a power-aware driver's suspend/resume support
 * -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the
 * suspend() method call are treated as SUSPEND and the driver fully
 * reinitializes its device during the resume() method call -- the driver sets
 * the device to PCI_D3hot state in PCI config space instead of setting it
 * according to the @msg provided by the PM.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it restores the device's PCI config space state and fully
 * reinitializes the device and brings it online. Note that because the driver
 * implements only the minimum PM requirements for a power-aware driver's
 * suspend/resume support -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call are treated as SUSPEND and the driver
 * fully reinitializes its device during the resume() method call -- the
 * device is set to PCI_D0 directly in PCI config space before restoring the
 * state.
 *
 * Return codes
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}
/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding I/Os as we are to reset */
	lpfc_sli_flush_io_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);
	pci_disable_device(phba->pcidev);
}
/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding I/Os */
	lpfc_sli_flush_io_rings(phba);
}
/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. This function is called by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * function is invoked, it will need to stop all the I/Os and interrupt(s)
 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
 * for the PCI subsystem to perform proper recovery as desired.
 *
 * Return codes
 *	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli4_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli4_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2825 Unknown PCI error state: x%x\n", state);
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
/**
 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called after PCI bus has been reset to
 * restart the PCI card from scratch, as if from a cold-boot. During the
 * PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device, enable
 * the interrupt, but it will just put the HBA to offline state without
 * passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
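	/* Clearing LPFC_SLI_ACTIVE above defers the actual function reset
	 * and restart to lpfc_io_resume_s4(), which checks this flag once
	 * DMA is usable again.
	 */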
	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, as function reset is performed through
	 * mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking device offline
	 * will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}
}
/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
 * at PCI device-specific information of the device and driver to see if the
 * driver can support this kind of device. If the match is successful, the
 * driver core invokes this routine. This routine dispatches the action to the
 * proper SLI-3 or SLI-4 device probing routine, which will do all the
 * initialization that it needs to do to handle the HBA device properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;
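	/* The SLI_INTF word carries a valid signature only on SLI-4 capable
	 * parts; a valid signature with an SLI-4 revision selects the SLI-4
	 * probe path, anything else falls back to SLI-3.
	 */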
	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}
/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
 * remove routine, which will perform all the necessary cleanup for the
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
}
/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(pdev, msg);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(pdev, msg);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after PCI bus has been reset to restart the PCI card
 * from scratch, as if from a cold-boot. When this routine is invoked, it
 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
 * routine, which will perform the proper device reset.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
}
/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if OAS is supported for this adapter. If
 * supported, the configure Flash Optimized Fabric flag is set. Otherwise,
 * the enable oas flag is cleared and the pool created for OAS device data
 * is destroyed.
 **/
static void
lpfc_sli4_oas_verify(struct lpfc_hba *phba)
{
	if (!phba->cfg_EnableXLane)
		return;

	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
		phba->cfg_fof = 1;
	} else {
		phba->cfg_fof = 0;
		mempool_destroy(phba->device_data_mem_pool);
		phba->device_data_mem_pool = NULL;
	}
}
/**
 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if RAS is supported by the adapter, and whether
 * firmware logging is to be enabled on this particular PCI function.
 **/
void
lpfc_sli4_ras_init(struct lpfc_hba *phba)
{
	switch (phba->pcidev->device) {
	case PCI_DEVICE_ID_LANCER_G6_FC:
	case PCI_DEVICE_ID_LANCER_G7_FC:
		phba->ras_fwlog.ras_hwsupport = true;
		if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
		    phba->cfg_ras_fwlog_buffsize)
			phba->ras_fwlog.ras_enabled = true;
		else
			phba->ras_fwlog.ras_enabled = false;
		break;
	default:
		phba->ras_fwlog.ras_hwsupport = false;
	}
}
MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name = LPFC_DRIVER_NAME,
	.id_table = lpfc_id_table,
	.probe = lpfc_pci_probe_one,
	.remove = lpfc_pci_remove_one,
	.shutdown = lpfc_pci_remove_one,
	.suspend = lpfc_pci_suspend_one,
	.resume = lpfc_pci_resume_one,
	.err_handler = &lpfc_err_handler,
};

static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};
/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - FC attach transport failed
 *	all others - failed
 **/
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d", error);

	lpfc_transport_functions.vport_create = lpfc_vport_create;
	lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	lpfc_vport_transport_template =
		fc_attach_transport(&lpfc_vport_transport_functions);
	if (lpfc_vport_transport_template == NULL) {
		fc_release_transport(lpfc_transport_template);
		return -ENOMEM;
	}
	lpfc_nvme_cmd_template();
	lpfc_nvmet_cmd_template();

	/* Initialize in case vector mapping is needed */
	lpfc_present_cpu = num_present_cpus();
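	/* CPUHP_AP_ONLINE_DYN requests a dynamically allocated hotplug
	 * state; the value returned by cpuhp_setup_state_multi() identifies
	 * that state and is kept so per-HBA instances can be added later and
	 * the state removed at module exit.
	 */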
	error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					"lpfc/sli4:online",
					lpfc_cpu_online, lpfc_cpu_offline);
	if (error < 0)
		goto cpuhp_failure;
	lpfc_cpuhp_state = error;
	error = pci_register_driver(&lpfc_driver);
	if (error)
		goto unwind;

	return error;

unwind:
	cpuhp_remove_multi_state(lpfc_cpuhp_state);
cpuhp_failure:
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
	return error;
}
/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 **/
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	cpuhp_remove_multi_state(lpfc_cpuhp_state);
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
	idr_destroy(&lpfc_hba_index);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);