/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;
static bool lpfc_pldv_detect;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set in lpfc_get_cfgparam()
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver sets the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);

	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
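
/*
 * Note: the function above follows the synchronous mailbox pattern used
 * throughout this file: allocate an LPFC_MBOXQ_t from phba->mbox_mem_pool,
 * build the command (e.g. with lpfc_read_rev()), issue it with MBX_POLL,
 * check for MBX_SUCCESS, and return the element to the pool. A minimal
 * sketch of the pattern (error handling elided):
 *
 *	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	lpfc_read_rev(phba, pmb);
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 *	if (rc == MBX_SUCCESS)
 *		... consume pmb->u.mb ...
 *	mempool_free(pmb, phba->mbox_mem_pool);
 */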

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for driver's configuring asynchronous event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set internal async event support flag to 1; otherwise, it will
 * set internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains
 * the Option rom version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);

	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
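
/*
 * Decoding example (illustrative values, not taken from a real adapter):
 * with ver=10, rev=2, lev=1, dist=3 and num=0 the "%d.%d%d" format above
 * yields "10.21"; with dist=1 (decoded as 'a' from dist_char[]) and num=4
 * the long form would produce "10.21a4".
 */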

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename and fc_portname of a vport
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
		       sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, then set the Param changes flag
	 * to unreg the login
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		   sizeof(struct lpfc_name)))
		vport->vport_flag |= FAWWPN_PARAM_CHG;

	if (vport->fc_portname.u.wwn[0] == 0 ||
	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
	    vport->vport_flag & FAWWPN_SET) {
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
		vport->vport_flag &= ~FAWWPN_SET;
		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
			vport->vport_flag |= FAWWPN_SET;
	} else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
		       sizeof(struct lpfc_name));
}
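
/*
 * Note: FAWWPN above refers to a fabric-assigned WWPN. The flag handling
 * keeps FAWWPN_SET armed only while the vendor version key in the service
 * parameters still matches FAPWWN_KEY_VENDOR, so a fabric-assigned port
 * name survives subsequent re-reads of the service parameters.
 */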

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * overheated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
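		/*
		 * Each WWNN byte is split into two hex nibbles below; a
		 * nibble value j maps to ASCII '0'-'9' (0x30 + j) when
		 * j <= 9 and to 'a'-'f' (0x61 + j - 10) otherwise.
		 */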
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}
	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth,
				mb->un.varRdConfig.max_xri);
		phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_sli4_refresh_params - update driver copy of params.
 * @phba: Pointer to HBA context object.
 *
 * This is called to refresh driver copy of dynamic fields from the
 * common_get_sli4_parameters descriptor.
 **/
int
lpfc_sli4_refresh_params(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mqe *mqe;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;
	int length, rc;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	mqe = &mboxq->u.mqe;
	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc) {
		mempool_free(mboxq, phba->mbox_mem_pool);
		return rc;
	}
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	phba->sli4_hba.pc_sli4_params.mi_ver =
			bf_get(cfg_mi_ver, mbx_sli4_parameters);
	phba->sli4_hba.pc_sli4_params.cmf =
			bf_get(cfg_cmf, mbx_sli4_parameters);
	phba->sli4_hba.pc_sli4_params.pls =
			bf_get(cfg_pvl, mbx_sli4_parameters);

	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}
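
/*
 * Callers typically reach this routine through the function pointer stored
 * in the HBA context rather than calling it directly, as in
 * lpfc_config_port_post():
 *
 *	rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
 */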

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
	     !(phba->lmt & LMT_64Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
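
/*
 * Note on mailbox ownership above: with MBX_NOWAIT the default completion
 * handler (lpfc_sli_def_mbox_cmpl) releases the mailbox element when the
 * command completes, so it is freed here only on failure or when the
 * command was issued with MBX_POLL.
 */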

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 * rspiocb which got deferred
 *
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
			break;
		}
	}
}

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or DOA
			 * Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */

	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
			    &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);

	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->hbalock);
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}
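
/*
 * phba->lpfc_hba_down_post is set during API table setup to either
 * lpfc_hba_down_post_s3 or lpfc_hba_down_post_s4 according to the SLI
 * revision of the device, so callers need not be SLI-rev aware.
 */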

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
	    !(phba->link_state == LPFC_HBA_ERROR) &&
	    !(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}

/**
 * lpfc_idle_stat_delay_work - idle_stat tracking
 *
 * This routine tracks per-cq idle_stat and determines polling decisions.
 *
 * Return codes:
 *   None
 **/
static void
lpfc_idle_stat_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba,
					     idle_stat_delay_work);
	struct lpfc_queue *cq;
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_idle_stat *idle_stat;
	u32 i, idle_percent;
	u64 wall, wall_idle, diff_wall, diff_idle, busy_time;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE ||
	    phba->cmf_active_mode != LPFC_CFG_OFF)
		goto requeue;

	for_each_present_cpu(i) {
		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
		cq = hdwq->io_cq;

		/* Skip if we've already handled this cq's primary CPU */
		if (cq->chann != i)
			continue;

		idle_stat = &phba->sli4_hba.idle_stat[i];

		/* get_cpu_idle_time returns values as running counters. Thus,
		 * to know the amount for this period, the prior counter values
		 * need to be subtracted from the current counter values.
		 * From there, the idle time stat can be calculated as a
		 * percentage of 100 - the sum of the other consumption times.
		 */
		wall_idle = get_cpu_idle_time(i, &wall, 1);
		diff_idle = wall_idle - idle_stat->prev_idle;
		diff_wall = wall - idle_stat->prev_wall;

		if (diff_wall <= diff_idle)
			busy_time = 0;
		else
			busy_time = diff_wall - diff_idle;

		idle_percent = div64_u64(100 * busy_time, diff_wall);
		idle_percent = 100 - idle_percent;

		if (idle_percent < 15)
			cq->poll_mode = LPFC_QUEUE_WORK;
		else
			cq->poll_mode = LPFC_IRQ_POLL;

		idle_stat->prev_idle = wall_idle;
		idle_stat->prev_wall = wall;
	}

requeue:
	schedule_delayed_work(&phba->idle_stat_delay_work,
			      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}
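
/*
 * Worked example (illustrative numbers): if get_cpu_idle_time() advanced
 * the wall counter by 1000 units and the idle counter by 850 over the
 * sampling period, busy_time = 150 and
 * idle_percent = 100 - (100 * 150 / 1000) = 85; since 85 >= 15 the CQ
 * stays in LPFC_IRQ_POLL mode.
 */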
static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba, eq_delay_work);
	struct lpfc_eq_intr_info *eqi, *eqi_new;
	struct lpfc_queue *eq, *eq_next;
	unsigned char *ena_delay = NULL;
	uint32_t usdelay;
	int i;

	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
		return;
	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
			    GFP_KERNEL);
	if (!ena_delay)
		goto requeue;

	for (i = 0; i < phba->cfg_irq_chann; i++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
		if (!eq)
			continue;
		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
			ena_delay[eq->last_cpu] = 1;
		}
	}

	for_each_present_cpu(i) {
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
		if (ena_delay[i]) {
			usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
			if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
				usdelay = LPFC_MAX_AUTO_EQ_DELAY;
		} else {
			usdelay = 0;
		}
		eqi->icnt = 0;

		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
			if (unlikely(eq->last_cpu != i)) {
				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
						      eq->last_cpu);
				list_move_tail(&eq->cpu_list, &eqi_new->list);
				continue;
			}
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}
	kfree(ena_delay);

requeue:
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}
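
/*
 * Worked example (illustrative numbers): an EQ whose per-cpu interrupt
 * counter eqi->icnt reached 4096 over the last period gets
 * usdelay = (4096 >> 10) * LPFC_EQ_DELAY_STEP = 4 * LPFC_EQ_DELAY_STEP,
 * clamped to LPFC_MAX_AUTO_EQ_DELAY before being programmed via
 * lpfc_modify_hba_eq_delay().
 */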

/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 * @phba: pointer to lpfc hba data structure.
 *
 * For each heartbeat, this routine does some heuristic methods to adjust
 * XRI distribution. The goal is to fully utilize free XRIs.
 **/
static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;

	hwq_count = phba->cfg_hdw_queue;
	for (i = 0; i < hwq_count; i++) {
		/* Adjust XRIs in private pool */
		lpfc_adjust_pvt_pool_count(phba, i);

		/* Adjust high watermark */
		lpfc_adjust_high_watermark(phba, i);

#ifdef LPFC_MXP_STAT
		/* Snapshot pbl, pvt and busy count */
		lpfc_snapshot_mxp(phba, i);
#endif
	}
}

/**
 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * If a HB mbox is not already in progress, this routine will allocate
 * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
 * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
 **/
static int
lpfc_issue_hb_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int retval;

	/* Is a Heartbeat mbox already in progress */
	if (phba->hba_flag & HBA_HBEAT_INP)
		return 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	lpfc_heart_beat(phba, pmboxq);
	pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
	pmboxq->vport = phba->pport;
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENXIO;
	}
	phba->hba_flag |= HBA_HBEAT_INP;

	return 0;
}
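
/*
 * HBA_HBEAT_INP is set once the MBX_HEARTBEAT command is in flight and is
 * cleared again in lpfc_hb_mbox_cmpl() when the command completes; the
 * timeout handler below uses this flag to choose between re-arming at the
 * interval or the longer timeout period.
 */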

/**
 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO
 * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless
 * of the value of lpfc_enable_hba_heartbeat.
 * If lpfc_enable_hba_heartbeat is set, the timeout routine will always
 * try to issue a MBX_HEARTBEAT mbox command.
 **/
void
lpfc_issue_hb_tmo(struct lpfc_hba *phba)
{
	if (phba->cfg_enable_hba_heartbeat)
		return;
	phba->hba_flag |= HBA_HBEAT_TMO;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * to offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_dmabuf *buf_ptr;
	int retval = 0;
	int i, tmo;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if (phba->cfg_xri_rebalancing) {
		/* Multi-XRI pools handler */
		lpfc_hb_mxp_handler(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_change_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	if (phba->elsbuf_cnt &&
	    (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
					 struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		/* If IOs are completing, no need to issue a MBX_HEARTBEAT */
		spin_lock_irq(&phba->pport->work_port_lock);
		if (time_after(phba->last_completion_time +
			       msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
			       jiffies)) {
			spin_unlock_irq(&phba->pport->work_port_lock);
			if (phba->hba_flag & HBA_HBEAT_INP)
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			else
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			goto out;
		}
		spin_unlock_irq(&phba->pport->work_port_lock);

		/* Check if a MBX_HEARTBEAT is already in progress */
		if (phba->hba_flag & HBA_HBEAT_INP) {
			/*
			 * If heart beat timeout called with HBA_HBEAT_INP set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0459 Adapter heartbeat still outstanding: "
				"last compl time was %d ms.\n",
				jiffies_to_msecs(jiffies
					 - phba->last_completion_time));
			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
		} else {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {

				retval = lpfc_issue_hb_mbox(phba);
				if (retval)
					tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
				else
					tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
				phba->skipped_hb = 0;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					" updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
	} else {
		/* Check to see if we want to force a MBX_HEARTBEAT */
		if (phba->hba_flag & HBA_HBEAT_TMO) {
			retval = lpfc_issue_hb_mbox(phba);
			if (retval)
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			else
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
		} else {
			tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
		}
	}
out:
	mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (phba->link_state == LPFC_HBA_ERROR &&
	    test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by HBA by setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs, phba->work_status[0],
			phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the first
	 * write to the host attention register clears the host status
	 * register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(event_data),
					  (char *) &event_data,
					  SCSI_NL_VID_TYPE_PCI |
					  PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: send reset/port recovery message.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for port
 * status register (ERR, RDY, RN) bits before proceeding with function reset.
 * During this process, interrupt vectors are freed and later requested
 * for handling possible port resource change.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;
	LPFC_MBOXQ_t *mboxq;

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		/*
		 * On error status condition, driver needs to wait for port
		 * ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (rc)
			return rc;
	}

	/* need reset: attempt for port recovery */
	if (en_rn_msg)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");

	/* If we are in no-wait mode, the HBA has been reset and is not
	 * functional, thus we should clear
	 * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags.
	 */
	if (mbx_action == LPFC_MBX_NO_WAIT) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
		if (phba->sli.mbox_active) {
			mboxq = phba->sli.mbox_active;
			mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
			__lpfc_mbox_cmpl_put(phba, mboxq);
			phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			phba->sli.mbox_active = NULL;
		}
		spin_unlock_irq(&phba->hbalock);
	}

	lpfc_offline_prep(phba, mbx_action);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	/* release interrupt for possible resource change */
	lpfc_sli4_disable_intr(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6309 Failed to restart board\n");
		return rc;
	}
	/* request and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3175 Failed to enable interrupt\n");
		return -EIO;
	}
	phba->intr_mode = intr_mode;
	rc = lpfc_online(phba);
	if (rc == 0)
		lpfc_unblock_mgmt_io(phba);

	return rc;
}
1961 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1962 * @phba: pointer to lpfc hba data structure.
1964 * This routine is invoked to handle the SLI4 HBA hardware error attention
1968 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1970 struct lpfc_vport *vport = phba->pport;
1971 uint32_t event_data;
1972 struct Scsi_Host *shost;
1974 struct lpfc_register portstat_reg = {0};
1975 uint32_t reg_err1, reg_err2;
1976 uint32_t uerrlo_reg, uemasklo_reg;
1977 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
1978 bool en_rn_msg = true;
1979 struct temp_event temp_event_data;
1980 struct lpfc_register portsmphr_reg;
1983 /* If the pci channel is offline, ignore possible errors, since
1984 * we cannot communicate with the pci card anyway.
1986 if (pci_channel_offline(phba->pcidev)) {
1987 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1988 "3166 pci channel is offline\n");
1989 lpfc_sli_flush_io_rings(phba);
1993 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
1994 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1996 case LPFC_SLI_INTF_IF_TYPE_0:
1997 pci_rd_rc1 = lpfc_readl(
1998 phba->sli4_hba.u.if_type0.UERRLOregaddr,
2000 pci_rd_rc2 = lpfc_readl(
2001 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
2003 /* consider PCI bus read error as pci_channel_offline */
2004 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
2006 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
2007 lpfc_sli4_offline_eratt(phba);
2010 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2011 "7623 Checking UE recoverable");
2013 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
2014 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2015 &portsmphr_reg.word0))
2018 smphr_port_status = bf_get(lpfc_port_smphr_port_status,
2020 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
2021 LPFC_PORT_SEM_UE_RECOVERABLE)
2023 /* Sleep for 1 second before checking the semaphore */
2027 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2028 "4827 smphr_port_status x%x : Waited %dSec",
2029 smphr_port_status, i);
2031 /* Recoverable UE, reset the HBA device */
2032 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
2033 LPFC_PORT_SEM_UE_RECOVERABLE) {
2034 for (i = 0; i < 20; i++) {
2036 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2037 &portsmphr_reg.word0) &&
2038 (LPFC_POST_STAGE_PORT_READY ==
2039 bf_get(lpfc_port_smphr_port_status,
2041 rc = lpfc_sli4_port_sta_fn_reset(phba,
2042 LPFC_MBX_NO_WAIT, en_rn_msg);
2045 lpfc_printf_log(phba, KERN_ERR,
2047 "4215 Failed to recover UE");
2052 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2053 "7624 Firmware not ready: Failing UE recovery,"
2054 " waited %dSec", i);
2055 phba->link_state = LPFC_HBA_ERROR;
2058 case LPFC_SLI_INTF_IF_TYPE_2:
2059 case LPFC_SLI_INTF_IF_TYPE_6:
2060 pci_rd_rc1 = lpfc_readl(
2061 phba->sli4_hba.u.if_type2.STATUSregaddr,
2062 &portstat_reg.word0);
2063 /* consider PCI bus read error as pci_channel_offline */
2064 if (pci_rd_rc1 == -EIO) {
2065 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2066 "3151 PCI bus read access failure: x%x\n",
2067 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
2068 lpfc_sli4_offline_eratt(phba);
2071 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
2072 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
2073 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
2074 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2075 "2889 Port Overtemperature event, "
2076 "taking port offline Data: x%x x%x\n",
2077 reg_err1, reg_err2);
2079 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
2080 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
2081 temp_event_data.event_code = LPFC_CRIT_TEMP;
2082 temp_event_data.data = 0xFFFFFFFF;
2084 shost = lpfc_shost_from_vport(phba->pport);
2085 fc_host_post_vendor_event(shost, fc_get_event_number(),
2086 sizeof(temp_event_data),
2087 (char *)&temp_event_data,
2088 SCSI_NL_VID_TYPE_PCI
2089 | PCI_VENDOR_ID_EMULEX);
2091 spin_lock_irq(&phba->hbalock);
2092 phba->over_temp_state = HBA_OVER_TEMP;
2093 spin_unlock_irq(&phba->hbalock);
2094 lpfc_sli4_offline_eratt(phba);
2097 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2098 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
2099 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2100 "3143 Port Down: Firmware Update "
2103 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2104 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2105 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2106 "3144 Port Down: Debug Dump\n");
2107 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2108 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
2109 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2110 "3145 Port Down: Provisioning\n");
2112 /* If resets are disabled then leave the HBA alone and return */
2113 if (!phba->cfg_enable_hba_reset)
2116 /* Check port status register for function reset */
2117 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
2120 /* don't report event on forced debug dump */
2121 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2122 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2127 /* fall through when unable to recover */
2128 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2129 "3152 Unrecoverable error\n");
2130 phba->link_state = LPFC_HBA_ERROR;
2132 case LPFC_SLI_INTF_IF_TYPE_1:
2136 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2137 "3123 Report dump event to upper layer\n");
2138 /* Send an internal error event to mgmt application */
2139 lpfc_board_errevt_to_mgmt(phba);
2141 event_data = FC_REG_DUMP_EVENT;
2142 shost = lpfc_shost_from_vport(vport);
2143 fc_host_post_vendor_event(shost, fc_get_event_number(),
2144 sizeof(event_data), (char *) &event_data,
2145 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
2149 * lpfc_handle_eratt - Wrapper func for handling hba error attention
2150 * @phba: pointer to lpfc HBA data structure.
2152 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
2153 * routine, invoked through the API jump table function pointer in the lpfc_hba struct.
2157 * Any other value - error.
2160 lpfc_handle_eratt(struct lpfc_hba *phba)
2162 (*phba->lpfc_handle_eratt)(phba);
2166 * lpfc_handle_latt - The HBA link event handler
2167 * @phba: pointer to lpfc hba data structure.
2169 * This routine is invoked from the worker thread to handle a HBA host
2170 * attention link event. SLI3 only.
2173 lpfc_handle_latt(struct lpfc_hba *phba)
2175 struct lpfc_vport *vport = phba->pport;
2176 struct lpfc_sli *psli = &phba->sli;
2178 volatile uint32_t control;
2179 struct lpfc_dmabuf *mp;
2182 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2185 goto lpfc_handle_latt_err_exit;
2188 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2191 goto lpfc_handle_latt_free_pmb;
2194 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2197 goto lpfc_handle_latt_free_mp;
2200 /* Cleanup any outstanding ELS commands */
2201 lpfc_els_flush_all_cmd(phba);
2203 psli->slistat.link_event++;
2204 lpfc_read_topology(phba, pmb, mp);
2205 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2207 /* Block ELS IOCBs until we have processed this mbox command */
2208 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2209 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2210 if (rc == MBX_NOT_FINISHED) {
2212 goto lpfc_handle_latt_free_mbuf;
2215 /* Clear Link Attention in HA REG */
2216 spin_lock_irq(&phba->hbalock);
2217 writel(HA_LATT, phba->HAregaddr);
2218 readl(phba->HAregaddr); /* flush */
2219 spin_unlock_irq(&phba->hbalock);
2223 lpfc_handle_latt_free_mbuf:
2224 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2225 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2226 lpfc_handle_latt_free_mp:
2228 lpfc_handle_latt_free_pmb:
2229 mempool_free(pmb, phba->mbox_mem_pool);
2230 lpfc_handle_latt_err_exit:
2231 /* Enable Link attention interrupts */
2232 spin_lock_irq(&phba->hbalock);
2233 psli->sli_flag |= LPFC_PROCESS_LA;
2234 control = readl(phba->HCregaddr);
2235 control |= HC_LAINT_ENA;
2236 writel(control, phba->HCregaddr);
2237 readl(phba->HCregaddr); /* flush */
2239 /* Clear Link Attention in HA REG */
2240 writel(HA_LATT, phba->HAregaddr);
2241 readl(phba->HAregaddr); /* flush */
2242 spin_unlock_irq(&phba->hbalock);
2243 lpfc_linkdown(phba);
2244 phba->link_state = LPFC_HBA_ERROR;
2246 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2247 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
2253 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
2254 * @phba: pointer to lpfc hba data structure.
2255 * @vpd: pointer to the vital product data.
2256 * @len: length of the vital product data in bytes.
2258 * This routine parses the Vital Product Data (VPD). The VPD is treated as
2259 * an array of characters. In this routine, the ModelName, ProgramType,
2260 * ModelDesc, and related fields of the phba data structure are populated.
2263 * 0 - pointer to the VPD passed in is NULL
2267 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2269 uint8_t lenlo, lenhi;
2279 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2280 "0455 Vital Product Data: x%x x%x x%x x%x\n",
2281 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2283 while (!finished && (index < (len - 4))) {
2284 switch (vpd[index]) {
2292 i = ((((unsigned short)lenhi) << 8) + lenlo);
2301 Length = ((((unsigned short)lenhi) << 8) + lenlo);
2302 if (Length > len - index)
2303 Length = len - index;
2304 while (Length > 0) {
2305 /* Look for Serial Number */
2306 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2313 phba->SerialNumber[j++] = vpd[index++];
2317 phba->SerialNumber[j] = 0;
2320 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2321 phba->vpd_flag |= VPD_MODEL_DESC;
2328 phba->ModelDesc[j++] = vpd[index++];
2332 phba->ModelDesc[j] = 0;
2335 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2336 phba->vpd_flag |= VPD_MODEL_NAME;
2343 phba->ModelName[j++] = vpd[index++];
2347 phba->ModelName[j] = 0;
2350 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2351 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2358 phba->ProgramType[j++] = vpd[index++];
2362 phba->ProgramType[j] = 0;
2365 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2366 phba->vpd_flag |= VPD_PORT;
2373 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2374 (phba->sli4_hba.pport_name_sta ==
2375 LPFC_SLI4_PPNAME_GET)) {
2379 phba->Port[j++] = vpd[index++];
2383 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2384 (phba->sli4_hba.pport_name_sta ==
2385 LPFC_SLI4_PPNAME_NON))
2412 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2413 * @phba: pointer to lpfc hba data structure.
2414 * @mdp: pointer to the data structure to hold the derived model name.
2415 * @descp: pointer to the data structure to hold the derived description.
2417 * This routine retrieves HBA's description based on its registered PCI device
2418 * ID. The @descp passed into this function points to an array of 256 chars. It
2419 * shall be returned with the model name, maximum speed, and the host bus type.
2420 * The @mdp passed into this function points to an array of 80 chars. When the
2421 * function returns, the @mdp will be filled with the model name.
2424 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2427 uint16_t dev_id = phba->pcidev->device;
2430 int oneConnect = 0; /* default is not a oneConnect */
2435 } m = {"<Unknown>", "", ""};
2437 if (mdp && mdp[0] != '\0'
2438 && descp && descp[0] != '\0')
2441 if (phba->lmt & LMT_64Gb)
2443 else if (phba->lmt & LMT_32Gb)
2445 else if (phba->lmt & LMT_16Gb)
2447 else if (phba->lmt & LMT_10Gb)
2449 else if (phba->lmt & LMT_8Gb)
2451 else if (phba->lmt & LMT_4Gb)
2453 else if (phba->lmt & LMT_2Gb)
2455 else if (phba->lmt & LMT_1Gb)
2463 case PCI_DEVICE_ID_FIREFLY:
2464 m = (typeof(m)){"LP6000", "PCI",
2465 "Obsolete, Unsupported Fibre Channel Adapter"};
2467 case PCI_DEVICE_ID_SUPERFLY:
2468 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2469 m = (typeof(m)){"LP7000", "PCI", ""};
2471 m = (typeof(m)){"LP7000E", "PCI", ""};
2472 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2474 case PCI_DEVICE_ID_DRAGONFLY:
2475 m = (typeof(m)){"LP8000", "PCI",
2476 "Obsolete, Unsupported Fibre Channel Adapter"};
2478 case PCI_DEVICE_ID_CENTAUR:
2479 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2480 m = (typeof(m)){"LP9002", "PCI", ""};
2482 m = (typeof(m)){"LP9000", "PCI", ""};
2483 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2485 case PCI_DEVICE_ID_RFLY:
2486 m = (typeof(m)){"LP952", "PCI",
2487 "Obsolete, Unsupported Fibre Channel Adapter"};
2489 case PCI_DEVICE_ID_PEGASUS:
2490 m = (typeof(m)){"LP9802", "PCI-X",
2491 "Obsolete, Unsupported Fibre Channel Adapter"};
2493 case PCI_DEVICE_ID_THOR:
2494 m = (typeof(m)){"LP10000", "PCI-X",
2495 "Obsolete, Unsupported Fibre Channel Adapter"};
2497 case PCI_DEVICE_ID_VIPER:
2498 m = (typeof(m)){"LPX1000", "PCI-X",
2499 "Obsolete, Unsupported Fibre Channel Adapter"};
2501 case PCI_DEVICE_ID_PFLY:
2502 m = (typeof(m)){"LP982", "PCI-X",
2503 "Obsolete, Unsupported Fibre Channel Adapter"};
2505 case PCI_DEVICE_ID_TFLY:
2506 m = (typeof(m)){"LP1050", "PCI-X",
2507 "Obsolete, Unsupported Fibre Channel Adapter"};
2509 case PCI_DEVICE_ID_HELIOS:
2510 m = (typeof(m)){"LP11000", "PCI-X2",
2511 "Obsolete, Unsupported Fibre Channel Adapter"};
2513 case PCI_DEVICE_ID_HELIOS_SCSP:
2514 m = (typeof(m)){"LP11000-SP", "PCI-X2",
2515 "Obsolete, Unsupported Fibre Channel Adapter"};
2517 case PCI_DEVICE_ID_HELIOS_DCSP:
2518 m = (typeof(m)){"LP11002-SP", "PCI-X2",
2519 "Obsolete, Unsupported Fibre Channel Adapter"};
2521 case PCI_DEVICE_ID_NEPTUNE:
2522 m = (typeof(m)){"LPe1000", "PCIe",
2523 "Obsolete, Unsupported Fibre Channel Adapter"};
2525 case PCI_DEVICE_ID_NEPTUNE_SCSP:
2526 m = (typeof(m)){"LPe1000-SP", "PCIe",
2527 "Obsolete, Unsupported Fibre Channel Adapter"};
2529 case PCI_DEVICE_ID_NEPTUNE_DCSP:
2530 m = (typeof(m)){"LPe1002-SP", "PCIe",
2531 "Obsolete, Unsupported Fibre Channel Adapter"};
2533 case PCI_DEVICE_ID_BMID:
2534 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2536 case PCI_DEVICE_ID_BSMB:
2537 m = (typeof(m)){"LP111", "PCI-X2",
2538 "Obsolete, Unsupported Fibre Channel Adapter"};
2540 case PCI_DEVICE_ID_ZEPHYR:
2541 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2543 case PCI_DEVICE_ID_ZEPHYR_SCSP:
2544 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2546 case PCI_DEVICE_ID_ZEPHYR_DCSP:
2547 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2550 case PCI_DEVICE_ID_ZMID:
2551 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2553 case PCI_DEVICE_ID_ZSMB:
2554 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2556 case PCI_DEVICE_ID_LP101:
2557 m = (typeof(m)){"LP101", "PCI-X",
2558 "Obsolete, Unsupported Fibre Channel Adapter"};
2560 case PCI_DEVICE_ID_LP10000S:
2561 m = (typeof(m)){"LP10000-S", "PCI",
2562 "Obsolete, Unsupported Fibre Channel Adapter"};
2564 case PCI_DEVICE_ID_LP11000S:
2565 m = (typeof(m)){"LP11000-S", "PCI-X2",
2566 "Obsolete, Unsupported Fibre Channel Adapter"};
2568 case PCI_DEVICE_ID_LPE11000S:
2569 m = (typeof(m)){"LPe11000-S", "PCIe",
2570 "Obsolete, Unsupported Fibre Channel Adapter"};
2572 case PCI_DEVICE_ID_SAT:
2573 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2575 case PCI_DEVICE_ID_SAT_MID:
2576 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2578 case PCI_DEVICE_ID_SAT_SMB:
2579 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2581 case PCI_DEVICE_ID_SAT_DCSP:
2582 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2584 case PCI_DEVICE_ID_SAT_SCSP:
2585 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2587 case PCI_DEVICE_ID_SAT_S:
2588 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2590 case PCI_DEVICE_ID_HORNET:
2591 m = (typeof(m)){"LP21000", "PCIe",
2592 "Obsolete, Unsupported FCoE Adapter"};
2595 case PCI_DEVICE_ID_PROTEUS_VF:
2596 m = (typeof(m)){"LPev12000", "PCIe IOV",
2597 "Obsolete, Unsupported Fibre Channel Adapter"};
2599 case PCI_DEVICE_ID_PROTEUS_PF:
2600 m = (typeof(m)){"LPev12000", "PCIe IOV",
2601 "Obsolete, Unsupported Fibre Channel Adapter"};
2603 case PCI_DEVICE_ID_PROTEUS_S:
2604 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2605 "Obsolete, Unsupported Fibre Channel Adapter"};
2607 case PCI_DEVICE_ID_TIGERSHARK:
2609 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2611 case PCI_DEVICE_ID_TOMCAT:
2613 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2615 case PCI_DEVICE_ID_FALCON:
2616 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2617 "EmulexSecure Fibre"};
2619 case PCI_DEVICE_ID_BALIUS:
2620 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2621 "Obsolete, Unsupported Fibre Channel Adapter"};
2623 case PCI_DEVICE_ID_LANCER_FC:
2624 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2626 case PCI_DEVICE_ID_LANCER_FC_VF:
2627 m = (typeof(m)){"LPe16000", "PCIe",
2628 "Obsolete, Unsupported Fibre Channel Adapter"};
2630 case PCI_DEVICE_ID_LANCER_FCOE:
2632 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2634 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2636 m = (typeof(m)){"OCe15100", "PCIe",
2637 "Obsolete, Unsupported FCoE"};
2639 case PCI_DEVICE_ID_LANCER_G6_FC:
2640 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2642 case PCI_DEVICE_ID_LANCER_G7_FC:
2643 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2645 case PCI_DEVICE_ID_LANCER_G7P_FC:
2646 m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
2648 case PCI_DEVICE_ID_SKYHAWK:
2649 case PCI_DEVICE_ID_SKYHAWK_VF:
2651 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2654 m = (typeof(m)){"Unknown", "", ""};
2658 if (mdp && mdp[0] == '\0')
2659 snprintf(mdp, 79, "%s", m.name);
2661 * OneConnect HBAs require special processing; they are all initiators,
2662 * and we append the port number to the end of the description.
2664 if (descp && descp[0] == '\0') {
2666 snprintf(descp, 255,
2667 "Emulex OneConnect %s, %s Initiator %s",
2670 else if (max_speed == 0)
2671 snprintf(descp, 255,
2673 m.name, m.bus, m.function);
2675 snprintf(descp, 255,
2676 "Emulex %s %d%s %s %s",
2677 m.name, max_speed, (GE) ? "GE" : "Gb",
2683 * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
2684 * @phba: pointer to lpfc hba data structure.
2685 * @pring: pointer to a IOCB ring.
2686 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2688 * This routine posts a given number of IOCBs with the associated DMA buffer
2689 * descriptors specified by the cnt argument to the given IOCB ring.
2692 * The number of IOCBs NOT able to be posted to the IOCB ring.
2695 lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2698 struct lpfc_iocbq *iocb;
2699 struct lpfc_dmabuf *mp1, *mp2;
2701 cnt += pring->missbufcnt;
2703 /* While there are buffers to post */
2705 /* Allocate buffer for command iocb */
2706 iocb = lpfc_sli_get_iocbq(phba);
2708 pring->missbufcnt = cnt;
2713 /* 2 buffers can be posted per command */
2714 /* Allocate buffer to post */
2715 mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2717 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2718 if (!mp1 || !mp1->virt) {
2720 lpfc_sli_release_iocbq(phba, iocb);
2721 pring->missbufcnt = cnt;
2725 INIT_LIST_HEAD(&mp1->list);
2726 /* Allocate buffer to post */
2728 mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2730 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2732 if (!mp2 || !mp2->virt) {
2734 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2736 lpfc_sli_release_iocbq(phba, iocb);
2737 pring->missbufcnt = cnt;
2741 INIT_LIST_HEAD(&mp2->list);
2746 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2747 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2748 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2749 icmd->ulpBdeCount = 1;
2752 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2753 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2754 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2756 icmd->ulpBdeCount = 2;
2759 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2762 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2764 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2768 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2772 lpfc_sli_release_iocbq(phba, iocb);
2773 pring->missbufcnt = cnt;
2776 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2778 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2780 pring->missbufcnt = 0;
2785 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2786 * @phba: pointer to lpfc hba data structure.
2788 * This routine posts initial receive IOCB buffers to the ELS ring. The
2789 * number of initial receive IOCB buffers, specified by LPFC_BUF_RING0, is
2790 * currently set to 64 IOCBs. SLI3 only.
2793 * 0 - success (currently always success)
2796 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2798 struct lpfc_sli *psli = &phba->sli;
2800 /* Ring 0, ELS / CT buffers */
2801 lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2802 /* Ring 2 - FCP no buffers needed */
2807 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
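/* S(N, V) is a 32-bit rotate-left of V by N bits, the ROTL primitive of
 * the SHA-1 style hash below. For example, S(5, 0x80000001) evaluates
 * to 0x00000030: the high-order bit wraps around into the low bits.
 */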
2810 * lpfc_sha_init - Set up initial array of hash table entries
2811 * @HashResultPointer: pointer to an array as hash table.
2813 * This routine sets up the initial values in the array of hash table entries
2817 lpfc_sha_init(uint32_t *HashResultPointer)
2819 HashResultPointer[0] = 0x67452301;
2820 HashResultPointer[1] = 0xEFCDAB89;
2821 HashResultPointer[2] = 0x98BADCFE;
2822 HashResultPointer[3] = 0x10325476;
2823 HashResultPointer[4] = 0xC3D2E1F0;
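/* These five words are the standard SHA-1 initial hash values
 * (H0-H4 from FIPS 180-1); the challenge/response digest computed
 * below therefore follows the SHA-1 compression function.
 */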
2827 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2828 * @HashResultPointer: pointer to an initial/result hash table.
2829 * @HashWorkingPointer: pointer to a working hash table.
2831 * This routine iterates an initial hash table pointed to by @HashResultPointer
2832 * with the values from the working hash table pointed to by @HashWorkingPointer.
2833 * The results are put back into the initial hash table and returned through
2834 * @HashResultPointer as the result hash table.
2837 lpfc_sha_iterate(uint32_t *HashResultPointer, uint32_t *HashWorkingPointer)
2841 uint32_t A, B, C, D, E;
2844 HashWorkingPointer[t] =
2846 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2848 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2849 } while (++t <= 79);
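/* The loop above expands the 16 input words into the 80-word SHA-1
 * message schedule: W[t] = ROTL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]).
 */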
2851 A = HashResultPointer[0];
2852 B = HashResultPointer[1];
2853 C = HashResultPointer[2];
2854 D = HashResultPointer[3];
2855 E = HashResultPointer[4];
2859 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2860 } else if (t < 40) {
2861 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2862 } else if (t < 60) {
2863 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2865 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2867 TEMP += S(5, A) + E + HashWorkingPointer[t];
2873 } while (++t <= 79);
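/* The round functions and constants above (Ch/Parity/Maj selections
 * with 0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC and 0xCA62C1D6) are the
 * standard SHA-1 round schedule for t = 0..79.
 */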
2875 HashResultPointer[0] += A;
2876 HashResultPointer[1] += B;
2877 HashResultPointer[2] += C;
2878 HashResultPointer[3] += D;
2879 HashResultPointer[4] += E;
2884 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2885 * @RandomChallenge: pointer to the entry of host challenge random number array.
2886 * @HashWorking: pointer to the entry of the working hash array.
2888 * This routine calculates the working hash array referred to by @HashWorking
2889 * from the challenge random numbers associated with the host, referred to by
2890 * @RandomChallenge. The result is put into the entry of the working hash
2891 * array and returned by reference through @HashWorking.
2894 lpfc_challenge_key(uint32_t *RandomChallenge, uint32_t *HashWorking)
2896 *HashWorking = (*RandomChallenge ^ *HashWorking);
2900 * lpfc_hba_init - Perform special handling for LC HBA initialization
2901 * @phba: pointer to lpfc hba data structure.
2902 * @hbainit: pointer to an array of unsigned 32-bit integers.
2904 * This routine performs the special handling for LC HBA initialization.
2907 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2910 uint32_t *HashWorking;
2911 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2913 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2917 HashWorking[0] = HashWorking[78] = *pwwnn++;
2918 HashWorking[1] = HashWorking[79] = *pwwnn;
2920 for (t = 0; t < 7; t++)
2921 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2923 lpfc_sha_init(hbainit);
2924 lpfc_sha_iterate(hbainit, HashWorking);
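/* Flow summary: the working block is seeded with the WWNN, mixed with
 * the adapter's RandomData challenge via lpfc_challenge_key (a per-word
 * XOR), and a single SHA-1 style compression over that block produces
 * the hbainit response.
 */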
2929 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2930 * @vport: pointer to a virtual N_Port data structure.
2932 * This routine performs the necessary cleanups before deleting the @vport.
2933 * It invokes the discovery state machine to perform necessary state
2934 * transitions and to release the ndlps associated with the @vport. Note,
2935 * the physical port is treated as @vport 0.
2938 lpfc_cleanup(struct lpfc_vport *vport)
2940 struct lpfc_hba *phba = vport->phba;
2941 struct lpfc_nodelist *ndlp, *next_ndlp;
2944 if (phba->link_state > LPFC_LINK_DOWN)
2945 lpfc_port_link_failure(vport);
2947 /* Clean up VMID resources */
2948 if (lpfc_is_vmid_enabled(phba))
2949 lpfc_vmid_vport_cleanup(vport);
2951 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2952 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2953 ndlp->nlp_DID == Fabric_DID) {
2954 /* Just free up ndlp with Fabric_DID for vports */
2959 if (ndlp->nlp_DID == Fabric_Cntl_DID &&
2960 ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2965 /* Fabric Ports not in UNMAPPED state are cleaned up in the
2968 if (ndlp->nlp_type & NLP_FABRIC &&
2969 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
2970 lpfc_disc_state_machine(vport, ndlp, NULL,
2971 NLP_EVT_DEVICE_RECOVERY);
2973 if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
2974 lpfc_disc_state_machine(vport, ndlp, NULL,
2978 /* This is a special case flush to return all
2979 * IOs before entering this loop. There are
2980 * two points in the code where a flush is
2981 * avoided if the FC_UNLOADING flag is set:
2982 * one is in the multipool destroy
2983 * (this prevents a crash) and the other is
2984 * in the nvme abort handler (this also
2985 * prevents a crash). Both of these exceptions
2986 * are cases where the slot is still accessible.
2987 * The flush here is performed only when the pci slot
2990 if (vport->load_flag & FC_UNLOADING &&
2991 pci_channel_offline(phba->pcidev))
2992 lpfc_sli_flush_io_rings(vport->phba);
2994 /* At this point, ALL ndlp's should be gone
2995 * because of the previous NLP_EVT_DEVICE_RM.
2996 * Let's wait for this to happen, if needed.
2998 while (!list_empty(&vport->fc_nodes)) {
3000 lpfc_printf_vlog(vport, KERN_ERR,
3002 "0233 Nodelist not empty\n");
3003 list_for_each_entry_safe(ndlp, next_ndlp,
3004 &vport->fc_nodes, nlp_listp) {
3005 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
3007 "0282 did:x%x ndlp:x%px "
3008 "refcnt:%d xflags x%x nflag x%x\n",
3009 ndlp->nlp_DID, (void *)ndlp,
3010 kref_read(&ndlp->kref),
3011 ndlp->fc4_xpt_flags,
3017 /* Wait for any activity on ndlps to settle */
3020 lpfc_cleanup_vports_rrqs(vport, NULL);
3024 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
3025 * @vport: pointer to a virtual N_Port data structure.
3027 * This routine stops all the timers associated with a @vport. This function
3028 * is invoked before disabling or deleting a @vport. Note that the physical
3029 * port is treated as @vport 0.
3032 lpfc_stop_vport_timers(struct lpfc_vport *vport)
3034 del_timer_sync(&vport->els_tmofunc);
3035 del_timer_sync(&vport->delayed_disc_tmo);
3036 lpfc_can_disctmo(vport);
3041 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3042 * @phba: pointer to lpfc hba data structure.
3044 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
3045 * caller of this routine should already hold the host lock.
3048 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3050 /* Clear pending FCF rediscovery wait flag */
3051 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3053 /* Now, try to stop the timer */
3054 del_timer(&phba->fcf.redisc_wait);
3058 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3059 * @phba: pointer to lpfc hba data structure.
3061 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
3062 * checks whether the FCF rediscovery wait timer is pending with the host
3063 * lock held before proceeding with disabling the timer and clearing the
3064 * wait timer pending flag.
3067 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3069 spin_lock_irq(&phba->hbalock);
3070 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3071 /* FCF rediscovery timer already fired or stopped */
3072 spin_unlock_irq(&phba->hbalock);
3075 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3076 /* Clear failover in progress flags */
3077 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
3078 spin_unlock_irq(&phba->hbalock);
3082 * lpfc_cmf_stop - Stop CMF processing
3083 * @phba: pointer to lpfc hba data structure.
3085 * This is called when the link goes down or if CMF mode is turned OFF.
3086 * It is also called when going offline or unloading, just before the
3087 * congestion info buffer is unregistered.
3090 lpfc_cmf_stop(struct lpfc_hba *phba)
3093 struct lpfc_cgn_stat *cgs;
3095 /* We only do something if CMF is enabled */
3096 if (!phba->sli4_hba.pc_sli4_params.cmf)
3099 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3100 "6221 Stop CMF / Cancel Timer\n");
3102 /* Cancel the CMF timer */
3103 hrtimer_cancel(&phba->cmf_timer);
3105 /* Zero CMF counters */
3106 atomic_set(&phba->cmf_busy, 0);
3107 for_each_present_cpu(cpu) {
3108 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3109 atomic64_set(&cgs->total_bytes, 0);
3110 atomic64_set(&cgs->rcv_bytes, 0);
3111 atomic_set(&cgs->rx_io_cnt, 0);
3112 atomic64_set(&cgs->rx_latency, 0);
3114 atomic_set(&phba->cmf_bw_wait, 0);
3116 /* Resume any blocked IO - Queue unblock on workqueue */
3117 queue_work(phba->wq, &phba->unblock_request_work);
3120 static inline uint64_t
3121 lpfc_get_max_line_rate(struct lpfc_hba *phba)
3123 uint64_t rate = lpfc_sli_port_speed_get(phba);
3125 return ((((unsigned long)rate) * 1024 * 1024) / 10);
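/* The conversion above turns the reported link speed into a byte rate:
 * multiplying by 1024 * 1024 scales megabits to bits, and the divide by
 * 10 (rather than 8) likely allows for the 10-bits-per-byte line
 * encoding used on Fibre Channel links, giving usable bytes per second.
 */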
3129 lpfc_cmf_signal_init(struct lpfc_hba *phba)
3131 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3132 "6223 Signal CMF init\n");
3134 /* Use the new fc_linkspeed to recalculate */
3135 phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
3136 phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
3137 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
3138 phba->cmf_interval_rate, 1000);
3139 phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;
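/* Worked out: cmf_link_byte_count = line rate (bytes/sec) *
 * LPFC_CMF_INTERVAL (msec) / 1000, i.e. the number of bytes the link
 * can carry in one CMF timer interval. At init, the per-interval
 * budget starts at the full line-rate value.
 */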
3141 /* This is a signal to firmware to sync up CMF BW with link speed */
3142 lpfc_issue_cmf_sync_wqe(phba, 0, 0);
3146 * lpfc_cmf_start - Start CMF processing
3147 * @phba: pointer to lpfc hba data structure.
3149 * This is called when the link comes up or if CMF mode is changed from OFF
3150 * to Monitor or Managed.
3153 lpfc_cmf_start(struct lpfc_hba *phba)
3155 struct lpfc_cgn_stat *cgs;
3158 /* We only do something if CMF is enabled */
3159 if (!phba->sli4_hba.pc_sli4_params.cmf ||
3160 phba->cmf_active_mode == LPFC_CFG_OFF)
3163 /* Reinitialize congestion buffer info */
3164 lpfc_init_congestion_buf(phba);
3166 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
3167 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
3168 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
3169 atomic_set(&phba->cgn_sync_warn_cnt, 0);
3171 atomic_set(&phba->cmf_busy, 0);
3172 for_each_present_cpu(cpu) {
3173 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3174 atomic64_set(&cgs->total_bytes, 0);
3175 atomic64_set(&cgs->rcv_bytes, 0);
3176 atomic_set(&cgs->rx_io_cnt, 0);
3177 atomic64_set(&cgs->rx_latency, 0);
3179 phba->cmf_latency.tv_sec = 0;
3180 phba->cmf_latency.tv_nsec = 0;
3182 lpfc_cmf_signal_init(phba);
3184 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3185 "6222 Start CMF / Timer\n");
3187 phba->cmf_timer_cnt = 0;
3188 hrtimer_start(&phba->cmf_timer,
3189 ktime_set(0, LPFC_CMF_INTERVAL * 1000000),
3191 /* Setup for latency check in IO cmpl routines */
3192 ktime_get_real_ts64(&phba->cmf_latency);
3194 atomic_set(&phba->cmf_bw_wait, 0);
3195 atomic_set(&phba->cmf_stop_io, 0);
3199 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
3200 * @phba: pointer to lpfc hba data structure.
3202 * This routine stops all the timers associated with an HBA. This function is
3203 * invoked before either putting an HBA offline or unloading the driver.
3206 lpfc_stop_hba_timers(struct lpfc_hba *phba)
3209 lpfc_stop_vport_timers(phba->pport);
3210 cancel_delayed_work_sync(&phba->eq_delay_work);
3211 cancel_delayed_work_sync(&phba->idle_stat_delay_work);
3212 del_timer_sync(&phba->sli.mbox_tmo);
3213 del_timer_sync(&phba->fabric_block_timer);
3214 del_timer_sync(&phba->eratt_poll);
3215 del_timer_sync(&phba->hb_tmofunc);
3216 if (phba->sli_rev == LPFC_SLI_REV4) {
3217 del_timer_sync(&phba->rrq_tmr);
3218 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
3220 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
3222 switch (phba->pci_dev_grp) {
3223 case LPFC_PCI_DEV_LP:
3224 /* Stop any LightPulse device specific driver timers */
3225 del_timer_sync(&phba->fcp_poll_timer);
3227 case LPFC_PCI_DEV_OC:
3228 /* Stop any OneConnect device specific driver timers */
3229 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3232 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3233 "0297 Invalid device group (x%x)\n",
3241 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
3242 * @phba: pointer to lpfc hba data structure.
3243 * @mbx_action: flag for mailbox no wait action.
3245 * This routine marks an HBA's management interface as blocked. Once the HBA's
3246 * management interface is marked as blocked, all user space access to
3247 * the HBA, whether from the sysfs interface or the libdfc interface, will
3248 * be blocked. The HBA is set to block the management interface when the
3249 * driver prepares the HBA interface for online or offline.
3252 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
3254 unsigned long iflag;
3255 uint8_t actcmd = MBX_HEARTBEAT;
3256 unsigned long timeout;
3258 spin_lock_irqsave(&phba->hbalock, iflag);
3259 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
3260 spin_unlock_irqrestore(&phba->hbalock, iflag);
3261 if (mbx_action == LPFC_MBX_NO_WAIT)
3263 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
3264 spin_lock_irqsave(&phba->hbalock, iflag);
3265 if (phba->sli.mbox_active) {
3266 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
3267 /* Determine how long we might wait for the active mailbox
3268 * command to be gracefully completed by firmware.
3270 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3271 phba->sli.mbox_active) * 1000) + jiffies;
3273 spin_unlock_irqrestore(&phba->hbalock, iflag);
3275 /* Wait for the outstanding mailbox command to complete */
3276 while (phba->sli.mbox_active) {
3277 /* Check active mailbox complete status every 2ms */
3279 if (time_after(jiffies, timeout)) {
3280 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3281 "2813 Mgmt IO is Blocked %x "
3282 "- mbox cmd %x still active\n",
3283 phba->sli.sli_flag, actcmd);
3290 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
3291 * @phba: pointer to lpfc hba data structure.
3293 * Allocate RPIs for all active remote nodes. This is needed whenever
3294 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
3295 * is to fix up the temporary rpi assignments.
3298 lpfc_sli4_node_prep(struct lpfc_hba *phba)
3300 struct lpfc_nodelist *ndlp, *next_ndlp;
3301 struct lpfc_vport **vports;
3304 if (phba->sli_rev != LPFC_SLI_REV4)
3307 vports = lpfc_create_vport_work_array(phba);
3311 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3312 if (vports[i]->load_flag & FC_UNLOADING)
3315 list_for_each_entry_safe(ndlp, next_ndlp,
3316 &vports[i]->fc_nodes,
3318 rpi = lpfc_sli4_alloc_rpi(phba);
3319 if (rpi == LPFC_RPI_ALLOC_ERROR) {
3320 /* TODO print log? */
3323 ndlp->nlp_rpi = rpi;
3324 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3325 LOG_NODE | LOG_DISCOVERY,
3326 "0009 Assign RPI x%x to ndlp x%px "
3327 "DID:x%06x flg:x%x\n",
3328 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3332 lpfc_destroy_vport_work_array(phba, vports);
3336 * lpfc_create_expedite_pool - create expedite pool
3337 * @phba: pointer to lpfc hba data structure.
3339 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
3340 * to the expedite pool and marks them as expedite.
3342 static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3344 struct lpfc_sli4_hdw_queue *qp;
3345 struct lpfc_io_buf *lpfc_ncmd;
3346 struct lpfc_io_buf *lpfc_ncmd_next;
3347 struct lpfc_epd_pool *epd_pool;
3348 unsigned long iflag;
3350 epd_pool = &phba->epd_pool;
3351 qp = &phba->sli4_hba.hdwq[0];
3353 spin_lock_init(&epd_pool->lock);
3354 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3355 spin_lock(&epd_pool->lock);
3356 INIT_LIST_HEAD(&epd_pool->list);
3357 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3358 &qp->lpfc_io_buf_list_put, list) {
3359 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3360 lpfc_ncmd->expedite = true;
3363 if (epd_pool->count >= XRI_BATCH)
3366 spin_unlock(&epd_pool->lock);
3367 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
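/* The expedite pool is capped at one XRI_BATCH of buffers (see the
 * count check in the loop above); these marked buffers are held in
 * reserve for expedited I/O when the regular per-HWQ pools run dry.
 */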
3371 * lpfc_destroy_expedite_pool - destroy expedite pool
3372 * @phba: pointer to lpfc hba data structure.
3374 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
3375 * of HWQ 0 and clears the expedite mark.
3377 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3379 struct lpfc_sli4_hdw_queue *qp;
3380 struct lpfc_io_buf *lpfc_ncmd;
3381 struct lpfc_io_buf *lpfc_ncmd_next;
3382 struct lpfc_epd_pool *epd_pool;
3383 unsigned long iflag;
3385 epd_pool = &phba->epd_pool;
3386 qp = &phba->sli4_hba.hdwq[0];
3388 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3389 spin_lock(&epd_pool->lock);
3390 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3391 &epd_pool->list, list) {
3392 list_move_tail(&lpfc_ncmd->list,
3393 &qp->lpfc_io_buf_list_put);
3394 lpfc_ncmd->expedite = false;
3398 spin_unlock(&epd_pool->lock);
3399 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3403 * lpfc_create_multixri_pools - create multi-XRI pools
3404 * @phba: pointer to lpfc hba data structure.
3406 * This routine initializes the public and private pools per HWQ, then moves
3407 * XRIs from lpfc_io_buf_list_put to the public pool. High and low watermarks are also
3410 void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3415 struct lpfc_io_buf *lpfc_ncmd;
3416 struct lpfc_io_buf *lpfc_ncmd_next;
3417 unsigned long iflag;
3418 struct lpfc_sli4_hdw_queue *qp;
3419 struct lpfc_multixri_pool *multixri_pool;
3420 struct lpfc_pbl_pool *pbl_pool;
3421 struct lpfc_pvt_pool *pvt_pool;
3423 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3424 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3425 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3426 phba->sli4_hba.io_xri_cnt);
3428 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3429 lpfc_create_expedite_pool(phba);
3431 hwq_count = phba->cfg_hdw_queue;
3432 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3434 for (i = 0; i < hwq_count; i++) {
3435 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3437 if (!multixri_pool) {
3438 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3439 "1238 Failed to allocate memory for "
3442 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3443 lpfc_destroy_expedite_pool(phba);
3447 qp = &phba->sli4_hba.hdwq[j];
3448 kfree(qp->p_multixri_pool);
3451 phba->cfg_xri_rebalancing = 0;
3455 qp = &phba->sli4_hba.hdwq[i];
3456 qp->p_multixri_pool = multixri_pool;
3458 multixri_pool->xri_limit = count_per_hwq;
3459 multixri_pool->rrb_next_hwqid = i;
3461 /* Deal with public free xri pool */
3462 pbl_pool = &multixri_pool->pbl_pool;
3463 spin_lock_init(&pbl_pool->lock);
3464 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3465 spin_lock(&pbl_pool->lock);
3466 INIT_LIST_HEAD(&pbl_pool->list);
3467 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3468 &qp->lpfc_io_buf_list_put, list) {
3469 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3473 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3474 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3475 pbl_pool->count, i);
3476 spin_unlock(&pbl_pool->lock);
3477 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3479 /* Deal with private free xri pool */
3480 pvt_pool = &multixri_pool->pvt_pool;
3481 pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3482 pvt_pool->low_watermark = XRI_BATCH;
3483 spin_lock_init(&pvt_pool->lock);
3484 spin_lock_irqsave(&pvt_pool->lock, iflag);
3485 INIT_LIST_HEAD(&pvt_pool->list);
3486 pvt_pool->count = 0;
3487 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
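/* Watermarks used for XRI rebalancing: the private pool may grow to
 * half of this HWQ's xri_limit (high_watermark) and is considered low
 * once it drops below one XRI_BATCH (low_watermark), at which point it
 * is replenished from the public pbl_pool.
 */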
3492 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3493 * @phba: pointer to lpfc hba data structure.
3495 * This routine returns XRIs from public/private to lpfc_io_buf_list_put.
3497 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3501 struct lpfc_io_buf *lpfc_ncmd;
3502 struct lpfc_io_buf *lpfc_ncmd_next;
3503 unsigned long iflag;
3504 struct lpfc_sli4_hdw_queue *qp;
3505 struct lpfc_multixri_pool *multixri_pool;
3506 struct lpfc_pbl_pool *pbl_pool;
3507 struct lpfc_pvt_pool *pvt_pool;
3509 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3510 lpfc_destroy_expedite_pool(phba);
3512 if (!(phba->pport->load_flag & FC_UNLOADING))
3513 lpfc_sli_flush_io_rings(phba);
3515 hwq_count = phba->cfg_hdw_queue;
3517 for (i = 0; i < hwq_count; i++) {
3518 qp = &phba->sli4_hba.hdwq[i];
3519 multixri_pool = qp->p_multixri_pool;
3523 qp->p_multixri_pool = NULL;
3525 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3527 /* Deal with public free xri pool */
3528 pbl_pool = &multixri_pool->pbl_pool;
3529 spin_lock(&pbl_pool->lock);
3531 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3532 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3533 pbl_pool->count, i);
3535 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3536 &pbl_pool->list, list) {
3537 list_move_tail(&lpfc_ncmd->list,
3538 &qp->lpfc_io_buf_list_put);
3543 INIT_LIST_HEAD(&pbl_pool->list);
3544 pbl_pool->count = 0;
3546 spin_unlock(&pbl_pool->lock);
3548 /* Deal with private free xri pool */
3549 pvt_pool = &multixri_pool->pvt_pool;
3550 spin_lock(&pvt_pool->lock);
3552 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3553 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3554 pvt_pool->count, i);
3556 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3557 &pvt_pool->list, list) {
3558 list_move_tail(&lpfc_ncmd->list,
3559 &qp->lpfc_io_buf_list_put);
3564 INIT_LIST_HEAD(&pvt_pool->list);
3565 pvt_pool->count = 0;
3567 spin_unlock(&pvt_pool->lock);
3568 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3570 kfree(multixri_pool);
3575 * lpfc_online - Initialize and bring a HBA online
3576 * @phba: pointer to lpfc hba data structure.
3578 * This routine initializes the HBA and brings it online. During this
3579 * process, the management interface is blocked to prevent user space access
3580 * to the HBA from interfering with the driver initialization.
3587 lpfc_online(struct lpfc_hba *phba)
3589 struct lpfc_vport *vport;
3590 struct lpfc_vport **vports;
3592 bool vpis_cleared = false;
3596 vport = phba->pport;
3598 if (!(vport->fc_flag & FC_OFFLINE_MODE))
3601 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3602 "0458 Bring Adapter online\n");
3604 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3606 if (phba->sli_rev == LPFC_SLI_REV4) {
3607 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3608 lpfc_unblock_mgmt_io(phba);
3611 spin_lock_irq(&phba->hbalock);
3612 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3613 vpis_cleared = true;
3614 spin_unlock_irq(&phba->hbalock);
3616 /* Reestablish the local initiator port.
3617 * The offline process destroyed the previous lport.
3619 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3620 !phba->nvmet_support) {
3621 error = lpfc_nvme_create_localport(phba->pport);
3623 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3624 "6132 NVME restore reg failed "
3625 "on nvmei error x%x\n", error);
3628 lpfc_sli_queue_init(phba);
3629 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
3630 lpfc_unblock_mgmt_io(phba);
3635 vports = lpfc_create_vport_work_array(phba);
3636 if (vports != NULL) {
3637 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3638 struct Scsi_Host *shost;
3639 shost = lpfc_shost_from_vport(vports[i]);
3640 spin_lock_irq(shost->host_lock);
3641 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3642 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3643 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3644 if (phba->sli_rev == LPFC_SLI_REV4) {
3645 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3646 if ((vpis_cleared) &&
3647 (vports[i]->port_type !=
3648 LPFC_PHYSICAL_PORT))
3651 spin_unlock_irq(shost->host_lock);
3654 lpfc_destroy_vport_work_array(phba, vports);
3656 if (phba->cfg_xri_rebalancing)
3657 lpfc_create_multixri_pools(phba);
3659 lpfc_cpuhp_add(phba);
3661 lpfc_unblock_mgmt_io(phba);
3666 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
3667 * @phba: pointer to lpfc hba data structure.
3669 * This routine marks an HBA's management interface as not blocked. Once the
3670 * HBA's management interface is marked as not blocked, all user space
3671 * access to the HBA, whether from the sysfs interface or the libdfc
3672 * interface, will be allowed. The HBA is set to block the management
3673 * interface when the driver prepares the HBA interface for online or
3674 * offline, and is set to unblock the management interface afterwards.
3677 lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
3679 unsigned long iflag;
3681 spin_lock_irqsave(&phba->hbalock, iflag);
3682 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3683 spin_unlock_irqrestore(&phba->hbalock, iflag);
3687 * lpfc_offline_prep - Prepare a HBA to be brought offline
3688 * @phba: pointer to lpfc hba data structure.
3689 * @mbx_action: flag for mailbox shutdown action.
3691 * This routine is invoked to prepare a HBA to be brought offline. It performs
3692 * unregistration login to all the nodes on all vports and flushes the mailbox
3693 * queue to make it ready to be brought offline.
3696 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3698 struct lpfc_vport *vport = phba->pport;
3699 struct lpfc_nodelist *ndlp, *next_ndlp;
3700 struct lpfc_vport **vports;
3701 struct Scsi_Host *shost;
3706 if (vport->fc_flag & FC_OFFLINE_MODE)
3709 lpfc_block_mgmt_io(phba, mbx_action);
3711 lpfc_linkdown(phba);
3713 offline = pci_channel_offline(phba->pcidev);
3714 hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
3716 /* Issue an unreg_login to all nodes on all vports */
3717 vports = lpfc_create_vport_work_array(phba);
3718 if (vports != NULL) {
3719 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3720 if (vports[i]->load_flag & FC_UNLOADING)
3722 shost = lpfc_shost_from_vport(vports[i]);
3723 spin_lock_irq(shost->host_lock);
3724 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3725 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3726 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3727 spin_unlock_irq(shost->host_lock);
3729 shost = lpfc_shost_from_vport(vports[i]);
3730 list_for_each_entry_safe(ndlp, next_ndlp,
3731 &vports[i]->fc_nodes,
3734 spin_lock_irq(&ndlp->lock);
3735 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3736 spin_unlock_irq(&ndlp->lock);
3738 if (offline || hba_pci_err) {
3739 spin_lock_irq(&ndlp->lock);
3740 ndlp->nlp_flag &= ~(NLP_UNREG_INP |
3741 NLP_RPI_REGISTERED);
3742 spin_unlock_irq(&ndlp->lock);
3743 if (phba->sli_rev == LPFC_SLI_REV4)
3744 lpfc_sli_rpi_release(vports[i],
3747 lpfc_unreg_rpi(vports[i], ndlp);
3750 * Whenever an SLI4 port goes offline, free the
3751 * RPI. Get a new RPI when the adapter port
3752 * comes back online.
3754 if (phba->sli_rev == LPFC_SLI_REV4) {
3755 lpfc_printf_vlog(vports[i], KERN_INFO,
3756 LOG_NODE | LOG_DISCOVERY,
3757 "0011 Free RPI x%x on "
3758 "ndlp: x%px did x%x\n",
3759 ndlp->nlp_rpi, ndlp,
3761 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3762 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3765 if (ndlp->nlp_type & NLP_FABRIC) {
3766 lpfc_disc_state_machine(vports[i], ndlp,
3767 NULL, NLP_EVT_DEVICE_RECOVERY);
3769 /* Don't remove the node unless the node
3770 * has been unregistered with the
3771 * transport, and we're not in recovery
3772 * before dev_loss_tmo triggered.
3773 * Otherwise, let dev_loss take care of
3776 if (!(ndlp->save_flags &
3777 NLP_IN_RECOV_POST_DEV_LOSS) &&
3778 !(ndlp->fc4_xpt_flags &
3779 (NVME_XPT_REGD | SCSI_XPT_REGD)))
3780 lpfc_disc_state_machine
3788 lpfc_destroy_vport_work_array(phba, vports);
3790 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3793 flush_workqueue(phba->wq);
3797 * lpfc_offline - Bring a HBA offline
3798 * @phba: pointer to lpfc hba data structure.
3800 * This routine actually brings a HBA offline. It stops all the timers
3801 * associated with the HBA, brings down the SLI layer, and eventually
3802 * marks the HBA as in offline state for the upper layer protocol.
3805 lpfc_offline(struct lpfc_hba *phba)
3807 struct Scsi_Host *shost;
3808 struct lpfc_vport **vports;
3811 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3814 /* stop port and all timers associated with this hba */
3815 lpfc_stop_port(phba);
3817 /* Tear down the local and target port registrations. The
3818 * nvme transports need to clean up.
3820 lpfc_nvmet_destroy_targetport(phba);
3821 lpfc_nvme_destroy_localport(phba->pport);
3823 vports = lpfc_create_vport_work_array(phba);
3825 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3826 lpfc_stop_vport_timers(vports[i]);
3827 lpfc_destroy_vport_work_array(phba, vports);
3828 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3829 "0460 Bring Adapter offline\n");
3830 /* Bring down the SLI Layer and cleanup. The HBA is offline
3832 lpfc_sli_hba_down(phba);
3833 spin_lock_irq(&phba->hbalock);
3835 spin_unlock_irq(&phba->hbalock);
3836 vports = lpfc_create_vport_work_array(phba);
3838 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3839 shost = lpfc_shost_from_vport(vports[i]);
3840 spin_lock_irq(shost->host_lock);
3841 vports[i]->work_port_events = 0;
3842 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3843 spin_unlock_irq(shost->host_lock);
3845 lpfc_destroy_vport_work_array(phba, vports);
3846 /* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled
3849 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3850 __lpfc_cpuhp_remove(phba);
3852 if (phba->cfg_xri_rebalancing)
3853 lpfc_destroy_multixri_pools(phba);
3857 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3858 * @phba: pointer to lpfc hba data structure.
3860 * This routine is to free all the SCSI buffers and IOCBs from the driver
3861 * list back to the kernel. It is called from lpfc_pci_remove_one to free
3862 * the internal resources before the device is removed from the system.
3865 lpfc_scsi_free(struct lpfc_hba *phba)
3867 struct lpfc_io_buf *sb, *sb_next;
3869 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3872 spin_lock_irq(&phba->hbalock);
3874 /* Release all the lpfc_scsi_bufs maintained by this host. */
3876 spin_lock(&phba->scsi_buf_list_put_lock);
3877 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3879 list_del(&sb->list);
3880 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3883 phba->total_scsi_bufs--;
3885 spin_unlock(&phba->scsi_buf_list_put_lock);
3887 spin_lock(&phba->scsi_buf_list_get_lock);
3888 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3890 list_del(&sb->list);
3891 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3894 phba->total_scsi_bufs--;
3896 spin_unlock(&phba->scsi_buf_list_get_lock);
3897 spin_unlock_irq(&phba->hbalock);
3901 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
3902 * @phba: pointer to lpfc hba data structure.
3904 * This routine is to free all the IO buffers and IOCBs from the driver
3905 * list back to the kernel. It is called from lpfc_pci_remove_one to free
3906 * the internal resources before the device is removed from the system.
3909 lpfc_io_free(struct lpfc_hba *phba)
3911 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
3912 struct lpfc_sli4_hdw_queue *qp;
3915 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3916 qp = &phba->sli4_hba.hdwq[idx];
3917 /* Release all the lpfc_nvme_bufs maintained by this host. */
3918 spin_lock(&qp->io_buf_list_put_lock);
3919 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3920 &qp->lpfc_io_buf_list_put,
3922 list_del(&lpfc_ncmd->list);
3924 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3925 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3926 if (phba->cfg_xpsgl && !phba->nvmet_support)
3927 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3928 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3930 qp->total_io_bufs--;
3932 spin_unlock(&qp->io_buf_list_put_lock);
3934 spin_lock(&qp->io_buf_list_get_lock);
3935 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3936 &qp->lpfc_io_buf_list_get,
3938 list_del(&lpfc_ncmd->list);
3940 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3941 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3942 if (phba->cfg_xpsgl && !phba->nvmet_support)
3943 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3944 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3946 qp->total_io_bufs--;
3948 spin_unlock(&qp->io_buf_list_get_lock);
3953 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
3954 * @phba: pointer to lpfc hba data structure.
3956 * This routine first calculates the sizes of the current els and allocated
3957 * scsi sgl lists, and then goes through all sgls to update the physical
3958 * XRIs assigned due to port function reset. During port initialization, the
3959 * current els and allocated scsi sgl lists are 0s.
3962 * 0 - successful (for now, it always returns 0)
3965 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
3967 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3968 uint16_t i, lxri, xri_cnt, els_xri_cnt;
3969 LIST_HEAD(els_sgl_list);
3973 * update on pci function's els xri-sgl list
3975 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3977 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3978 /* els xri-sgl expanded */
3979 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3980 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3981 "3157 ELS xri-sgl count increased from "
3982 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3984 /* allocate the additional els sgls */
3985 for (i = 0; i < xri_cnt; i++) {
3986 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3988 if (sglq_entry == NULL) {
3989 lpfc_printf_log(phba, KERN_ERR,
3991 "2562 Failure to allocate an "
3992 "ELS sgl entry:%d\n", i);
3996 sglq_entry->buff_type = GEN_BUFF_TYPE;
3997 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3999 if (sglq_entry->virt == NULL) {
4001 lpfc_printf_log(phba, KERN_ERR,
4003 "2563 Failure to allocate an "
4004 "ELS mbuf:%d\n", i);
4008 sglq_entry->sgl = sglq_entry->virt;
4009 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4010 sglq_entry->state = SGL_FREED;
4011 list_add_tail(&sglq_entry->list, &els_sgl_list);
4013 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4014 list_splice_init(&els_sgl_list,
4015 &phba->sli4_hba.lpfc_els_sgl_list);
4016 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4017 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
4018 /* els xri-sgl shrunk */
4019 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
4020 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4021 "3158 ELS xri-sgl count decreased from "
4022 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
4024 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4025 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
4027 /* release extra els sgls from list */
4028 for (i = 0; i < xri_cnt; i++) {
4029 list_remove_head(&els_sgl_list,
4030 sglq_entry, struct lpfc_sglq, list);
4032 __lpfc_mbuf_free(phba, sglq_entry->virt,
4037 list_splice_init(&els_sgl_list,
4038 &phba->sli4_hba.lpfc_els_sgl_list);
4039 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4041 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4042 "3163 ELS xri-sgl count unchanged: %d\n",
4044 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
4046 /* update xris to els sgls on the list */
4048 sglq_entry_next = NULL;
4049 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4050 &phba->sli4_hba.lpfc_els_sgl_list, list) {
4051 lxri = lpfc_sli4_next_xritag(phba);
4052 if (lxri == NO_XRI) {
4053 lpfc_printf_log(phba, KERN_ERR,
4055 "2400 Failed to allocate xri for "
4060 sglq_entry->sli4_lxritag = lxri;
4061 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4066 lpfc_free_els_sgl_list(phba);
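/*
 * Illustrative sketch (not driver code): the reconcile-by-delta pattern the
 * update above follows. Only the difference between the currently allocated
 * count and the newly required count is allocated or freed; the surviving
 * entries are then re-tagged with fresh XRIs. All names below are
 * hypothetical.
 */
static inline int sgl_pool_delta(int have, int want, int *to_alloc, int *to_free)
{
	*to_alloc = (want > have) ? want - have : 0;	/* expand case */
	*to_free  = (have > want) ? have - want : 0;	/* shrink case */
	return want;					/* size once reconciled */
}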
4071 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
4072 * @phba: pointer to lpfc hba data structure.
4074 * This routine first calculates the sizes of the current els and allocated
4075 * scsi sgl lists, and then goes through all sgls to update the physical
4076 * XRIs assigned due to port function reset. During port initialization, the
4077 * current els and allocated scsi sgl lists are 0s.
4080 * 0 - successful (for now, it always returns 0)
4083 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
4085 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4086 uint16_t i, lxri, xri_cnt, els_xri_cnt;
4087 uint16_t nvmet_xri_cnt;
4088 LIST_HEAD(nvmet_sgl_list);
4092 * update on pci function's nvmet xri-sgl list
4094 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4096 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
4097 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4098 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
4099 /* nvmet xri-sgl expanded */
4100 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
4101 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4102 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
4103 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
4104 /* allocate the additional nvmet sgls */
4105 for (i = 0; i < xri_cnt; i++) {
4106 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
4108 if (sglq_entry == NULL) {
4109 lpfc_printf_log(phba, KERN_ERR,
4111 "6303 Failure to allocate an "
4112 "NVMET sgl entry:%d\n", i);
4116 sglq_entry->buff_type = NVMET_BUFF_TYPE;
4117 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
4119 if (sglq_entry->virt == NULL) {
4121 lpfc_printf_log(phba, KERN_ERR,
4123 "6304 Failure to allocate an "
4124 "NVMET buf:%d\n", i);
4128 sglq_entry->sgl = sglq_entry->virt;
4129 memset(sglq_entry->sgl, 0,
4130 phba->cfg_sg_dma_buf_size);
4131 sglq_entry->state = SGL_FREED;
4132 list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
4134 spin_lock_irq(&phba->hbalock);
4135 spin_lock(&phba->sli4_hba.sgl_list_lock);
4136 list_splice_init(&nvmet_sgl_list,
4137 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4138 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4139 spin_unlock_irq(&phba->hbalock);
4140 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
4141 /* nvmet xri-sgl shrunk */
4142 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
4143 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4144 "6305 NVMET xri-sgl count decreased from "
4145 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
4147 spin_lock_irq(&phba->hbalock);
4148 spin_lock(&phba->sli4_hba.sgl_list_lock);
4149 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
4151 /* release extra nvmet sgls from list */
4152 for (i = 0; i < xri_cnt; i++) {
4153 list_remove_head(&nvmet_sgl_list,
4154 sglq_entry, struct lpfc_sglq, list);
4156 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
4161 list_splice_init(&nvmet_sgl_list,
4162 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4163 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4164 spin_unlock_irq(&phba->hbalock);
4166 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4167 "6306 NVMET xri-sgl count unchanged: %d\n",
4169 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
4171 /* update xris to nvmet sgls on the list */
4173 sglq_entry_next = NULL;
4174 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4175 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
4176 lxri = lpfc_sli4_next_xritag(phba);
4177 if (lxri == NO_XRI) {
4178 lpfc_printf_log(phba, KERN_ERR,
4180 "6307 Failed to allocate xri for "
4185 sglq_entry->sli4_lxritag = lxri;
4186 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4191 lpfc_free_nvmet_sgl_list(phba);
4196 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
4199 struct lpfc_sli4_hdw_queue *qp;
4200 struct lpfc_io_buf *lpfc_cmd;
4201 struct lpfc_io_buf *iobufp, *prev_iobufp;
4202 int idx, cnt, xri, inserted;
4205 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4206 qp = &phba->sli4_hba.hdwq[idx];
4207 spin_lock_irq(&qp->io_buf_list_get_lock);
4208 spin_lock(&qp->io_buf_list_put_lock);
4210 /* Take everything off the get and put lists */
4211 list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
4212 list_splice(&qp->lpfc_io_buf_list_put, &blist);
4213 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
4214 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
4215 cnt += qp->get_io_bufs + qp->put_io_bufs;
4216 qp->get_io_bufs = 0;
4217 qp->put_io_bufs = 0;
4218 qp->total_io_bufs = 0;
4219 spin_unlock(&qp->io_buf_list_put_lock);
4220 spin_unlock_irq(&qp->io_buf_list_get_lock);
4224 * Take IO buffers off blist and put on cbuf sorted by XRI.
4225 * This is because POST_SGL takes a sequential range of XRIs
4226 * to post to the firmware.
4228 for (idx = 0; idx < cnt; idx++) {
4229 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
4233 list_add_tail(&lpfc_cmd->list, cbuf);
4236 xri = lpfc_cmd->cur_iocbq.sli4_xritag;
4239 list_for_each_entry(iobufp, cbuf, list) {
4240 if (xri < iobufp->cur_iocbq.sli4_xritag) {
4242 list_add(&lpfc_cmd->list,
4243 &prev_iobufp->list);
4245 list_add(&lpfc_cmd->list, cbuf);
4249 prev_iobufp = iobufp;
4252 list_add_tail(&lpfc_cmd->list, cbuf);
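/*
 * Illustrative sketch (not driver code): the ordered insert performed above,
 * reduced to a minimal singly linked list keyed by xritag, so that a walk of
 * the result yields the ascending (sequential) XRI range that POST_SGL
 * expects. The struct and function names are hypothetical.
 */
struct xri_node {
	int xritag;
	struct xri_node *next;
};

static void xri_sorted_insert(struct xri_node **head, struct xri_node *n)
{
	struct xri_node **pp = head;

	while (*pp && (*pp)->xritag < n->xritag)
		pp = &(*pp)->next;	/* stop before the first larger tag */
	n->next = *pp;
	*pp = n;
}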
4258 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4260 struct lpfc_sli4_hdw_queue *qp;
4261 struct lpfc_io_buf *lpfc_cmd;
4264 qp = phba->sli4_hba.hdwq;
4266 while (!list_empty(cbuf)) {
4267 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4268 list_remove_head(cbuf, lpfc_cmd,
4269 struct lpfc_io_buf, list);
4273 qp = &phba->sli4_hba.hdwq[idx];
4274 lpfc_cmd->hdwq_no = idx;
4275 lpfc_cmd->hdwq = qp;
4276 lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
4277 spin_lock(&qp->io_buf_list_put_lock);
4278 list_add_tail(&lpfc_cmd->list,
4279 &qp->lpfc_io_buf_list_put);
4281 qp->total_io_bufs++;
4282 spin_unlock(&qp->io_buf_list_put_lock);
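/*
 * Illustrative sketch (not driver code): the round-robin spreading used by
 * lpfc_io_buf_replenish() above. Queues 0..nq-1 are visited in turn until
 * the source list runs dry, keeping the per-queue buffer counts balanced.
 * Names are hypothetical.
 */
static void spread_round_robin(int total, int nq, int *per_queue)
{
	int idx = 0;

	while (total-- > 0) {
		per_queue[idx]++;
		idx = (idx + 1) % nq;
	}
}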
4289 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
4290 * @phba: pointer to lpfc hba data structure.
4292 * This routine first calculates the sizes of the current els and allocated
4293 * scsi sgl lists, and then goes through all sgls to update the physical
4294 * XRIs assigned due to port function reset. During port initialization, the
4295 * current els and allocated scsi sgl lists are 0s.
4298 * 0 - successful (for now, it always returns 0)
4301 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4303 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4304 uint16_t i, lxri, els_xri_cnt;
4305 uint16_t io_xri_cnt, io_xri_max;
4306 LIST_HEAD(io_sgl_list);
4310 * update on pci function's allocated nvme xri-sgl list
4313 /* maximum number of xris available for nvme buffers */
4314 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4315 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4316 phba->sli4_hba.io_xri_max = io_xri_max;
4318 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4319 "6074 Current allocated XRI sgl count:%d, "
4320 "maximum XRI count:%d\n",
4321 phba->sli4_hba.io_xri_cnt,
4322 phba->sli4_hba.io_xri_max);
4324 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4326 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4327 /* max nvme xri shrunk below the allocated nvme buffers */
4328 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4329 phba->sli4_hba.io_xri_max;
4330 /* release the extra allocated nvme buffers */
4331 for (i = 0; i < io_xri_cnt; i++) {
4332 list_remove_head(&io_sgl_list, lpfc_ncmd,
4333 struct lpfc_io_buf, list);
4335 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4337 lpfc_ncmd->dma_handle);
4341 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4344 /* update xris associated to remaining allocated nvme buffers */
4346 lpfc_ncmd_next = NULL;
4347 phba->sli4_hba.io_xri_cnt = cnt;
4348 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4349 &io_sgl_list, list) {
4350 lxri = lpfc_sli4_next_xritag(phba);
4351 if (lxri == NO_XRI) {
4352 lpfc_printf_log(phba, KERN_ERR,
4354 "6075 Failed to allocate xri for "
4359 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4360 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4362 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4371 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4372 * @phba: Pointer to lpfc hba data structure.
4373 * @num_to_alloc: The requested number of buffers to allocate.
4375 * This routine allocates nvme buffers for a device with the SLI-4 interface
4376 * spec; each nvme buffer contains all the information needed to initiate
4377 * an I/O. After allocating up to @num_to_alloc IO buffers and putting
4378 * them on a list, it posts them to the port using SGL block post.
4381 * int - number of IO buffers that were allocated and posted.
4382 * 0 = failure, less than num_to_alloc is a partial failure.
4385 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4387 struct lpfc_io_buf *lpfc_ncmd;
4388 struct lpfc_iocbq *pwqeq;
4389 uint16_t iotag, lxri = 0;
4390 int bcnt, num_posted;
4391 LIST_HEAD(prep_nblist);
4392 LIST_HEAD(post_nblist);
4393 LIST_HEAD(nvme_nblist);
4395 phba->sli4_hba.io_xri_cnt = 0;
4396 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4397 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4401 * Get memory from the pci pool to map the virt space to
4402 * pci bus space for an I/O. The DMA buffer includes the
4403 * number of SGE's necessary to support the sg_tablesize.
4405 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4407 &lpfc_ncmd->dma_handle);
4408 if (!lpfc_ncmd->data) {
4413 if (phba->cfg_xpsgl && !phba->nvmet_support) {
4414 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4417 * 4K Page alignment is CRITICAL to BlockGuard, double
4420 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4421 (((unsigned long)(lpfc_ncmd->data) &
4422 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4423 lpfc_printf_log(phba, KERN_ERR,
4425 "3369 Memory alignment err: "
4427 (unsigned long)lpfc_ncmd->data);
4428 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4430 lpfc_ncmd->dma_handle);
4436 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4438 lxri = lpfc_sli4_next_xritag(phba);
4439 if (lxri == NO_XRI) {
4440 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4441 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4445 pwqeq = &lpfc_ncmd->cur_iocbq;
4447 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
4448 iotag = lpfc_sli_next_iotag(phba, pwqeq);
4450 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4451 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4453 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4454 "6121 Failed to allocate IOTAG for"
4455 " XRI:0x%x\n", lxri);
4456 lpfc_sli4_free_xri(phba, lxri);
4459 pwqeq->sli4_lxritag = lxri;
4460 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4461 pwqeq->context1 = lpfc_ncmd;
4463 /* Initialize local short-hand pointers. */
4464 lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4465 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4466 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
4467 spin_lock_init(&lpfc_ncmd->buf_lock);
4469 /* add the nvme buffer to a post list */
4470 list_add_tail(&lpfc_ncmd->list, &post_nblist);
4471 phba->sli4_hba.io_xri_cnt++;
4473 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4474 "6114 Allocate %d out of %d requested new NVME "
4475 "buffers\n", bcnt, num_to_alloc);
4477 /* post the list of nvme buffer sgls to port if available */
4478 if (!list_empty(&post_nblist))
4479 num_posted = lpfc_sli4_post_io_sgl_list(
4480 phba, &post_nblist, bcnt);
4488 lpfc_get_wwpn(struct lpfc_hba *phba)
4492 LPFC_MBOXQ_t *mboxq;
4495 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4498 return (uint64_t)-1;
4500 /* First get WWN of HBA instance */
4501 lpfc_read_nv(phba, mboxq);
4502 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4503 if (rc != MBX_SUCCESS) {
4504 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4505 "6019 Mailbox failed , mbxCmd x%x "
4506 "READ_NV, mbxStatus x%x\n",
4507 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4508 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4509 mempool_free(mboxq, phba->mbox_mem_pool);
4510 return (uint64_t) -1;
4513 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4514 /* wwn is WWPN of HBA instance */
4515 mempool_free(mboxq, phba->mbox_mem_pool);
4516 if (phba->sli_rev == LPFC_SLI_REV4)
4517 return be64_to_cpu(wwn);
4519 return rol64(wwn, 32);
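/*
 * Illustrative sketch (not driver code): why the two returns above differ.
 * An SLI4 port reports the WWPN big-endian, so a full byte swap
 * (be64_to_cpu) is needed on little-endian hosts; the SLI3 path only needs
 * its two 32-bit words exchanged, which a 32-bit rotate achieves:
 */
static inline unsigned long long swap_words32(unsigned long long v)
{
	return (v << 32) | (v >> 32);	/* same result as rol64(v, 32) */
}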
4523 * lpfc_vmid_res_alloc - Allocates resources for VMID
4524 * @phba: pointer to lpfc hba data structure.
4525 * @vport: pointer to vport data structure
4527 * This routine allocates the resources needed for the VMID.
4534 lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
4536 /* VMID feature is supported only on SLI4 */
4537 if (phba->sli_rev == LPFC_SLI_REV3) {
4538 phba->cfg_vmid_app_header = 0;
4539 phba->cfg_vmid_priority_tagging = 0;
4542 if (lpfc_is_vmid_enabled(phba)) {
4544 kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
4549 rwlock_init(&vport->vmid_lock);
4551 /* Set the VMID parameters for the vport */
4552 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
4553 vport->vmid_inactivity_timeout =
4554 phba->cfg_vmid_inactivity_timeout;
4555 vport->max_vmid = phba->cfg_max_vmid;
4556 vport->cur_vmid_cnt = 0;
4558 vport->vmid_priority_range = bitmap_zalloc
4559 (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);
4561 if (!vport->vmid_priority_range) {
4566 hash_init(vport->hash_table);
4572 * lpfc_create_port - Create an FC port
4573 * @phba: pointer to lpfc hba data structure.
4574 * @instance: a unique integer ID to this FC port.
4575 * @dev: pointer to the device data structure.
4577 * This routine creates an FC port for the upper layer protocol. The FC port
4578 * can be created on top of either a physical port or a virtual port provided
4579 * by the HBA. This routine also allocates a SCSI host data structure (shost)
4580 * and associates the FC port created before adding the shost into the SCSI layer.
4584 * @vport - pointer to the virtual N_Port data structure.
4585 * NULL - port create failed.
4588 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4590 struct lpfc_vport *vport;
4591 struct Scsi_Host *shost = NULL;
4592 struct scsi_host_template *template;
4596 bool use_no_reset_hba = false;
4599 if (lpfc_no_hba_reset_cnt) {
4600 if (phba->sli_rev < LPFC_SLI_REV4 &&
4601 dev == &phba->pcidev->dev) {
4602 /* Reset the port first */
4603 lpfc_sli_brdrestart(phba);
4604 rc = lpfc_sli_chipset_init(phba);
4608 wwn = lpfc_get_wwpn(phba);
4611 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4612 if (wwn == lpfc_no_hba_reset[i]) {
4613 lpfc_printf_log(phba, KERN_ERR,
4615 "6020 Setting use_no_reset port=%llx\n",
4617 use_no_reset_hba = true;
4622 /* Seed template for SCSI host registration */
4623 if (dev == &phba->pcidev->dev) {
4624 template = &phba->port_template;
4626 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4627 /* Seed physical port template */
4628 memcpy(template, &lpfc_template, sizeof(*template));
4630 if (use_no_reset_hba)
4631 /* template is for a no reset SCSI Host */
4632 template->eh_host_reset_handler = NULL;
4634 /* Template for all vports this physical port creates */
4635 memcpy(&phba->vport_template, &lpfc_template,
4637 phba->vport_template.shost_groups = lpfc_vport_groups;
4638 phba->vport_template.eh_bus_reset_handler = NULL;
4639 phba->vport_template.eh_host_reset_handler = NULL;
4640 phba->vport_template.vendor_id = 0;
4642 /* Initialize the host templates with updated value */
4643 if (phba->sli_rev == LPFC_SLI_REV4) {
4644 template->sg_tablesize = phba->cfg_scsi_seg_cnt;
4645 phba->vport_template.sg_tablesize =
4646 phba->cfg_scsi_seg_cnt;
4648 template->sg_tablesize = phba->cfg_sg_seg_cnt;
4649 phba->vport_template.sg_tablesize =
4650 phba->cfg_sg_seg_cnt;
4654 /* NVMET is for physical port only */
4655 memcpy(template, &lpfc_template_nvme,
4659 template = &phba->vport_template;
4662 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
4666 vport = (struct lpfc_vport *) shost->hostdata;
4668 vport->load_flag |= FC_LOADING;
4669 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4670 vport->fc_rscn_flush = 0;
4671 lpfc_get_vport_cfgparam(vport);
4673 /* Adjust value in vport */
4674 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4676 shost->unique_id = instance;
4677 shost->max_id = LPFC_MAX_TARGET;
4678 shost->max_lun = vport->cfg_max_luns;
4679 shost->this_id = -1;
4680 shost->max_cmd_len = 16;
4682 if (phba->sli_rev == LPFC_SLI_REV4) {
4683 if (!phba->cfg_fcp_mq_threshold ||
4684 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4685 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4687 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4688 phba->cfg_fcp_mq_threshold);
4690 shost->dma_boundary =
4691 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4693 if (phba->cfg_xpsgl && !phba->nvmet_support)
4694 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4696 shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
4698 /* SLI-3 has a limited number of hardware queues (3),
4699 * thus there is only one for FCP processing.
4701 shost->nr_hw_queues = 1;
4704 * Set initial can_queue value since 0 is no longer supported and
4705 * scsi_add_host will fail. This will be adjusted later based on the
4706 * max xri value determined in hba setup.
4708 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4709 if (dev != &phba->pcidev->dev) {
4710 shost->transportt = lpfc_vport_transport_template;
4711 vport->port_type = LPFC_NPIV_PORT;
4713 shost->transportt = lpfc_transport_template;
4714 vport->port_type = LPFC_PHYSICAL_PORT;
4717 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4718 "9081 CreatePort TMPLATE type %x TBLsize %d "
4720 vport->port_type, shost->sg_tablesize,
4721 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4723 /* Allocate the resources for VMID */
4724 rc = lpfc_vmid_res_alloc(phba, vport);
4729 /* Initialize all internally managed lists. */
4730 INIT_LIST_HEAD(&vport->fc_nodes);
4731 INIT_LIST_HEAD(&vport->rcv_buffer_list);
4732 spin_lock_init(&vport->work_port_lock);
4734 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4736 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4738 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4740 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4741 lpfc_setup_bg(phba, shost);
4743 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4747 spin_lock_irq(&phba->port_list_lock);
4748 list_add_tail(&vport->listentry, &phba->port_list);
4749 spin_unlock_irq(&phba->port_list_lock);
4754 bitmap_free(vport->vmid_priority_range);
4755 scsi_host_put(shost);
4761 * destroy_port - destroy an FC port
4762 * @vport: pointer to an lpfc virtual N_Port data structure.
4764 * This routine destroys an FC port from the upper layer protocol. All the
4765 * resources associated with the port are released.
4768 destroy_port(struct lpfc_vport *vport)
4770 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4771 struct lpfc_hba *phba = vport->phba;
4773 lpfc_debugfs_terminate(vport);
4774 fc_remove_host(shost);
4775 scsi_remove_host(shost);
4777 spin_lock_irq(&phba->port_list_lock);
4778 list_del_init(&vport->listentry);
4779 spin_unlock_irq(&phba->port_list_lock);
4781 lpfc_cleanup(vport);
4786 * lpfc_get_instance - Get a unique integer ID
4788 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
4789 * uses the kernel idr facility to perform the task.
4792 * instance - a unique integer ID allocated as the new instance.
4793 * -1 - lpfc get instance failed.
4796 lpfc_get_instance(void)
4800 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4801 return ret < 0 ? -1 : ret;
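/*
 * Illustrative sketch (not driver code): the idr pattern used above. An ID
 * handed out by idr_alloc() must eventually be returned with idr_remove();
 * end == 0 means "no upper bound". The idr name here is hypothetical.
 */
static DEFINE_IDR(example_index);

static int example_get_id(void)
{
	int ret = idr_alloc(&example_index, NULL, 0, 0, GFP_KERNEL);

	return ret < 0 ? -1 : ret;	/* mirrors lpfc_get_instance() */
}

static void example_put_id(int id)
{
	idr_remove(&example_index, id);
}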
4805 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
4806 * @shost: pointer to SCSI host data structure.
4807 * @time: elapsed time of the scan in jiffies.
4809 * This routine is called by the SCSI layer with a SCSI host to determine
4810 * whether the host scan is finished.
4812 * Note: there is no scan_start function as adapter initialization will have
4813 * asynchronously kicked off the link initialization.
4816 * 0 - SCSI host scan is not over yet.
4817 * 1 - SCSI host scan is over.
4819 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4821 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4822 struct lpfc_hba *phba = vport->phba;
4825 spin_lock_irq(shost->host_lock);
4827 if (vport->load_flag & FC_UNLOADING) {
4831 if (time >= msecs_to_jiffies(30 * 1000)) {
4832 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4833 "0461 Scanning longer than 30 "
4834 "seconds. Continuing initialization\n");
4838 if (time >= msecs_to_jiffies(15 * 1000) &&
4839 phba->link_state <= LPFC_LINK_DOWN) {
4840 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4841 "0465 Link down longer than 15 "
4842 "seconds. Continuing initialization\n");
4847 if (vport->port_state != LPFC_VPORT_READY)
4849 if (vport->num_disc_nodes || vport->fc_prli_sent)
4851 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
4853 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4859 spin_unlock_irq(shost->host_lock);
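/*
 * Illustrative sketch (not driver code): the time-based escape hatches above,
 * restated in plain milliseconds. The scan is force-completed after 30s
 * regardless of state, or after 15s if the link never came up, so a dead
 * fabric cannot stall host registration indefinitely. Names are hypothetical.
 */
static int scan_should_finish(unsigned long elapsed_ms, int link_up)
{
	if (elapsed_ms >= 30 * 1000)
		return 1;
	if (elapsed_ms >= 15 * 1000 && !link_up)
		return 1;
	return 0;	/* otherwise defer to the discovery state checks */
}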
4863 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4865 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4866 struct lpfc_hba *phba = vport->phba;
4868 fc_host_supported_speeds(shost) = 0;
4870 * Avoid reporting supported link speed for FCoE as it can't be
4871 * controlled via FCoE.
4873 if (phba->hba_flag & HBA_FCOE_MODE)
4876 if (phba->lmt & LMT_256Gb)
4877 fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT;
4878 if (phba->lmt & LMT_128Gb)
4879 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4880 if (phba->lmt & LMT_64Gb)
4881 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4882 if (phba->lmt & LMT_32Gb)
4883 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4884 if (phba->lmt & LMT_16Gb)
4885 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4886 if (phba->lmt & LMT_10Gb)
4887 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4888 if (phba->lmt & LMT_8Gb)
4889 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4890 if (phba->lmt & LMT_4Gb)
4891 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4892 if (phba->lmt & LMT_2Gb)
4893 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4894 if (phba->lmt & LMT_1Gb)
4895 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
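/*
 * Illustrative sketch (not driver code): the LMT -> FC_PORTSPEED translation
 * above is a pure bit-for-bit mapping, so it could equally be table driven.
 * A trimmed example using the same macros; the table name is hypothetical:
 */
static const struct {
	unsigned int lmt_bit;
	unsigned int portspeed_bit;
} speed_map_example[] = {
	{ LMT_1Gb, FC_PORTSPEED_1GBIT },
	{ LMT_2Gb, FC_PORTSPEED_2GBIT },
	{ LMT_4Gb, FC_PORTSPEED_4GBIT },
	/* ... one row per supported LMT_* bit ... */
};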
4899 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
4900 * @shost: pointer to SCSI host data structure.
4902 * This routine initializes a given SCSI host's attributes on an FC port. The
4903 * SCSI host can be either on top of a physical port or a virtual port.
4905 void lpfc_host_attrib_init(struct Scsi_Host *shost)
4907 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4908 struct lpfc_hba *phba = vport->phba;
4910 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
4913 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4914 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4915 fc_host_supported_classes(shost) = FC_COS_CLASS3;
4917 memset(fc_host_supported_fc4s(shost), 0,
4918 sizeof(fc_host_supported_fc4s(shost)));
4919 fc_host_supported_fc4s(shost)[2] = 1;
4920 fc_host_supported_fc4s(shost)[7] = 1;
4922 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
4923 sizeof fc_host_symbolic_name(shost));
4925 lpfc_host_supported_speeds_set(shost);
4927 fc_host_maxframe_size(shost) =
4928 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
4929 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
4931 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
4933 /* This value is also unchanging */
4934 memset(fc_host_active_fc4s(shost), 0,
4935 sizeof(fc_host_active_fc4s(shost)));
4936 fc_host_active_fc4s(shost)[2] = 1;
4937 fc_host_active_fc4s(shost)[7] = 1;
4939 fc_host_max_npiv_vports(shost) = phba->max_vpi;
4940 spin_lock_irq(shost->host_lock);
4941 vport->load_flag &= ~FC_LOADING;
4942 spin_unlock_irq(shost->host_lock);
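/*
 * Illustrative sketch (not driver code): the max frame size set above comes
 * from the service parameters' BB receive size, a 12-bit field split across
 * two bytes: the low nibble of the MSB supplies bits 11..8 and the LSB bits
 * 7..0. For example, msb = 0x08, lsb = 0x00 decodes to 0x800 = 2048 bytes.
 */
static inline unsigned int bb_rcv_size_example(unsigned char msb,
					       unsigned char lsb)
{
	return ((msb & 0x0F) << 8) | lsb;
}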
4946 * lpfc_stop_port_s3 - Stop SLI3 device port
4947 * @phba: pointer to lpfc hba data structure.
4949 * This routine is invoked to stop an SLI3 device port: it stops the device
4950 * from generating interrupts and stops the device driver's timers for the
4954 lpfc_stop_port_s3(struct lpfc_hba *phba)
4956 /* Clear all interrupt enable conditions */
4957 writel(0, phba->HCregaddr);
4958 readl(phba->HCregaddr); /* flush */
4959 /* Clear all pending interrupts */
4960 writel(0xffffffff, phba->HAregaddr);
4961 readl(phba->HAregaddr); /* flush */
4963 /* Reset some HBA SLI setup states */
4964 lpfc_stop_hba_timers(phba);
4965 phba->pport->work_port_events = 0;
4969 * lpfc_stop_port_s4 - Stop SLI4 device port
4970 * @phba: pointer to lpfc hba data structure.
4972 * This routine is invoked to stop an SLI4 device port: it stops the device
4973 * from generating interrupts and stops the device driver's timers for the
4977 lpfc_stop_port_s4(struct lpfc_hba *phba)
4979 /* Reset some HBA SLI4 setup states */
4980 lpfc_stop_hba_timers(phba);
4982 phba->pport->work_port_events = 0;
4983 phba->sli4_hba.intr_enable = 0;
4987 * lpfc_stop_port - Wrapper function for stopping hba port
4988 * @phba: Pointer to HBA context object.
4990 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
4991 * the API jump table function pointer from the lpfc_hba struct.
4994 lpfc_stop_port(struct lpfc_hba *phba)
4996 phba->lpfc_stop_port(phba);
4999 flush_workqueue(phba->wq);
5003 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
5004 * @phba: Pointer to hba for which this call is being executed.
5006 * This routine starts the timer waiting for the FCF rediscovery to complete.
5009 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
5011 unsigned long fcf_redisc_wait_tmo =
5012 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
5013 /* Start fcf rediscovery wait period timer */
5014 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
5015 spin_lock_irq(&phba->hbalock);
5016 /* Allow action to new fcf asynchronous event */
5017 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
5018 /* Mark the FCF rediscovery pending state */
5019 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
5020 spin_unlock_irq(&phba->hbalock);
5024 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
5025 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
5027 * This routine is invoked when the wait for FCF table rediscovery has
5028 * timed out. If new FCF record(s) have been discovered during the
5029 * wait period, a new FCF event shall be added to the FCOE async event
5030 * list, and the worker thread shall then be woken up for processing in the
5031 * worker thread context.
5034 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
5036 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
5038 /* Don't send FCF rediscovery event if timer cancelled */
5039 spin_lock_irq(&phba->hbalock);
5040 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
5041 spin_unlock_irq(&phba->hbalock);
5044 /* Clear FCF rediscovery timer pending flag */
5045 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
5046 /* FCF rediscovery event to worker thread */
5047 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
5048 spin_unlock_irq(&phba->hbalock);
5049 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
5050 "2776 FCF rediscover quiescent timer expired\n");
5051 /* wake up worker thread */
5052 lpfc_worker_wake_up(phba);
5056 * lpfc_vmid_poll - VMID timeout detection
5057 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
5059 * This routine is invoked when there is no I/O by a VM for the specified
5060 * amount of time. When this situation is detected, the VMID has to be
5061 * deregistered from the switch and all the local resources freed. The VMID
5062 * will be reassigned to the VM once the I/O begins.
5065 lpfc_vmid_poll(struct timer_list *t)
5067 struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
5070 /* check if there is a need to issue QFPA */
5071 if (phba->pport->vmid_priority_tagging) {
5073 phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
5076 /* Is the vmid inactivity timer enabled */
5077 if (phba->pport->vmid_inactivity_timeout ||
5078 phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
5080 phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
5084 lpfc_worker_wake_up(phba);
5086 /* restart the timer for the next iteration */
5087 mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
5092 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
5093 * @phba: pointer to lpfc hba data structure.
5094 * @acqe_link: pointer to the async link completion queue entry.
5096 * This routine is to parse the SLI4 link-attention link fault code.
5099 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
5100 struct lpfc_acqe_link *acqe_link)
5102 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
5103 case LPFC_ASYNC_LINK_FAULT_NONE:
5104 case LPFC_ASYNC_LINK_FAULT_LOCAL:
5105 case LPFC_ASYNC_LINK_FAULT_REMOTE:
5106 case LPFC_ASYNC_LINK_FAULT_LR_LRR:
5109 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5110 "0398 Unknown link fault code: x%x\n",
5111 bf_get(lpfc_acqe_link_fault, acqe_link));
5117 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
5118 * @phba: pointer to lpfc hba data structure.
5119 * @acqe_link: pointer to the async link completion queue entry.
5121 * This routine is to parse the SLI4 link attention type and translate it
5122 * into the base driver's link attention type coding.
5124 * Return: Link attention type in terms of base driver's coding.
5127 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
5128 struct lpfc_acqe_link *acqe_link)
5132 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
5133 case LPFC_ASYNC_LINK_STATUS_DOWN:
5134 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
5135 att_type = LPFC_ATT_LINK_DOWN;
5137 case LPFC_ASYNC_LINK_STATUS_UP:
5138 /* Ignore physical link up events - wait for logical link up */
5139 att_type = LPFC_ATT_RESERVED;
5141 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
5142 att_type = LPFC_ATT_LINK_UP;
5145 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5146 "0399 Invalid link attention type: x%x\n",
5147 bf_get(lpfc_acqe_link_status, acqe_link));
5148 att_type = LPFC_ATT_RESERVED;
5155 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
5156 * @phba: pointer to lpfc hba data structure.
5158 * This routine is to get an SLI3 FC port's link speed in Mbps.
5160 * Return: link speed in terms of Mbps.
5163 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
5165 uint32_t link_speed;
5167 if (!lpfc_is_link_up(phba))
5170 if (phba->sli_rev <= LPFC_SLI_REV3) {
5171 switch (phba->fc_linkspeed) {
5172 case LPFC_LINK_SPEED_1GHZ:
5175 case LPFC_LINK_SPEED_2GHZ:
5178 case LPFC_LINK_SPEED_4GHZ:
5181 case LPFC_LINK_SPEED_8GHZ:
5184 case LPFC_LINK_SPEED_10GHZ:
5187 case LPFC_LINK_SPEED_16GHZ:
5194 if (phba->sli4_hba.link_state.logical_speed)
5196 phba->sli4_hba.link_state.logical_speed;
5198 link_speed = phba->sli4_hba.link_state.speed;
5204 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
5205 * @phba: pointer to lpfc hba data structure.
5206 * @evt_code: asynchronous event code.
5207 * @speed_code: asynchronous event link speed code.
5209 * This routine is to parse the given SLI4 async event link speed code into
5210 * value of Mbps for the link speed.
5212 * Return: link speed in terms of Mbps.
5215 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
5218 uint32_t port_speed;
5221 case LPFC_TRAILER_CODE_LINK:
5222 switch (speed_code) {
5223 case LPFC_ASYNC_LINK_SPEED_ZERO:
5226 case LPFC_ASYNC_LINK_SPEED_10MBPS:
5229 case LPFC_ASYNC_LINK_SPEED_100MBPS:
5232 case LPFC_ASYNC_LINK_SPEED_1GBPS:
5235 case LPFC_ASYNC_LINK_SPEED_10GBPS:
5238 case LPFC_ASYNC_LINK_SPEED_20GBPS:
5241 case LPFC_ASYNC_LINK_SPEED_25GBPS:
5244 case LPFC_ASYNC_LINK_SPEED_40GBPS:
5247 case LPFC_ASYNC_LINK_SPEED_100GBPS:
5248 port_speed = 100000;
5254 case LPFC_TRAILER_CODE_FC:
5255 switch (speed_code) {
5256 case LPFC_FC_LA_SPEED_UNKNOWN:
5259 case LPFC_FC_LA_SPEED_1G:
5262 case LPFC_FC_LA_SPEED_2G:
5265 case LPFC_FC_LA_SPEED_4G:
5268 case LPFC_FC_LA_SPEED_8G:
5271 case LPFC_FC_LA_SPEED_10G:
5274 case LPFC_FC_LA_SPEED_16G:
5277 case LPFC_FC_LA_SPEED_32G:
5280 case LPFC_FC_LA_SPEED_64G:
5283 case LPFC_FC_LA_SPEED_128G:
5284 port_speed = 128000;
5286 case LPFC_FC_LA_SPEED_256G:
5287 port_speed = 256000;
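/*
 * Illustrative sketch (not driver code): the FC arm of the decode above
 * recast as a code/Mbps pair table; a linear scan that returns 0 on a miss
 * behaves like the switch's default arm. The table name is hypothetical and
 * the rows are trimmed.
 */
static const struct {
	unsigned int code;	/* an LPFC_FC_LA_SPEED_* value */
	unsigned int mbps;
} fc_speed_tbl_example[] = {
	{ LPFC_FC_LA_SPEED_1G, 1000 },
	{ LPFC_FC_LA_SPEED_16G, 16000 },
	{ LPFC_FC_LA_SPEED_256G, 256000 },
	/* ... remaining codes ... */
};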
5300 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
5301 * @phba: pointer to lpfc hba data structure.
5302 * @acqe_link: pointer to the async link completion queue entry.
5304 * This routine is to handle the SLI4 asynchronous FCoE link event.
5307 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
5308 struct lpfc_acqe_link *acqe_link)
5310 struct lpfc_dmabuf *mp;
5313 struct lpfc_mbx_read_top *la;
5317 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
5318 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
5320 phba->fcoe_eventtag = acqe_link->event_tag;
5321 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5323 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5324 "0395 The mboxq allocation failed\n");
5327 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5329 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5330 "0396 The lpfc_dmabuf allocation failed\n");
5333 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
5335 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5336 "0397 The mbuf allocation failed\n");
5337 goto out_free_dmabuf;
5340 /* Cleanup any outstanding ELS commands */
5341 lpfc_els_flush_all_cmd(phba);
5343 /* Block ELS IOCBs until we have done process link event */
5344 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5346 /* Update link event statistics */
5347 phba->sli.slistat.link_event++;
5349 /* Create lpfc_handle_latt mailbox command from link ACQE */
5350 lpfc_read_topology(phba, pmb, mp);
5351 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5352 pmb->vport = phba->pport;
5354 /* Keep the link status for extra SLI4 state machine reference */
5355 phba->sli4_hba.link_state.speed =
5356 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
5357 bf_get(lpfc_acqe_link_speed, acqe_link));
5358 phba->sli4_hba.link_state.duplex =
5359 bf_get(lpfc_acqe_link_duplex, acqe_link);
5360 phba->sli4_hba.link_state.status =
5361 bf_get(lpfc_acqe_link_status, acqe_link);
5362 phba->sli4_hba.link_state.type =
5363 bf_get(lpfc_acqe_link_type, acqe_link);
5364 phba->sli4_hba.link_state.number =
5365 bf_get(lpfc_acqe_link_number, acqe_link);
5366 phba->sli4_hba.link_state.fault =
5367 bf_get(lpfc_acqe_link_fault, acqe_link);
5368 phba->sli4_hba.link_state.logical_speed =
5369 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
5371 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5372 "2900 Async FC/FCoE Link event - Speed:%dGBit "
5373 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
5374 "Logical speed:%dMbps Fault:%d\n",
5375 phba->sli4_hba.link_state.speed,
5376 phba->sli4_hba.link_state.topology,
5377 phba->sli4_hba.link_state.status,
5378 phba->sli4_hba.link_state.type,
5379 phba->sli4_hba.link_state.number,
5380 phba->sli4_hba.link_state.logical_speed,
5381 phba->sli4_hba.link_state.fault);
5383 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
5384 * topology info. Note: Optional for non FC-AL ports.
5386 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
5387 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5388 if (rc == MBX_NOT_FINISHED) {
5389 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5390 goto out_free_dmabuf;
5395 * For FCoE Mode: fill in all the topology information we need and call
5396 * the READ_TOPOLOGY completion routine to continue without actually
5397 * sending the READ_TOPOLOGY mailbox command to the port.
5399 /* Initialize completion status */
5401 mb->mbxStatus = MBX_SUCCESS;
5403 /* Parse port fault information field */
5404 lpfc_sli4_parse_latt_fault(phba, acqe_link);
5406 /* Parse and translate link attention fields */
5407 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
5408 la->eventTag = acqe_link->event_tag;
5409 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
5410 bf_set(lpfc_mbx_read_top_link_spd, la,
5411 (bf_get(lpfc_acqe_link_speed, acqe_link)));
5413 /* Fake the following irrelevant fields */
5414 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
5415 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
5416 bf_set(lpfc_mbx_read_top_il, la, 0);
5417 bf_set(lpfc_mbx_read_top_pb, la, 0);
5418 bf_set(lpfc_mbx_read_top_fa, la, 0);
5419 bf_set(lpfc_mbx_read_top_mm, la, 0);
5421 /* Invoke the lpfc_handle_latt mailbox command callback function */
5422 lpfc_mbx_cmpl_read_topology(phba, pmb);
5429 mempool_free(pmb, phba->mbox_mem_pool);
5433 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
5435 * @phba: pointer to lpfc hba data structure.
5436 * @speed_code: asynchronous event link speed code.
5438 * This routine is to parse the given SLI4 async event link speed code into
5439 * value of Read topology link speed.
5441 * Return: link speed in terms of Read topology.
5444 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5448 switch (speed_code) {
5449 case LPFC_FC_LA_SPEED_1G:
5450 port_speed = LPFC_LINK_SPEED_1GHZ;
5452 case LPFC_FC_LA_SPEED_2G:
5453 port_speed = LPFC_LINK_SPEED_2GHZ;
5455 case LPFC_FC_LA_SPEED_4G:
5456 port_speed = LPFC_LINK_SPEED_4GHZ;
5458 case LPFC_FC_LA_SPEED_8G:
5459 port_speed = LPFC_LINK_SPEED_8GHZ;
5461 case LPFC_FC_LA_SPEED_16G:
5462 port_speed = LPFC_LINK_SPEED_16GHZ;
5464 case LPFC_FC_LA_SPEED_32G:
5465 port_speed = LPFC_LINK_SPEED_32GHZ;
5467 case LPFC_FC_LA_SPEED_64G:
5468 port_speed = LPFC_LINK_SPEED_64GHZ;
5470 case LPFC_FC_LA_SPEED_128G:
5471 port_speed = LPFC_LINK_SPEED_128GHZ;
5473 case LPFC_FC_LA_SPEED_256G:
5474 port_speed = LPFC_LINK_SPEED_256GHZ;
5485 lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
5487 struct rxtable_entry *entry;
5488 int cnt = 0, head, tail, last, start;
5490 head = atomic_read(&phba->rxtable_idx_head);
5491 tail = atomic_read(&phba->rxtable_idx_tail);
5492 if (!phba->rxtable || head == tail) {
5493 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
5494 "4411 Rxtable is empty\n");
5500 /* Display the last LPFC_MAX_RXMONITOR_DUMP entries from the rxtable */
5501 while (start != last) {
5505 start = LPFC_MAX_RXMONITOR_ENTRY - 1;
5506 entry = &phba->rxtable[start];
5507 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5508 "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld "
5509 "Lat %lld ASz %lld Info %02d BWUtil %d "
5511 cnt, entry->max_bytes_per_interval,
5512 entry->total_bytes, entry->rcv_bytes,
5513 entry->avg_io_latency, entry->avg_io_size,
5514 entry->cmf_info, entry->timer_utilization,
5515 entry->timer_interval, start);
5517 if (cnt >= LPFC_MAX_RXMONITOR_DUMP)
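/*
 * Illustrative sketch (not driver code): dumping the most recent n entries of
 * a ring buffer newest-first, as the rxtable walk above does: step the index
 * backwards from head and wrap below zero. Names are hypothetical.
 */
static void ring_dump_last(int head, int ring_size, int n)
{
	int idx = head;

	while (n-- > 0) {
		idx = (idx == 0) ? ring_size - 1 : idx - 1;
		/* emit entry[idx] here, e.g. via lpfc_printf_log() */
	}
}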
5523 * lpfc_cgn_update_stat - Save data into congestion stats buffer
5524 * @phba: pointer to lpfc hba data structure.
5525 * @dtag: FPIN descriptor received
5527 * Increment the FPIN received counter/time when it happens.
5530 lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
5532 struct lpfc_cgn_info *cp;
5534 struct timespec64 cur_time;
5538 /* Make sure we have a congestion info buffer */
5541 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5542 ktime_get_real_ts64(&cur_time);
5543 time64_to_tm(cur_time.tv_sec, 0, &broken);
5545 /* Update congestion statistics */
5547 case ELS_DTAG_LNK_INTEGRITY:
5548 cnt = le32_to_cpu(cp->link_integ_notification);
5550 cp->link_integ_notification = cpu_to_le32(cnt);
5552 cp->cgn_stat_lnk_month = broken.tm_mon + 1;
5553 cp->cgn_stat_lnk_day = broken.tm_mday;
5554 cp->cgn_stat_lnk_year = broken.tm_year - 100;
5555 cp->cgn_stat_lnk_hour = broken.tm_hour;
5556 cp->cgn_stat_lnk_min = broken.tm_min;
5557 cp->cgn_stat_lnk_sec = broken.tm_sec;
5559 case ELS_DTAG_DELIVERY:
5560 cnt = le32_to_cpu(cp->delivery_notification);
5562 cp->delivery_notification = cpu_to_le32(cnt);
5564 cp->cgn_stat_del_month = broken.tm_mon + 1;
5565 cp->cgn_stat_del_day = broken.tm_mday;
5566 cp->cgn_stat_del_year = broken.tm_year - 100;
5567 cp->cgn_stat_del_hour = broken.tm_hour;
5568 cp->cgn_stat_del_min = broken.tm_min;
5569 cp->cgn_stat_del_sec = broken.tm_sec;
5571 case ELS_DTAG_PEER_CONGEST:
5572 cnt = le32_to_cpu(cp->cgn_peer_notification);
5574 cp->cgn_peer_notification = cpu_to_le32(cnt);
5576 cp->cgn_stat_peer_month = broken.tm_mon + 1;
5577 cp->cgn_stat_peer_day = broken.tm_mday;
5578 cp->cgn_stat_peer_year = broken.tm_year - 100;
5579 cp->cgn_stat_peer_hour = broken.tm_hour;
5580 cp->cgn_stat_peer_min = broken.tm_min;
5581 cp->cgn_stat_peer_sec = broken.tm_sec;
5583 case ELS_DTAG_CONGESTION:
5584 cnt = le32_to_cpu(cp->cgn_notification);
5586 cp->cgn_notification = cpu_to_le32(cnt);
5588 cp->cgn_stat_cgn_month = broken.tm_mon + 1;
5589 cp->cgn_stat_cgn_day = broken.tm_mday;
5590 cp->cgn_stat_cgn_year = broken.tm_year - 100;
5591 cp->cgn_stat_cgn_hour = broken.tm_hour;
5592 cp->cgn_stat_cgn_min = broken.tm_min;
5593 cp->cgn_stat_cgn_sec = broken.tm_sec;
5595 if (phba->cgn_fpin_frequency &&
5596 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5597 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5598 cp->cgn_stat_npm = value;
5600 value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5601 LPFC_CGN_CRC32_SEED);
5602 cp->cgn_info_crc = cpu_to_le32(value);
5606 * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer
5607 * @phba: pointer to lpfc hba data structure.
5609 * Save the congestion event data every minute.
5610 * On the hour collapse all the minute data into hour data. Every day
5611 * collapse all the hour data into daily data. Separate driver
5612 * and fabric congestion event counters that will be saved out
5613 * to the registered congestion buffer every minute.
5616 lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
5618 struct lpfc_cgn_info *cp;
5620 struct timespec64 cur_time;
5622 uint16_t value, mvalue;
5625 uint32_t dvalue, wvalue, lvalue, avalue;
5631 /* Make sure we have a congestion info buffer */
5634 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5636 if (time_before(jiffies, phba->cgn_evt_timestamp))
5638 phba->cgn_evt_timestamp = jiffies +
5639 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
5640 phba->cgn_evt_minute++;
5642 /* We should get to this point in the routine on 1 minute intervals */
5644 ktime_get_real_ts64(&cur_time);
5645 time64_to_tm(cur_time.tv_sec, 0, &broken);
5647 if (phba->cgn_fpin_frequency &&
5648 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5649 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5650 cp->cgn_stat_npm = value;
5653 /* Read and clear the latency counters for this minute */
5654 lvalue = atomic_read(&phba->cgn_latency_evt_cnt);
5655 latsum = atomic64_read(&phba->cgn_latency_evt);
5656 atomic_set(&phba->cgn_latency_evt_cnt, 0);
5657 atomic64_set(&phba->cgn_latency_evt, 0);
5659 /* We need to store MB/sec bandwidth in the congestion information.
5660 * block_cnt is count of 512 byte blocks for the entire minute,
5661 * bps will get bytes per sec before finally converting to MB/sec.
5663 bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512;
5664 phba->rx_block_cnt = 0;
5665 mvalue = bps / (1024 * 1024); /* convert to MB/sec */
5668 /* cgn parameters */
5669 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
5670 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
5671 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
5672 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
5674 /* Fill in default LUN qdepth */
5675 value = (uint16_t)(phba->pport->cfg_lun_queue_depth);
5676 cp->cgn_lunq = cpu_to_le16(value);
5678 /* Record congestion buffer info - every minute
5679 * cgn_driver_evt_cnt (Driver events)
5680 * cgn_fabric_warn_cnt (Congestion Warnings)
5681 * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency)
5682 * cgn_fabric_alarm_cnt (Congestion Alarms)
5684 index = ++cp->cgn_index_minute;
5685 if (cp->cgn_index_minute == LPFC_MIN_HOUR) {
5686 cp->cgn_index_minute = 0;
5690 /* Get the number of driver events in this sample and reset counter */
5691 dvalue = atomic_read(&phba->cgn_driver_evt_cnt);
5692 atomic_set(&phba->cgn_driver_evt_cnt, 0);
5694 /* Get the number of warning events - FPIN and Signal for this minute */
5696 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) ||
5697 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
5698 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5699 wvalue = atomic_read(&phba->cgn_fabric_warn_cnt);
5700 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
5702 /* Get the number of alarm events - FPIN and Signal for this minute */
5704 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) ||
5705 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5706 avalue = atomic_read(&phba->cgn_fabric_alarm_cnt);
5707 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
5709 /* Collect the driver, warning, alarm and latency counts for this
5710 * minute into the driver congestion buffer.
5712 ptr = &cp->cgn_drvr_min[index];
5713 value = (uint16_t)dvalue;
5714 *ptr = cpu_to_le16(value);
5716 ptr = &cp->cgn_warn_min[index];
5717 value = (uint16_t)wvalue;
5718 *ptr = cpu_to_le16(value);
5720 ptr = &cp->cgn_alarm_min[index];
5721 value = (uint16_t)avalue;
5722 *ptr = cpu_to_le16(value);
5724 lptr = &cp->cgn_latency_min[index];
5726 lvalue = (uint32_t)div_u64(latsum, lvalue);
5727 *lptr = cpu_to_le32(lvalue);
5732 /* Collect the bandwidth value into the driver's congestion buffer. */
5733 mptr = &cp->cgn_bw_min[index];
5734 *mptr = cpu_to_le16(mvalue);
5736 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5737 "2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
5738 index, dvalue, wvalue, *lptr, mvalue, avalue);
5741 if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
5742 /* Record congestion buffer info - every hour
5743 * Collapse all minutes into an hour
5745 index = ++cp->cgn_index_hour;
5746 if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
5747 cp->cgn_index_hour = 0;
5757 for (i = 0; i < LPFC_MIN_HOUR; i++) {
5758 dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
5759 wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
5760 lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
5761 mbps += le16_to_cpu(cp->cgn_bw_min[i]);
5762 avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
5764 if (lvalue) /* Avg of latency averages */
5765 lvalue /= LPFC_MIN_HOUR;
5766 if (mbps) /* Avg of Bandwidth averages */
5767 mvalue = mbps / LPFC_MIN_HOUR;
5769 lptr = &cp->cgn_drvr_hr[index];
5770 *lptr = cpu_to_le32(dvalue);
5771 lptr = &cp->cgn_warn_hr[index];
5772 *lptr = cpu_to_le32(wvalue);
5773 lptr = &cp->cgn_latency_hr[index];
5774 *lptr = cpu_to_le32(lvalue);
5775 mptr = &cp->cgn_bw_hr[index];
5776 *mptr = cpu_to_le16(mvalue);
5777 lptr = &cp->cgn_alarm_hr[index];
5778 *lptr = cpu_to_le32(avalue);
5780 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5781 "2419 Congestion Info - hour "
5782 "(%d): %d %d %d %d %d\n",
5783 index, dvalue, wvalue, lvalue, mvalue, avalue);
5787 if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
5788 /* Record congestion buffer info - every hour
5789 * Collapse all hours into a day. Rotate days
5790 * after LPFC_MAX_CGN_DAYS.
5792 index = ++cp->cgn_index_day;
5793 if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) {
5794 cp->cgn_index_day = 0;
5798 /* Anytime we overwrite daily index 0, after we wrap,
5799 * we will be overwriting the oldest day, so we must
5800 * update the congestion data start time for that day.
5801 * That start time should have previously been saved after
5802 * we wrote the last days worth of data.
5804 if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) {
5805 time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken);
5807 cp->cgn_info_month = broken.tm_mon + 1;
5808 cp->cgn_info_day = broken.tm_mday;
5809 cp->cgn_info_year = broken.tm_year - 100;
5810 cp->cgn_info_hour = broken.tm_hour;
5811 cp->cgn_info_minute = broken.tm_min;
5812 cp->cgn_info_second = broken.tm_sec;
5815 (phba, KERN_INFO, LOG_CGN_MGMT,
5816 "2646 CGNInfo idx0 Start Time: "
5817 "%d/%d/%d %d:%d:%d\n",
5818 cp->cgn_info_day, cp->cgn_info_month,
5819 cp->cgn_info_year, cp->cgn_info_hour,
5820 cp->cgn_info_minute, cp->cgn_info_second);
5829 for (i = 0; i < LPFC_HOUR_DAY; i++) {
5830 dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
5831 wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
5832 lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
5833 mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
5834 avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
5836 if (lvalue) /* Avg of latency averages */
5837 lvalue /= LPFC_HOUR_DAY;
5838 if (mbps) /* Avg of Bandwidth averages */
5839 mvalue = mbps / LPFC_HOUR_DAY;
5841 lptr = &cp->cgn_drvr_day[index];
5842 *lptr = cpu_to_le32(dvalue);
5843 lptr = &cp->cgn_warn_day[index];
5844 *lptr = cpu_to_le32(wvalue);
5845 lptr = &cp->cgn_latency_day[index];
5846 *lptr = cpu_to_le32(lvalue);
5847 mptr = &cp->cgn_bw_day[index];
5848 *mptr = cpu_to_le16(mvalue);
5849 lptr = &cp->cgn_alarm_day[index];
5850 *lptr = cpu_to_le32(avalue);
5852 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5853 "2420 Congestion Info - daily (%d): "
5855 index, dvalue, wvalue, lvalue, mvalue, avalue);
5857 /* We just wrote LPFC_MAX_CGN_DAYS of data,
5858 * so we are wrapped on any data after this.
5859 * Save this as the start time for the next day.
5861 if (index == (LPFC_MAX_CGN_DAYS - 1)) {
5862 phba->hba_flag |= HBA_CGN_DAY_WRAP;
5863 ktime_get_real_ts64(&phba->cgn_daily_ts);
5867 /* Use the frequency found in the last rcv'ed FPIN */
5868 value = phba->cgn_fpin_frequency;
5869 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN)
5870 cp->cgn_warn_freq = cpu_to_le16(value);
5871 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM)
5872 cp->cgn_alarm_freq = cpu_to_le16(value);
5874 /* Frequency (in ms) Signal Warning/Signal Congestion Notifications
5875 * are received by the HBA
5877 value = phba->cgn_sig_freq;
5879 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
5880 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5881 cp->cgn_warn_freq = cpu_to_le16(value);
5882 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5883 cp->cgn_alarm_freq = cpu_to_le16(value);
5885 lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5886 LPFC_CGN_CRC32_SEED);
5887 cp->cgn_info_crc = cpu_to_le32(lvalue);
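/*
 * Illustrative sketch (not driver code): the rollup rule applied above.
 * Event-count slots (driver/warning/alarm) are summed across the finer
 * buckets, while rate-like slots (latency, bandwidth) are averaged, so an
 * hour entry condenses its 60 minute entries and a day entry its 24 hour
 * entries. Names are hypothetical.
 */
static unsigned int rollup_sum(const unsigned short *bucket, int n)
{
	unsigned int total = 0;
	int i;

	for (i = 0; i < n; i++)
		total += bucket[i];
	return total;	/* callers divide by n for the averaged slots */
}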
5891 * lpfc_calc_cmf_latency - latency from start of rxate timer interval
5892 * @phba: The Hba for which this call is being executed.
5894 * The routine calculates the latency from the beginning of the CMF timer
5895 * interval to the current point in time. It is called from IO completion
5896 * when we exceed our Bandwidth limitation for the time interval.
5899 lpfc_calc_cmf_latency(struct lpfc_hba *phba)
5901 struct timespec64 cmpl_time;
5904 ktime_get_real_ts64(&cmpl_time);
5906 /* This routine works on a ms granularity so sec and nsec are
5907 * converted accordingly.
5909 if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) {
5910 msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) /
5913 if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) {
5914 msec = (cmpl_time.tv_sec -
5915 phba->cmf_latency.tv_sec) * MSEC_PER_SEC;
5916 msec += ((cmpl_time.tv_nsec -
5917 phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC);
5919 msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec -
5921 msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) +
5922 cmpl_time.tv_nsec) / NSEC_PER_MSEC);
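/*
 * Illustrative sketch (not driver code): the three-way split above is the
 * usual borrow-aware timespec subtraction, expressed here with plain integer
 * arguments. Names are hypothetical.
 */
static long long elapsed_ms_example(long long start_sec, long start_nsec,
				    long long end_sec, long end_nsec)
{
	long long sec = end_sec - start_sec;
	long nsec = end_nsec - start_nsec;

	if (nsec < 0) {			/* borrow one second */
		sec--;
		nsec += 1000000000L;
	}
	return sec * 1000LL + nsec / 1000000L;
}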
5929 * lpfc_cmf_timer - This is the timer function for one congestion
5931 * @timer: Pointer to the high resolution timer that expired
5933 static enum hrtimer_restart
5934 lpfc_cmf_timer(struct hrtimer *timer)
5936 struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
5938 struct rxtable_entry *entry;
5940 uint32_t head, tail;
5941 uint32_t busy, max_read;
5942 uint64_t total, rcv, lat, mbpi, extra, cnt;
5943 int timer_interval = LPFC_CMF_INTERVAL;
5945 struct lpfc_cgn_stat *cgs;
5948 /* Only restart the timer if congestion mgmt is on */
5949 if (phba->cmf_active_mode == LPFC_CFG_OFF ||
5950 !phba->cmf_latency.tv_sec) {
5951 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5952 "6224 CMF timer exit: %d %lld\n",
5953 phba->cmf_active_mode,
5954 (uint64_t)phba->cmf_latency.tv_sec);
5955 return HRTIMER_NORESTART;
5958 /* If pport is not ready yet, just exit and wait for
5959 * the next timer cycle to hit.
5964 /* Do not block SCSI IO while in the timer routine since
5965 * total_bytes will be cleared
5967 atomic_set(&phba->cmf_stop_io, 1);
5969 /* First we need to calculate the actual ms between
5970 * the last timer interrupt and this one. We ask for
5971 * LPFC_CMF_INTERVAL, however the actual time may
5972 * vary depending on system overhead.
5974 ms = lpfc_calc_cmf_latency(phba);
5977 /* Immediately after we calculate the time since the last
5978 * timer interrupt, set the start time for the next
5981 ktime_get_real_ts64(&phba->cmf_latency);
5983 phba->cmf_link_byte_count =
5984 div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000);
5986 /* Collect all the stats from the prior timer interval */
5991 for_each_present_cpu(cpu) {
5992 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
5993 total += atomic64_xchg(&cgs->total_bytes, 0);
5994 io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0);
5995 lat += atomic64_xchg(&cgs->rx_latency, 0);
5996 rcv += atomic64_xchg(&cgs->rcv_bytes, 0);
5999 /* Before we issue another CMF_SYNC_WQE, retrieve the BW
6000 * returned from the last CMF_SYNC_WQE issued, from
6001 * cmf_last_sync_bw. This will be the target BW for
6002 * this next timer interval.
6004 if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
6005 phba->link_state != LPFC_LINK_DOWN &&
6006 phba->hba_flag & HBA_SETUP) {
6007 mbpi = phba->cmf_last_sync_bw;
6008 phba->cmf_last_sync_bw = 0;
6011 /* Calculate any extra bytes needed to account for the
6012 * timer accuracy. If we are less than LPFC_CMF_INTERVAL
6013 * calculate the adjustment needed for total to reflect
6014 * a full LPFC_CMF_INTERVAL.
6016 if (ms && ms < LPFC_CMF_INTERVAL) {
6017 cnt = div_u64(total, ms); /* bytes per ms */
6018 cnt *= LPFC_CMF_INTERVAL; /* what total should be */
6020 /* If the timeout is scheduled to be shorter,
6021 * this value may skew the data, so cap it at mbpi.
6023 if ((phba->hba_flag & HBA_SHORT_CMF) && cnt > mbpi)
6026 extra = cnt - total;
6028 lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
6030 /* For Monitor mode or link down we want mbpi
6031 * to be the full link speed
6033 mbpi = phba->cmf_link_byte_count;
6036 phba->cmf_timer_cnt++;
6039 /* Update congestion info buffer latency in us */
6040 atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
6041 atomic64_add(lat, &phba->cgn_latency_evt);
6043 busy = atomic_xchg(&phba->cmf_busy, 0);
6044 max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);
6046 /* Calculate MBPI for the next timer interval */
6048 if (mbpi > phba->cmf_link_byte_count ||
6049 phba->cmf_active_mode == LPFC_CFG_MONITOR)
6050 mbpi = phba->cmf_link_byte_count;
6052 /* Change max_bytes_per_interval to what the prior
6053 * CMF_SYNC_WQE cmpl indicated.
6055 if (mbpi != phba->cmf_max_bytes_per_interval)
6056 phba->cmf_max_bytes_per_interval = mbpi;
6059 /* Save rxmonitor information for debug */
6060 if (phba->rxtable) {
6061 head = atomic_xchg(&phba->rxtable_idx_head,
6062 LPFC_RXMONITOR_TABLE_IN_USE);
6063 entry = &phba->rxtable[head];
6064 entry->total_bytes = total;
6065 entry->cmf_bytes = total + extra;
6066 entry->rcv_bytes = rcv;
6067 entry->cmf_busy = busy;
6068 entry->cmf_info = phba->cmf_active_info;
6070 entry->avg_io_latency = div_u64(lat, io_cnt);
6071 entry->avg_io_size = div_u64(rcv, io_cnt);
6073 entry->avg_io_latency = 0;
6074 entry->avg_io_size = 0;
6076 entry->max_read_cnt = max_read;
6077 entry->io_cnt = io_cnt;
6078 entry->max_bytes_per_interval = mbpi;
6079 if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
6080 entry->timer_utilization = phba->cmf_last_ts;
6082 entry->timer_utilization = ms;
6083 entry->timer_interval = ms;
6084 phba->cmf_last_ts = 0;
6086 /* Increment rxtable index */
6087 head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY;
6088 tail = atomic_read(&phba->rxtable_idx_tail);
6090 tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY;
6091 atomic_set(&phba->rxtable_idx_tail, tail);
6093 atomic_set(&phba->rxtable_idx_head, head);
6096 if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
6097 /* If Monitor mode, check if we are oversubscribed
6098 * against the full line rate.
6100 if (mbpi && total > mbpi)
6101 atomic_inc(&phba->cgn_driver_evt_cnt);
6103 phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */
6105 /* Each minute save Fabric and Driver congestion information */
6106 lpfc_cgn_save_evt_cnt(phba);
6108 phba->hba_flag &= ~HBA_SHORT_CMF;
6110 /* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the
6111 * minute, adjust our next timer interval, if needed, to ensure a
6112 * 1 minute granularity when we get the next timer interrupt.
6114 if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL),
6115 phba->cgn_evt_timestamp)) {
6116 timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp -
6117 jiffies);
6118 if (timer_interval <= 0)
6119 timer_interval = LPFC_CMF_INTERVAL;
6120 else
6121 phba->hba_flag |= HBA_SHORT_CMF;
6123 /* If we adjust timer_interval, max_bytes_per_interval
6124 * needs to be adjusted as well.
6126 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
6127 timer_interval, 1000);
6128 if (phba->cmf_active_mode == LPFC_CFG_MONITOR)
6129 phba->cmf_max_bytes_per_interval =
6130 phba->cmf_link_byte_count;
6133 /* Since total_bytes has already been zeroed, it's okay to unblock
6134 * after max_bytes_per_interval is set up.
6136 if (atomic_xchg(&phba->cmf_bw_wait, 0))
6137 queue_work(phba->wq, &phba->unblock_request_work);
6139 /* SCSI IO is now unblocked */
6140 atomic_set(&phba->cmf_stop_io, 0);
6143 hrtimer_forward_now(timer,
6144 ktime_set(0, timer_interval * NSEC_PER_MSEC));
6145 return HRTIMER_RESTART;
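/*
 * Minimal sketch (not driver code) of the self-rearming hrtimer
 * pattern used by the CMF timer above: the callback advances its own
 * expiry with hrtimer_forward_now() and returns HRTIMER_RESTART so
 * the timer keeps firing. The function name is hypothetical.
 */
static enum hrtimer_restart example_periodic_cb(struct hrtimer *timer)
{
	/* Push the expiry forward one interval from "now" */
	hrtimer_forward_now(timer, ms_to_ktime(LPFC_CMF_INTERVAL));
	return HRTIMER_RESTART;
}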
6148 #define trunk_link_status(__idx)\
6149 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6150 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
6151 "Link up" : "Link down") : "NA"
6152 /* Did port __idx report an error? */
6153 #define trunk_port_fault(__idx)\
6154 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6155 (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
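/*
 * Hedged sketch of the decode the two macros above perform: a port
 * reports a status only when its bit is set in the trunk config mask,
 * and a fault only when its bit is also set in the fault mask. The
 * helper below is illustrative; its name and types are not driver API.
 */
static inline const char *example_trunk_fault_str(u8 config_mask,
						  u8 fault_mask, int idx)
{
	if (!(config_mask & BIT(idx)))
		return "NA";	/* port is not part of the trunk */
	return (fault_mask & BIT(idx)) ? "YES" : "NO";
}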
6158 lpfc_update_trunk_link_status(struct lpfc_hba *phba,
6159 struct lpfc_acqe_fc_la *acqe_fc)
6161 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
6162 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
6164 phba->sli4_hba.link_state.speed =
6165 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6166 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6168 phba->sli4_hba.link_state.logical_speed =
6169 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6170 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
6171 phba->fc_linkspeed =
6172 lpfc_async_link_speed_to_read_top(
6174 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6176 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
6177 phba->trunk_link.link0.state =
6178 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
6179 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6180 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
6182 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
6183 phba->trunk_link.link1.state =
6184 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
6185 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6186 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
6188 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
6189 phba->trunk_link.link2.state =
6190 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
6191 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6192 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
6194 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
6195 phba->trunk_link.link3.state =
6196 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
6197 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6198 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
6201 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6202 "2910 Async FC Trunking Event - Speed:%d\n"
6203 "\tLogical speed:%d "
6204 "port0: %s port1: %s port2: %s port3: %s\n",
6205 phba->sli4_hba.link_state.speed,
6206 phba->sli4_hba.link_state.logical_speed,
6207 trunk_link_status(0), trunk_link_status(1),
6208 trunk_link_status(2), trunk_link_status(3));
6210 if (phba->cmf_active_mode != LPFC_CFG_OFF)
6211 lpfc_cmf_signal_init(phba);
6214 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6215 "3202 trunk error:0x%x (%s) seen on port0:%s "
6217 * SLI-4: Only error codes up to 0xA are
6218 * defined as of now. Print an appropriate
6219 * message in case the driver needs to be updated.
6221 "port1:%s port2:%s port3:%s\n", err, err > 0xA ?
6222 "UNDEFINED. update driver." : trunk_errmsg[err],
6223 trunk_port_fault(0), trunk_port_fault(1),
6224 trunk_port_fault(2), trunk_port_fault(3));
6229 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
6230 * @phba: pointer to lpfc hba data structure.
6231 * @acqe_fc: pointer to the async fc completion queue entry.
6233 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
6234 * that the event was received and then issue a read_topology mailbox command so
6235 * that the rest of the driver will treat it the same as SLI3.
6238 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
6240 struct lpfc_dmabuf *mp;
6243 struct lpfc_mbx_read_top *la;
6246 if (bf_get(lpfc_trailer_type, acqe_fc) !=
6247 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
6248 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6249 "2895 Non FC link Event detected.(%d)\n",
6250 bf_get(lpfc_trailer_type, acqe_fc));
6254 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6255 LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
6256 lpfc_update_trunk_link_status(phba, acqe_fc);
6260 /* Keep the link status for extra SLI4 state machine reference */
6261 phba->sli4_hba.link_state.speed =
6262 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6263 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6264 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
6265 phba->sli4_hba.link_state.topology =
6266 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
6267 phba->sli4_hba.link_state.status =
6268 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
6269 phba->sli4_hba.link_state.type =
6270 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
6271 phba->sli4_hba.link_state.number =
6272 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
6273 phba->sli4_hba.link_state.fault =
6274 bf_get(lpfc_acqe_link_fault, acqe_fc);
6276 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6277 LPFC_FC_LA_TYPE_LINK_DOWN)
6278 phba->sli4_hba.link_state.logical_speed = 0;
6279 else if (!phba->sli4_hba.conf_trunk)
6280 phba->sli4_hba.link_state.logical_speed =
6281 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6283 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6284 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
6285 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
6286 "%dMbps Fault:%d\n",
6287 phba->sli4_hba.link_state.speed,
6288 phba->sli4_hba.link_state.topology,
6289 phba->sli4_hba.link_state.status,
6290 phba->sli4_hba.link_state.type,
6291 phba->sli4_hba.link_state.number,
6292 phba->sli4_hba.link_state.logical_speed,
6293 phba->sli4_hba.link_state.fault);
6294 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6296 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6297 "2897 The mboxq allocation failed\n");
6300 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
6302 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6303 "2898 The lpfc_dmabuf allocation failed\n");
6306 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
6308 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6309 "2899 The mbuf allocation failed\n");
6310 goto out_free_dmabuf;
6313 /* Cleanup any outstanding ELS commands */
6314 lpfc_els_flush_all_cmd(phba);
6316 /* Block ELS IOCBs until we are done processing the link event */
6317 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
6319 /* Update link event statistics */
6320 phba->sli.slistat.link_event++;
6322 /* Create lpfc_handle_latt mailbox command from link ACQE */
6323 lpfc_read_topology(phba, pmb, mp);
6324 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
6325 pmb->vport = phba->pport;
6327 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
6328 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
6330 switch (phba->sli4_hba.link_state.status) {
6331 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
6332 phba->link_flag |= LS_MDS_LINK_DOWN;
6334 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
6335 phba->link_flag |= LS_MDS_LOOPBACK;
6341 /* Initialize completion status */
6343 mb->mbxStatus = MBX_SUCCESS;
6345 /* Parse port fault information field */
6346 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
6348 /* Parse and translate link attention fields */
6349 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
6350 la->eventTag = acqe_fc->event_tag;
6352 if (phba->sli4_hba.link_state.status ==
6353 LPFC_FC_LA_TYPE_UNEXP_WWPN) {
6354 bf_set(lpfc_mbx_read_top_att_type, la,
6355 LPFC_FC_LA_TYPE_UNEXP_WWPN);
6357 bf_set(lpfc_mbx_read_top_att_type, la,
6358 LPFC_FC_LA_TYPE_LINK_DOWN);
6360 /* Invoke the mailbox command callback function */
6361 lpfc_mbx_cmpl_read_topology(phba, pmb);
6366 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
6367 if (rc == MBX_NOT_FINISHED) {
6368 lpfc_mbuf_free(phba, mp->virt, mp->phys);
6369 goto out_free_dmabuf;
6376 mempool_free(pmb, phba->mbox_mem_pool);
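/*
 * Hedged sketch of the unwind pattern used by the routine above: each
 * allocation gets a goto target that releases everything acquired
 * before it, so a failure at any step frees resources in reverse
 * order. The helper is illustrative only; names are local to the
 * example.
 */
static int example_alloc_chain(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	struct lpfc_dmabuf *mp;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;

	mp = kmalloc(sizeof(*mp), GFP_KERNEL);
	if (!mp)
		goto out_free_pmb;

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt)
		goto out_free_dmabuf;

	/* ... issue the mailbox command here ... */
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
	return -ENOMEM;
}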
6380 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
6381 * @phba: pointer to lpfc hba data structure.
6382 * @acqe_sli: pointer to the async SLI completion queue entry.
6384 * This routine is to handle the SLI4 asynchronous SLI events.
6387 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
6393 uint8_t operational = 0;
6394 struct temp_event temp_event_data;
6395 struct lpfc_acqe_misconfigured_event *misconfigured;
6396 struct lpfc_acqe_cgn_signal *cgn_signal;
6397 struct Scsi_Host *shost;
6398 struct lpfc_vport **vports;
6401 evt_type = bf_get(lpfc_trailer_type, acqe_sli);
6403 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6404 "2901 Async SLI event - Type:%d, Event Data: x%08x "
6405 "x%08x x%08x x%08x\n", evt_type,
6406 acqe_sli->event_data1, acqe_sli->event_data2,
6407 acqe_sli->reserved, acqe_sli->trailer);
6409 port_name = phba->Port[0];
6410 if (port_name == 0x00)
6411 port_name = '?'; /* port name is empty */
6414 case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
6415 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6416 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
6417 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6419 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6420 "3190 Over Temperature:%d Celsius- Port Name %c\n",
6421 acqe_sli->event_data1, port_name);
6423 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
6424 shost = lpfc_shost_from_vport(phba->pport);
6425 fc_host_post_vendor_event(shost, fc_get_event_number(),
6426 sizeof(temp_event_data),
6427 (char *)&temp_event_data,
6428 SCSI_NL_VID_TYPE_PCI
6429 | PCI_VENDOR_ID_EMULEX);
6431 case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
6432 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6433 temp_event_data.event_code = LPFC_NORMAL_TEMP;
6434 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6436 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6437 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
6438 acqe_sli->event_data1, port_name);
6440 shost = lpfc_shost_from_vport(phba->pport);
6441 fc_host_post_vendor_event(shost, fc_get_event_number(),
6442 sizeof(temp_event_data),
6443 (char *)&temp_event_data,
6444 SCSI_NL_VID_TYPE_PCI
6445 | PCI_VENDOR_ID_EMULEX);
6447 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
6448 misconfigured = (struct lpfc_acqe_misconfigured_event *)
6449 &acqe_sli->event_data1;
6451 /* fetch the status for this port */
6452 switch (phba->sli4_hba.lnk_info.lnk_no) {
6453 case LPFC_LINK_NUMBER_0:
6454 status = bf_get(lpfc_sli_misconfigured_port0_state,
6455 &misconfigured->theEvent);
6456 operational = bf_get(lpfc_sli_misconfigured_port0_op,
6457 &misconfigured->theEvent);
6459 case LPFC_LINK_NUMBER_1:
6460 status = bf_get(lpfc_sli_misconfigured_port1_state,
6461 &misconfigured->theEvent);
6462 operational = bf_get(lpfc_sli_misconfigured_port1_op,
6463 &misconfigured->theEvent);
6465 case LPFC_LINK_NUMBER_2:
6466 status = bf_get(lpfc_sli_misconfigured_port2_state,
6467 &misconfigured->theEvent);
6468 operational = bf_get(lpfc_sli_misconfigured_port2_op,
6469 &misconfigured->theEvent);
6471 case LPFC_LINK_NUMBER_3:
6472 status = bf_get(lpfc_sli_misconfigured_port3_state,
6473 &misconfigured->theEvent);
6474 operational = bf_get(lpfc_sli_misconfigured_port3_op,
6475 &misconfigured->theEvent);
6478 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6480 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
6481 "event: Invalid link %d",
6482 phba->sli4_hba.lnk_info.lnk_no);
6486 /* Skip if optic state unchanged */
6487 if (phba->sli4_hba.lnk_info.optic_state == status)
6491 case LPFC_SLI_EVENT_STATUS_VALID:
6492 sprintf(message, "Physical Link is functional");
6494 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
6495 sprintf(message, "Optics faulted/incorrectly "
6496 "installed/not installed - Reseat optics, "
6497 "if issue not resolved, replace.");
6499 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
6501 "Optics of two types installed - Remove one "
6502 "optic or install matching pair of optics.");
6504 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
6505 sprintf(message, "Incompatible optics - Replace with "
6506 "compatible optics for card to function.");
6508 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
6509 sprintf(message, "Unqualified optics - Replace with "
6510 "Avago optics for Warranty and Technical "
6511 "Support - Link is%s operational",
6512 (operational) ? " not" : "");
6514 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
6515 sprintf(message, "Uncertified optics - Replace with "
6516 "Avago-certified optics to enable link "
6517 "operation - Link is%s operational",
6518 (operational) ? " not" : "");
6521 /* firmware is reporting a status we don't know about */
6522 sprintf(message, "Unknown event status x%02x", status);
6526 /* Issue READ_CONFIG mbox command to refresh supported speeds */
6527 rc = lpfc_sli4_read_config(phba);
6530 lpfc_printf_log(phba, KERN_ERR,
6532 "3194 Unable to retrieve supported "
6533 "speeds, rc = 0x%x\n", rc);
6535 rc = lpfc_sli4_refresh_params(phba);
6537 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6538 "3174 Unable to update pls support, "
6541 vports = lpfc_create_vport_work_array(phba);
6542 if (vports != NULL) {
6543 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6545 shost = lpfc_shost_from_vport(vports[i]);
6546 lpfc_host_supported_speeds_set(shost);
6549 lpfc_destroy_vport_work_array(phba, vports);
6551 phba->sli4_hba.lnk_info.optic_state = status;
6552 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6553 "3176 Port Name %c %s\n", port_name, message);
6555 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
6556 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6557 "3192 Remote DPort Test Initiated - "
6558 "Event Data1:x%08x Event Data2: x%08x\n",
6559 acqe_sli->event_data1, acqe_sli->event_data2);
6561 case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG:
6562 /* Call FW to obtain active parms */
6563 lpfc_sli4_cgn_parm_chg_evt(phba);
6565 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
6566 /* Misconfigured WWN. Reports that the SLI Port is configured
6567 * to use FA-WWN, but the attached device doesn't support it.
6568 * No driver action is required.
6569 * Event Data1 - N.A, Event Data2 - N.A
6571 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
6572 "2699 Misconfigured FA-WWN - Attached device does "
6573 "not support FA-WWN\n");
6575 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
6576 /* EEPROM failure. No driver action is required */
6577 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6578 "2518 EEPROM failure - "
6579 "Event Data1: x%08x Event Data2: x%08x\n",
6580 acqe_sli->event_data1, acqe_sli->event_data2);
6582 case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL:
6583 if (phba->cmf_active_mode == LPFC_CFG_OFF)
6585 cgn_signal = (struct lpfc_acqe_cgn_signal *)
6586 &acqe_sli->event_data1;
6587 phba->cgn_acqe_cnt++;
6589 cnt = bf_get(lpfc_warn_acqe, cgn_signal);
6590 atomic64_add(cnt, &phba->cgn_acqe_stat.warn);
6591 atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm);
6593 /* no threshold for CMF, even 1 signal will trigger an event */
6595 /* Alarm overrides warning, so check that first */
6596 if (cgn_signal->alarm_cnt) {
6597 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6598 /* Keep track of alarm cnt for cgn_info */
6599 atomic_add(cgn_signal->alarm_cnt,
6600 &phba->cgn_fabric_alarm_cnt);
6601 /* Keep track of alarm cnt for CMF_SYNC_WQE */
6602 atomic_add(cgn_signal->alarm_cnt,
6603 &phba->cgn_sync_alarm_cnt);
6606 /* signal action needs to be taken */
6607 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
6608 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6609 /* Keep track of warning cnt for cgn_info */
6610 atomic_add(cnt, &phba->cgn_fabric_warn_cnt);
6611 /* Keep track of warning cnt for CMF_SYNC_WQE */
6612 atomic_add(cnt, &phba->cgn_sync_warn_cnt);
6617 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6618 "3193 Unrecognized SLI event, type: 0x%x",
6625 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
6626 * @vport: pointer to vport data structure.
6628 * This routine is to perform Clear Virtual Link (CVL) on a vport in
6629 * response to a CVL event.
6631 * Return the pointer to the ndlp with the vport if successful, otherwise
6634 static struct lpfc_nodelist *
6635 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
6637 struct lpfc_nodelist *ndlp;
6638 struct Scsi_Host *shost;
6639 struct lpfc_hba *phba;
6646 ndlp = lpfc_findnode_did(vport, Fabric_DID);
6648 /* Cannot find existing Fabric ndlp, so allocate a new one */
6649 ndlp = lpfc_nlp_init(vport, Fabric_DID);
6652 /* Set the node type */
6653 ndlp->nlp_type |= NLP_FABRIC;
6654 /* Put ndlp onto node list */
6655 lpfc_enqueue_node(vport, ndlp);
6657 if ((phba->pport->port_state < LPFC_FLOGI) &&
6658 (phba->pport->port_state != LPFC_VPORT_FAILED))
6660 /* If virtual link is not yet instantiated ignore CVL */
6661 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
6662 && (vport->port_state != LPFC_VPORT_FAILED))
6664 shost = lpfc_shost_from_vport(vport);
6667 lpfc_linkdown_port(vport);
6668 lpfc_cleanup_pending_mbox(vport);
6669 spin_lock_irq(shost->host_lock);
6670 vport->fc_flag |= FC_VPORT_CVL_RCVD;
6671 spin_unlock_irq(shost->host_lock);
6677 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
6678 * @phba: pointer to lpfc hba data structure.
6680 * This routine is to perform Clear Virtual Link (CVL) on all vports in
6681 * response to a FCF dead event.
6684 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
6686 struct lpfc_vport **vports;
6689 vports = lpfc_create_vport_work_array(phba);
6691 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
6692 lpfc_sli4_perform_vport_cvl(vports[i]);
6693 lpfc_destroy_vport_work_array(phba, vports);
6697 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
6698 * @phba: pointer to lpfc hba data structure.
6699 * @acqe_fip: pointer to the async fcoe completion queue entry.
6701 * This routine is to handle the SLI4 asynchronous fcoe event.
6704 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
6705 struct lpfc_acqe_fip *acqe_fip)
6707 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
6709 struct lpfc_vport *vport;
6710 struct lpfc_nodelist *ndlp;
6711 int active_vlink_present;
6712 struct lpfc_vport **vports;
6715 phba->fc_eventTag = acqe_fip->event_tag;
6716 phba->fcoe_eventtag = acqe_fip->event_tag;
6717 switch (event_type) {
6718 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
6719 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
6720 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
6721 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6722 "2546 New FCF event, evt_tag:x%x, "
6724 acqe_fip->event_tag,
6727 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
6729 "2788 FCF param modified event, "
6730 "evt_tag:x%x, index:x%x\n",
6731 acqe_fip->event_tag,
6733 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6735 * During period of FCF discovery, read the FCF
6736 * table record indexed by the event to update
6737 * FCF roundrobin failover eligible FCF bmask.
6739 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6741 "2779 Read FCF (x%x) for updating "
6742 "roundrobin FCF failover bmask\n",
6744 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
6747 /* If the FCF discovery is in progress, do nothing. */
6748 spin_lock_irq(&phba->hbalock);
6749 if (phba->hba_flag & FCF_TS_INPROG) {
6750 spin_unlock_irq(&phba->hbalock);
6753 /* If fast FCF failover rescan event is pending, do nothing */
6754 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
6755 spin_unlock_irq(&phba->hbalock);
6759 /* If the FCF has been in discovered state, do nothing. */
6760 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
6761 spin_unlock_irq(&phba->hbalock);
6764 spin_unlock_irq(&phba->hbalock);
6766 /* Otherwise, scan the entire FCF table and re-discover SAN */
6767 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6768 "2770 Start FCF table scan per async FCF "
6769 "event, evt_tag:x%x, index:x%x\n",
6770 acqe_fip->event_tag, acqe_fip->index);
6771 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
6772 LPFC_FCOE_FCF_GET_FIRST);
6774 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6775 "2547 Issue FCF scan read FCF mailbox "
6776 "command failed (x%x)\n", rc);
6779 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
6780 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6781 "2548 FCF Table full count 0x%x tag 0x%x\n",
6782 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
6783 acqe_fip->event_tag);
6786 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
6787 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6788 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6789 "2549 FCF (x%x) disconnected from network, "
6790 "tag:x%x\n", acqe_fip->index,
6791 acqe_fip->event_tag);
6793 * If we are in the middle of FCF failover process, clear
6794 * the corresponding FCF bit in the roundrobin bitmap.
6796 spin_lock_irq(&phba->hbalock);
6797 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
6798 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
6799 spin_unlock_irq(&phba->hbalock);
6800 /* Update FLOGI FCF failover eligible FCF bmask */
6801 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
6804 spin_unlock_irq(&phba->hbalock);
6806 /* If the event is not for the currently used FCF, do nothing */
6807 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
6811 * Otherwise, request the port to rediscover the entire FCF
6812 * table for a fast recovery in case the current FCF
6813 * is no longer valid, as we are not already in the middle
6814 * of the FCF failover process.
6816 spin_lock_irq(&phba->hbalock);
6817 /* Mark the fast failover process in progress */
6818 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
6819 spin_unlock_irq(&phba->hbalock);
6821 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6822 "2771 Start FCF fast failover process due to "
6823 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
6824 "\n", acqe_fip->event_tag, acqe_fip->index);
6825 rc = lpfc_sli4_redisc_fcf_table(phba);
6827 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6829 "2772 Issue FCF rediscover mailbox "
6830 "command failed, fail through to FCF "
6831 "DEAD event\n");
6832 spin_lock_irq(&phba->hbalock);
6833 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
6834 spin_unlock_irq(&phba->hbalock);
6836 * Last resort will fail over by treating this
6837 * as a link down to FCF registration.
6839 lpfc_sli4_fcf_dead_failthrough(phba);
6841 /* Reset FCF roundrobin bmask for new discovery */
6842 lpfc_sli4_clear_fcf_rr_bmask(phba);
6844 * Handling fast FCF failover to a DEAD FCF event is
6845 * considered equivalent to receiving CVL on all vports.
6847 lpfc_sli4_perform_all_vport_cvl(phba);
6850 case LPFC_FIP_EVENT_TYPE_CVL:
6851 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6852 lpfc_printf_log(phba, KERN_ERR,
6854 "2718 Clear Virtual Link Received for VPI 0x%x"
6855 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
6857 vport = lpfc_find_vport_by_vpid(phba,
6859 ndlp = lpfc_sli4_perform_vport_cvl(vport);
6862 active_vlink_present = 0;
6864 vports = lpfc_create_vport_work_array(phba);
6866 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6868 if ((!(vports[i]->fc_flag &
6869 FC_VPORT_CVL_RCVD)) &&
6870 (vports[i]->port_state > LPFC_FDISC)) {
6871 active_vlink_present = 1;
6875 lpfc_destroy_vport_work_array(phba, vports);
6879 * Don't re-instantiate if vport is marked for deletion.
6880 * If we are here first then vport_delete is going to wait
6881 * for discovery to complete.
6883 if (!(vport->load_flag & FC_UNLOADING) &&
6884 active_vlink_present) {
6886 * If there are other active VLinks present,
6887 * re-instantiate the Vlink using FDISC.
6889 mod_timer(&ndlp->nlp_delayfunc,
6890 jiffies + msecs_to_jiffies(1000));
6891 spin_lock_irq(&ndlp->lock);
6892 ndlp->nlp_flag |= NLP_DELAY_TMO;
6893 spin_unlock_irq(&ndlp->lock);
6894 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
6895 vport->port_state = LPFC_FDISC;
6898 * Otherwise, request the port to rediscover
6899 * the entire FCF table for a fast recovery
6900 * in case the current FCF is no longer
6901 * valid, if we are not already in the FCF
6902 * failover process.
6904 spin_lock_irq(&phba->hbalock);
6905 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6906 spin_unlock_irq(&phba->hbalock);
6909 /* Mark the fast failover process in progress */
6910 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
6911 spin_unlock_irq(&phba->hbalock);
6912 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6914 "2773 Start FCF failover per CVL, "
6915 "evt_tag:x%x\n", acqe_fip->event_tag);
6916 rc = lpfc_sli4_redisc_fcf_table(phba);
6918 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6920 "2774 Issue FCF rediscover "
6921 "mailbox command failed, "
6922 "fail through to CVL event\n");
6923 spin_lock_irq(&phba->hbalock);
6924 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
6925 spin_unlock_irq(&phba->hbalock);
6927 * Last resort will be a retry on the
6928 * currently registered FCF entry.
6930 lpfc_retry_pport_discovery(phba);
6933 /* Reset FCF roundrobin bmask for new discovery */
6936 lpfc_sli4_clear_fcf_rr_bmask(phba);
6940 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6941 "0288 Unknown FCoE event type 0x%x event tag "
6942 "0x%x\n", event_type, acqe_fip->event_tag);
6948 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
6949 * @phba: pointer to lpfc hba data structure.
6950 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
6952 * This routine is to handle the SLI4 asynchronous dcbx event.
6955 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
6956 struct lpfc_acqe_dcbx *acqe_dcbx)
6958 phba->fc_eventTag = acqe_dcbx->event_tag;
6959 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6960 "0290 The SLI4 DCBX asynchronous event is not "
6961 "handled yet\n");
6965 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
6966 * @phba: pointer to lpfc hba data structure.
6967 * @acqe_grp5: pointer to the async grp5 completion queue entry.
6969 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
6970 * is an asynchronous notification of a logical link speed change. The Port
6971 * reports the logical link speed in units of 10Mbps.
6974 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
6975 struct lpfc_acqe_grp5 *acqe_grp5)
6977 uint16_t prev_ll_spd;
6979 phba->fc_eventTag = acqe_grp5->event_tag;
6980 phba->fcoe_eventtag = acqe_grp5->event_tag;
6981 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
6982 phba->sli4_hba.link_state.logical_speed =
6983 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
6984 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6985 "2789 GRP5 Async Event: Updating logical link speed "
6986 "from %dMbps to %dMbps\n", prev_ll_spd,
6987 phba->sli4_hba.link_state.logical_speed);
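/*
 * Sketch of the unit conversion above (illustrative helper, not
 * driver API): the port reports logical link speed in 10 Mbps units,
 * so a raw ACQE value of 1000 corresponds to 10000 Mbps (10 Gbps).
 */
static inline uint32_t example_llink_spd_to_mbps(uint16_t raw_units)
{
	return (uint32_t)raw_units * 10;	/* 10 Mbps units -> Mbps */
}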
6991 * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event
6992 * @phba: pointer to lpfc hba data structure.
6994 * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event
6995 * is an asynchronous notification of a request to reset CM stats.
6998 lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba)
7002 lpfc_init_congestion_stat(phba);
7006 * lpfc_cgn_params_val - Validate FW congestion parameters.
7007 * @phba: pointer to lpfc hba data structure.
7008 * @p_cfg_param: pointer to FW provided congestion parameters.
7010 * This routine validates the congestion parameters passed
7011 * by the FW to the driver via an ACQE event.
7014 lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
7016 spin_lock_irq(&phba->hbalock);
7018 if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF,
7019 LPFC_CFG_MONITOR)) {
7020 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
7021 "6225 CMF mode param out of range: %d\n",
7022 p_cfg_param->cgn_param_mode);
7023 p_cfg_param->cgn_param_mode = LPFC_CFG_OFF;
7026 spin_unlock_irq(&phba->hbalock);
7030 * lpfc_cgn_params_parse - Process a FW cong parm change event
7031 * @phba: pointer to lpfc hba data structure.
7032 * @p_cgn_param: pointer to a data buffer with the FW cong params.
7033 * @len: the size of pdata in bytes.
7035 * This routine validates the congestion management buffer signature
7036 * from the FW, validates the contents and makes corrections for
7037 * valid, in-range values. If the signature magic is correct and
7038 * after parameter validation, the contents are copied to the driver's
7039 * @phba structure. If the magic is incorrect, an error message is
7043 lpfc_cgn_params_parse(struct lpfc_hba *phba,
7044 struct lpfc_cgn_param *p_cgn_param, uint32_t len)
7046 struct lpfc_cgn_info *cp;
7047 uint32_t crc, oldmode;
7049 /* Make sure the FW has encoded the correct magic number to
7050 * validate the congestion parameter in FW memory.
7052 if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) {
7053 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7054 "4668 FW cgn parm buffer data: "
7055 "magic 0x%x version %d mode %d "
7056 "level0 %d level1 %d "
7057 "level2 %d byte13 %d "
7058 "byte14 %d byte15 %d "
7059 "byte11 %d byte12 %d activeMode %d\n",
7060 p_cgn_param->cgn_param_magic,
7061 p_cgn_param->cgn_param_version,
7062 p_cgn_param->cgn_param_mode,
7063 p_cgn_param->cgn_param_level0,
7064 p_cgn_param->cgn_param_level1,
7065 p_cgn_param->cgn_param_level2,
7066 p_cgn_param->byte13,
7067 p_cgn_param->byte14,
7068 p_cgn_param->byte15,
7069 p_cgn_param->byte11,
7070 p_cgn_param->byte12,
7071 phba->cmf_active_mode);
7073 oldmode = phba->cmf_active_mode;
7075 /* Any parameters out of range are corrected to defaults
7076 * by this routine. No need to fail.
7078 lpfc_cgn_params_val(phba, p_cgn_param);
7080 /* Parameters are verified, move them into driver storage */
7081 spin_lock_irq(&phba->hbalock);
7082 memcpy(&phba->cgn_p, p_cgn_param,
7083 sizeof(struct lpfc_cgn_param));
7085 /* Update parameters in congestion info buffer now */
7087 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
7088 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
7089 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
7090 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
7091 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
7092 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
7093 LPFC_CGN_CRC32_SEED);
7094 cp->cgn_info_crc = cpu_to_le32(crc);
7096 spin_unlock_irq(&phba->hbalock);
7098 phba->cmf_active_mode = phba->cgn_p.cgn_param_mode;
7102 if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) {
7103 /* Turning CMF on */
7104 lpfc_cmf_start(phba);
7106 if (phba->link_state >= LPFC_LINK_UP) {
7107 phba->cgn_reg_fpin =
7108 phba->cgn_init_reg_fpin;
7109 phba->cgn_reg_signal =
7110 phba->cgn_init_reg_signal;
7111 lpfc_issue_els_edc(phba->pport, 0);
7115 case LPFC_CFG_MANAGED:
7116 switch (phba->cgn_p.cgn_param_mode) {
7118 /* Turning CMF off */
7119 lpfc_cmf_stop(phba);
7120 if (phba->link_state >= LPFC_LINK_UP)
7121 lpfc_issue_els_edc(phba->pport, 0);
7123 case LPFC_CFG_MONITOR:
7124 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7125 "4661 Switch from MANAGED to "
7127 phba->cmf_max_bytes_per_interval =
7128 phba->cmf_link_byte_count;
7130 /* Resume blocked IO - unblock on workqueue */
7131 queue_work(phba->wq,
7132 &phba->unblock_request_work);
7136 case LPFC_CFG_MONITOR:
7137 switch (phba->cgn_p.cgn_param_mode) {
7139 /* Turning CMF off */
7140 lpfc_cmf_stop(phba);
7141 if (phba->link_state >= LPFC_LINK_UP)
7142 lpfc_issue_els_edc(phba->pport, 0);
7144 case LPFC_CFG_MANAGED:
7145 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7146 "4662 Switch from MONITOR to "
7148 lpfc_cmf_signal_init(phba);
7154 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7155 "4669 FW cgn parm buf wrong magic 0x%x "
7156 "version %d\n", p_cgn_param->cgn_param_magic,
7157 p_cgn_param->cgn_param_version);
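/*
 * Hedged sketch of the validate-then-commit pattern implemented
 * above: reject the buffer unless the magic matches, clamp any
 * out-of-range fields, and only then copy the whole structure into
 * driver storage under the lock. The helper name is hypothetical.
 */
static int example_commit_cgn_params(struct lpfc_hba *phba,
				     struct lpfc_cgn_param *p)
{
	if (p->cgn_param_magic != LPFC_CFG_PARAM_MAGIC_NUM)
		return -EINVAL;		/* stale or corrupt FW buffer */

	lpfc_cgn_params_val(phba, p);	/* corrects out-of-range values */

	spin_lock_irq(&phba->hbalock);
	memcpy(&phba->cgn_p, p, sizeof(phba->cgn_p));
	spin_unlock_irq(&phba->hbalock);
	return 0;
}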
7162 * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters.
7163 * @phba: pointer to lpfc hba data structure.
7165 * This routine issues a read_object mailbox command to
7166 * get the congestion management parameters from the FW,
7167 * parses them, and updates the driver-maintained values.
7170 * 0 if the object was empty
7171 * a negative error value if an error was encountered
7172 * Count if bytes were read from object
7175 lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
7178 struct lpfc_cgn_param *p_cgn_param = NULL;
7182 /* Find out if the FW has a new set of congestion parameters. */
7183 len = sizeof(struct lpfc_cgn_param);
7184 pdata = kzalloc(len, GFP_KERNEL);
7185 ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
7188 /* 0 means no data. A negative means error. A positive means
7189 * bytes were copied.
7192 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7193 "4670 CGN RD OBJ returns no data\n");
7195 } else if (ret < 0) {
7196 /* Some error. Just exit and return it to the caller.*/
7200 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7201 "6234 READ CGN PARAMS Successful %d\n", len);
7203 /* Parse data pointer over len and update the phba congestion
7204 * parameters with values passed back. The receive rate values
7205 * may have been altered in FW, but take no action here.
7207 p_cgn_param = (struct lpfc_cgn_param *)pdata;
7208 lpfc_cgn_params_parse(phba, p_cgn_param, len);
7216 * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event
7217 * @phba: pointer to lpfc hba data structure.
7219 * The FW generated Async ACQE SLI event calls this routine when
7220 * the event type is an SLI Internal Port Event and the Event Code
7221 * indicates a change to the FW maintained congestion parameters.
7223 * This routine executes a Read_Object mailbox call to obtain the
7224 * current congestion parameters maintained in FW and corrects
7225 * the driver's active congestion parameters.
7227 * The acqe event is not passed because no further data is required.
7230 * Returns nonzero error if event processing encountered an error.
7231 * Zero otherwise for success.
7234 lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba)
7238 if (!phba->sli4_hba.pc_sli4_params.cmf) {
7239 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7240 "4664 Cgn Evt when E2E off. Drop event\n");
7244 /* If the event is claiming an empty object, it's ok. A write
7245 * could have cleared it. The only error is a negative return status.
7248 ret = lpfc_sli4_cgn_params_read(phba);
7250 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7251 "4667 Error reading Cgn Params (%d)\n",
7254 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7255 "4673 CGN Event empty object.\n");
7261 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
7262 * @phba: pointer to lpfc hba data structure.
7264 * This routine is invoked by the worker thread to process all the pending
7265 * SLI4 asynchronous events.
7267 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
7269 struct lpfc_cq_event *cq_event;
7270 unsigned long iflags;
7272 /* First, declare the async event has been handled */
7273 spin_lock_irqsave(&phba->hbalock, iflags);
7274 phba->hba_flag &= ~ASYNC_EVENT;
7275 spin_unlock_irqrestore(&phba->hbalock, iflags);
7277 /* Now, handle all the async events */
7278 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7279 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
7280 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
7281 cq_event, struct lpfc_cq_event, list);
7282 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
7285 /* Process the asynchronous event */
7286 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
7287 case LPFC_TRAILER_CODE_LINK:
7288 lpfc_sli4_async_link_evt(phba,
7289 &cq_event->cqe.acqe_link);
7291 case LPFC_TRAILER_CODE_FCOE:
7292 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
7294 case LPFC_TRAILER_CODE_DCBX:
7295 lpfc_sli4_async_dcbx_evt(phba,
7296 &cq_event->cqe.acqe_dcbx);
7298 case LPFC_TRAILER_CODE_GRP5:
7299 lpfc_sli4_async_grp5_evt(phba,
7300 &cq_event->cqe.acqe_grp5);
7302 case LPFC_TRAILER_CODE_FC:
7303 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
7305 case LPFC_TRAILER_CODE_SLI:
7306 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
7308 case LPFC_TRAILER_CODE_CMSTAT:
7309 lpfc_sli4_async_cmstat_evt(phba);
7312 lpfc_printf_log(phba, KERN_ERR,
7314 "1804 Invalid asynchronous event code: "
7315 "x%x\n", bf_get(lpfc_trailer_code,
7316 &cq_event->cqe.mcqe_cmpl));
7320 /* Free the completion event processed to the free pool */
7321 lpfc_sli4_cq_event_release(phba, cq_event);
7322 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7324 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
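/*
 * Hedged sketch of the drain idiom used above: pop one entry at a
 * time under the lock, then drop the lock while the entry is
 * processed so producers (interrupt context) are never blocked for
 * the duration of the handler. Names are local to the example.
 */
static void example_drain_event_list(struct list_head *q, spinlock_t *lock)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	spin_lock_irqsave(lock, iflags);
	while (!list_empty(q)) {
		cq_event = list_first_entry(q, struct lpfc_cq_event, list);
		list_del_init(&cq_event->list);
		spin_unlock_irqrestore(lock, iflags);

		/* ... process cq_event without holding the lock ... */

		spin_lock_irqsave(lock, iflags);
	}
	spin_unlock_irqrestore(lock, iflags);
}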
7328 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
7329 * @phba: pointer to lpfc hba data structure.
7331 * This routine is invoked by the worker thread to process FCF table
7332 * rediscovery pending completion event.
7334 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
7338 spin_lock_irq(&phba->hbalock);
7339 /* Clear FCF rediscovery timeout event */
7340 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
7341 /* Clear driver fast failover FCF record flag */
7342 phba->fcf.failover_rec.flag = 0;
7343 /* Set state for FCF fast failover */
7344 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
7345 spin_unlock_irq(&phba->hbalock);
7347 /* Scan FCF table from the first entry to re-discover SAN */
7348 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
7349 "2777 Start post-quiescent FCF table scan\n");
7350 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
7352 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7353 "2747 Issue FCF scan read FCF mailbox "
7354 "command failed 0x%x\n", rc);
7358 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
7359 * @phba: pointer to lpfc hba data structure.
7360 * @dev_grp: The HBA PCI-Device group number.
7362 * This routine is invoked to set up the per HBA PCI-Device group function
7363 * API jump table entries.
7365 * Return: 0 if success, otherwise -ENODEV
7368 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7372 /* Set up lpfc PCI-device group */
7373 phba->pci_dev_grp = dev_grp;
7375 /* The LPFC_PCI_DEV_OC uses SLI4 */
7376 if (dev_grp == LPFC_PCI_DEV_OC)
7377 phba->sli_rev = LPFC_SLI_REV4;
7379 /* Set up device INIT API function jump table */
7380 rc = lpfc_init_api_table_setup(phba, dev_grp);
7383 /* Set up SCSI API function jump table */
7384 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
7387 /* Set up SLI API function jump table */
7388 rc = lpfc_sli_api_table_setup(phba, dev_grp);
7391 /* Set up MBOX API function jump table */
7392 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
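/*
 * Hedged sketch of the jump-table idea behind the setup calls above:
 * each API family installs function pointers keyed off the PCI device
 * group, so SLI-3 and SLI-4 paths share one call site. The ops struct
 * is hypothetical; lpfc_hba_down_post_s3/_s4 are the real per-rev
 * handlers declared at the top of this file, and LPFC_PCI_DEV_LP is
 * assumed to be the SLI-3 device group.
 */
struct example_api_ops {
	int (*hba_down_post)(struct lpfc_hba *phba);
};

static int example_api_setup(struct example_api_ops *ops, uint8_t dev_grp)
{
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:		/* SLI-3 style ports */
		ops->hba_down_post = lpfc_hba_down_post_s3;
		break;
	case LPFC_PCI_DEV_OC:		/* SLI-4 style ports */
		ops->hba_down_post = lpfc_hba_down_post_s4;
		break;
	default:
		return -ENODEV;		/* unknown device group */
	}
	return 0;
}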
7400 * lpfc_log_intr_mode - Log the active interrupt mode
7401 * @phba: pointer to lpfc hba data structure.
7402 * @intr_mode: active interrupt mode adopted.
7404 * This routine is invoked to log the currently used active interrupt mode
7407 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
7409 switch (intr_mode) {
7411 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7412 "0470 Enable INTx interrupt mode.\n");
7415 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7416 "0481 Enabled MSI interrupt mode.\n");
7419 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7420 "0480 Enabled MSI-X interrupt mode.\n");
7423 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7424 "0482 Illegal interrupt mode.\n");
7431 * lpfc_enable_pci_dev - Enable a generic PCI device.
7432 * @phba: pointer to lpfc hba data structure.
7434 * This routine is invoked to enable the PCI device that is common to all
7439 * other values - error
7442 lpfc_enable_pci_dev(struct lpfc_hba *phba)
7444 struct pci_dev *pdev;
7446 /* Obtain PCI device reference */
7450 pdev = phba->pcidev;
7451 /* Enable PCI device */
7452 if (pci_enable_device_mem(pdev))
7454 /* Request PCI resource for the device */
7455 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
7456 goto out_disable_device;
7457 /* Set up device as PCI master and save state for EEH */
7458 pci_set_master(pdev);
7459 pci_try_set_mwi(pdev);
7460 pci_save_state(pdev);
7462 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
7463 if (pci_is_pcie(pdev))
7464 pdev->needs_freset = 1;
7469 pci_disable_device(pdev);
7471 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7472 "1401 Failed to enable pci device\n");
7477 * lpfc_disable_pci_dev - Disable a generic PCI device.
7478 * @phba: pointer to lpfc hba data structure.
7480 * This routine is invoked to disable the PCI device that is common to all
7484 lpfc_disable_pci_dev(struct lpfc_hba *phba)
7486 struct pci_dev *pdev;
7488 /* Obtain PCI device reference */
7492 pdev = phba->pcidev;
7493 /* Release PCI resource and disable PCI device */
7494 pci_release_mem_regions(pdev);
7495 pci_disable_device(pdev);
7501 * lpfc_reset_hba - Reset a hba
7502 * @phba: pointer to lpfc hba data structure.
7504 * This routine is invoked to reset a hba device. It brings the HBA
7505 * offline, performs a board restart, and then brings the board back
7506 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
7507 * outstanding mailbox commands.
7510 lpfc_reset_hba(struct lpfc_hba *phba)
7512 /* If resets are disabled then set error state and return. */
7513 if (!phba->cfg_enable_hba_reset) {
7514 phba->link_state = LPFC_HBA_ERROR;
7518 /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
7519 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
7520 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
7522 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
7523 lpfc_sli_flush_io_rings(phba);
7526 lpfc_sli_brdrestart(phba);
7528 lpfc_unblock_mgmt_io(phba);
7532 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
7533 * @phba: pointer to lpfc hba data structure.
7535 * This function reads the SR-IOV extended capability of the physical
7536 * function to determine how many SR-IOV virtual functions the device
7537 * can support. It returns the TotalVFs value reported in the SR-IOV
7538 * capability, or 0 if the device does not have an SR-IOV capability.
7542 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
7544 struct pci_dev *pdev = phba->pcidev;
7548 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
7552 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
7557 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
7558 * @phba: pointer to lpfc hba data structure.
7559 * @nr_vfn: number of virtual functions to be enabled.
7561 * This function enables the PCI SR-IOV virtual functions to a physical
7562 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
7563 * enable the number of virtual functions to the physical function. As
7564 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
7565 * API call is not considered an error condition for most devices.
7568 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
7570 struct pci_dev *pdev = phba->pcidev;
7571 uint16_t max_nr_vfn;
7574 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
7575 if (nr_vfn > max_nr_vfn) {
7576 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7577 "3057 Requested vfs (%d) greater than "
7578 "supported vfs (%d)", nr_vfn, max_nr_vfn);
7582 rc = pci_enable_sriov(pdev, nr_vfn);
7584 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7585 "2806 Failed to enable sriov on this device "
7586 "with vfn number nr_vf:%d, rc:%d\n",
7589 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7590 "2807 Successful enable sriov on this device "
7591 "with vfn number nr_vf:%d\n", nr_vfn);
7596 lpfc_unblock_requests_work(struct work_struct *work)
7598 struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
7599 unblock_request_work);
7601 lpfc_unblock_requests(phba);
7605 * lpfc_setup_driver_resource_phase1 - Phase1 setup of driver internal resources.
7606 * @phba: pointer to lpfc hba data structure.
7608 * This routine is invoked to set up the driver internal resources before the
7609 * device-specific resource setup to support the HBA device it is attached to.
7613 * other values - error
7616 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
7618 struct lpfc_sli *psli = &phba->sli;
7621 * Driver resources common to all SLI revisions
7623 atomic_set(&phba->fast_event_count, 0);
7624 atomic_set(&phba->dbg_log_idx, 0);
7625 atomic_set(&phba->dbg_log_cnt, 0);
7626 atomic_set(&phba->dbg_log_dmping, 0);
7627 spin_lock_init(&phba->hbalock);
7629 /* Initialize port_list spinlock */
7630 spin_lock_init(&phba->port_list_lock);
7631 INIT_LIST_HEAD(&phba->port_list);
7633 INIT_LIST_HEAD(&phba->work_list);
7634 init_waitqueue_head(&phba->wait_4_mlo_m_q);
7636 /* Initialize the wait queue head for the kernel thread */
7637 init_waitqueue_head(&phba->work_waitq);
7639 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7640 "1403 Protocols supported %s %s %s\n",
7641 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
7643 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
7645 (phba->nvmet_support ? "NVMET" : " "));
7647 /* Initialize the IO buffer list used by driver for SLI3 SCSI */
7648 spin_lock_init(&phba->scsi_buf_list_get_lock);
7649 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
7650 spin_lock_init(&phba->scsi_buf_list_put_lock);
7651 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
7653 /* Initialize the fabric iocb list */
7654 INIT_LIST_HEAD(&phba->fabric_iocb_list);
7656 /* Initialize list to save ELS buffers */
7657 INIT_LIST_HEAD(&phba->elsbuf);
7659 /* Initialize FCF connection rec list */
7660 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
7662 /* Initialize OAS configuration list */
7663 spin_lock_init(&phba->devicelock);
7664 INIT_LIST_HEAD(&phba->luns);
7666 /* MBOX heartbeat timer */
7667 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
7668 /* Fabric block timer */
7669 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
7670 /* EA polling mode timer */
7671 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
7672 /* Heartbeat timer */
7673 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
7675 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
7677 INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
7678 lpfc_idle_stat_delay_work);
7679 INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work);
7684 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
7685 * @phba: pointer to lpfc hba data structure.
7687 * This routine is invoked to set up the driver internal resources specific to
7688 * support the SLI-3 HBA device it is attached to.
7692 * other values - error
7695 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
7700 * Initialize timers used by driver
7703 /* FCP polling mode timer */
7704 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
7706 /* Host attention work mask setup */
7707 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
7708 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
7710 /* Get all the module params for configuring this host */
7711 lpfc_get_cfgparam(phba);
7712 /* Set up phase-1 common device driver resources */
7714 rc = lpfc_setup_driver_resource_phase1(phba);
7718 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
7719 phba->menlo_flag |= HBA_MENLO_SUPPORT;
7720 /* check for menlo minimum sg count */
7721 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
7722 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
7725 if (!phba->sli.sli3_ring)
7726 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
7727 sizeof(struct lpfc_sli_ring),
7729 if (!phba->sli.sli3_ring)
7733 * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
7734 * used to create the sg_dma_buf_pool must be dynamically calculated.
7737 if (phba->sli_rev == LPFC_SLI_REV4)
7738 entry_sz = sizeof(struct sli4_sge);
7740 entry_sz = sizeof(struct ulp_bde64);
7742 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
7743 if (phba->cfg_enable_bg) {
7745 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
7746 * the FCP rsp, and a BDE for each. Since we have no control
7747 * over how many protection data segments the SCSI Layer
7748 * will hand us (i.e. there could be one for every block
7749 * in the IO), we just allocate enough BDEs to accommodate
7750 * our max amount and we need to limit lpfc_sg_seg_cnt to
7751 * minimize the risk of running out.
7753 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7754 sizeof(struct fcp_rsp) +
7755 (LPFC_MAX_SG_SEG_CNT * entry_sz);
7757 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
7758 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
7760 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
7761 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
7764 * The scsi_buf for a regular I/O will hold the FCP cmnd,
7765 * the FCP rsp, a BDE for each, and a BDE for up to
7766 * cfg_sg_seg_cnt data segments.
7768 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7769 sizeof(struct fcp_rsp) +
7770 ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
7772 /* Total BDEs in BPL for scsi_sg_list */
7773 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
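/* Worked example of the sizing above (hypothetical value): with
 * cfg_sg_seg_cnt = 64, each pool element is sizeof(struct fcp_cmnd)
 * + sizeof(struct fcp_rsp) + 66 * entry_sz, where the two extra
 * entries are the reserved FCP cmnd and FCP rsp BDEs, and
 * cfg_total_seg_cnt becomes 66.
 */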
7776 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
7777 "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
7778 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
7779 phba->cfg_total_seg_cnt);
7781 phba->max_vpi = LPFC_MAX_VPI;
7782 /* This will be set to correct value after config_port mbox */
7783 phba->max_vports = 0;
7786 * Initialize the SLI Layer to run with lpfc HBAs.
7788 lpfc_sli_setup(phba);
7789 lpfc_sli_queue_init(phba);
7791 /* Allocate device driver memory */
7792 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
7795 phba->lpfc_sg_dma_buf_pool =
7796 dma_pool_create("lpfc_sg_dma_buf_pool",
7797 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
7800 if (!phba->lpfc_sg_dma_buf_pool)
7803 phba->lpfc_cmd_rsp_buf_pool =
7804 dma_pool_create("lpfc_cmd_rsp_buf_pool",
7806 sizeof(struct fcp_cmnd) +
7807 sizeof(struct fcp_rsp),
7810 if (!phba->lpfc_cmd_rsp_buf_pool)
7811 goto fail_free_dma_buf_pool;
7814 * Enable sr-iov virtual functions if supported and configured
7815 * through the module parameter.
7817 if (phba->cfg_sriov_nr_virtfn > 0) {
7818 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
7819 phba->cfg_sriov_nr_virtfn);
7821 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7822 "2808 Requested number of SR-IOV "
7823 "virtual functions (%d) is not "
7825 phba->cfg_sriov_nr_virtfn);
7826 phba->cfg_sriov_nr_virtfn = 0;
7832 fail_free_dma_buf_pool:
7833 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7834 phba->lpfc_sg_dma_buf_pool = NULL;
7836 lpfc_mem_free(phba);
7841 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
7842 * @phba: pointer to lpfc hba data structure.
7844 * This routine is invoked to unset the driver internal resources set up
7845 * specific for supporting the SLI-3 HBA device it is attached to.
7848 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
7850 /* Free device driver memory allocated */
7851 lpfc_mem_free_all(phba);
7857 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
7858 * @phba: pointer to lpfc hba data structure.
7860 * This routine is invoked to set up the driver internal resources specific to
7861 * support the SLI-4 HBA device it is attached to.
7865 * other values - error
7868 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
7870 LPFC_MBOXQ_t *mboxq;
7872 int rc, i, max_buf_size;
7879 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
7880 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
7881 phba->sli4_hba.curr_disp_cpu = 0;
7883 /* Get all the module params for configuring this host */
7884 lpfc_get_cfgparam(phba);
7886 /* Set up phase-1 common device driver resources */
7887 rc = lpfc_setup_driver_resource_phase1(phba);
7891 /* Before proceed, wait for POST done and device ready */
7892 rc = lpfc_sli4_post_status_check(phba);
7896 /* Allocate all driver workqueues here */
7898 /* The lpfc_wq workqueue for deferred irq use */
7899 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
7902 * Initialize timers used by driver
7905 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
7907 /* FCF rediscover timer */
7908 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
7910 /* CMF congestion timer */
7911 hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7912 phba->cmf_timer.function = lpfc_cmf_timer;
7915 * Control structure for handling external multi-buffer mailbox
7916 * command pass-through.
7918 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
7919 sizeof(struct lpfc_mbox_ext_buf_ctx));
7920 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
7922 phba->max_vpi = LPFC_MAX_VPI;
7924 /* This will be set to correct value after the read_config mbox */
7925 phba->max_vports = 0;
7927 /* Program the default value of vlan_id and fc_map */
7928 phba->valid_vlan = 0;
7929 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
7930 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
7931 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
7934 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
7935 * we will associate a new ring, for each EQ/CQ/WQ tuple.
7936 * The WQ create will allocate the ring.
7939 /* Initialize buffer queue management fields */
7940 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
7941 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
7942 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
7944 /* for VMID idle timeout if VMID is enabled */
7945 if (lpfc_is_vmid_enabled(phba))
7946 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);
7949 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
7951 /* Initialize the Abort buffer list used by driver */
7952 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
7953 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
7955 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
7956 /* Initialize the Abort nvme buffer list used by driver */
7957 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
7958 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
7959 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
7960 spin_lock_init(&phba->sli4_hba.t_active_list_lock);
7961 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
7964 /* This abort list used by worker thread */
7965 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
7966 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
7967 spin_lock_init(&phba->sli4_hba.asynce_list_lock);
7968 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
7971 * Initialize driver internal slow-path work queues
7974 /* Driver internal slow-path CQ Event pool */
7975 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
7976 /* Response IOCB work queue list */
7977 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
7978 /* Asynchronous event CQ Event work queue list */
7979 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
7980 /* Slow-path XRI aborted CQ Event work queue list */
7981 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
7982 /* Receive queue CQ Event work queue list */
7983 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
7985 /* Initialize extent block lists. */
7986 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
7987 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
7988 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
7989 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
7991 /* Initialize mboxq lists. If the early init routines fail
7992 * these lists need to be correctly initialized.
7994 INIT_LIST_HEAD(&phba->sli.mboxq);
7995 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
7997 /* initialize optic_state to 0xFF */
7998 phba->sli4_hba.lnk_info.optic_state = 0xff;
8000 /* Allocate device driver memory */
8001 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
8005 /* IF Type 2 ports get initialized now. */
8006 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
8007 LPFC_SLI_INTF_IF_TYPE_2) {
8008 rc = lpfc_pci_function_reset(phba);
8013 phba->temp_sensor_support = 1;
8016 /* Create the bootstrap mailbox command */
8017 rc = lpfc_create_bootstrap_mbox(phba);
8021 /* Set up the host's endian order with the device. */
8022 rc = lpfc_setup_endian_order(phba);
8024 goto out_free_bsmbx;
8026 /* Set up the hba's configuration parameters. */
8027 rc = lpfc_sli4_read_config(phba);
8029 goto out_free_bsmbx;
8030 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
8032 goto out_free_bsmbx;
8034 /* IF Type 0 ports get initialized now. */
8035 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8036 LPFC_SLI_INTF_IF_TYPE_0) {
8037 rc = lpfc_pci_function_reset(phba);
8039 goto out_free_bsmbx;
8042 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8046 goto out_free_bsmbx;
8049 /* Check for NVMET being configured */
8050 phba->nvmet_support = 0;
8051 if (lpfc_enable_nvmet_cnt) {
8053 /* First get WWN of HBA instance */
8054 lpfc_read_nv(phba, mboxq);
8055 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8056 if (rc != MBX_SUCCESS) {
8057 lpfc_printf_log(phba, KERN_ERR,
8059 "6016 Mailbox failed , mbxCmd x%x "
8060 "READ_NV, mbxStatus x%x\n",
8061 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8062 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
8063 mempool_free(mboxq, phba->mbox_mem_pool);
8065 goto out_free_bsmbx;
8068 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
8070 wwn = cpu_to_be64(wwn);
8071 phba->sli4_hba.wwnn.u.name = wwn;
8072 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
8074 /* wwn is WWPN of HBA instance */
8075 wwn = cpu_to_be64(wwn);
8076 phba->sli4_hba.wwpn.u.name = wwn;
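/*
 * Editor's note: the name bytes arrive from READ_NVPARMS in big-endian
 * wire order; copying them into a u64 and swapping with cpu_to_be64()
 * normalizes them so the WWPN can be compared numerically against the
 * lpfc_enable_nvmet[] module parameter entries below.
 */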
8078 /* Check to see if it matches any module parameter */
8079 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
8080 if (wwn == lpfc_enable_nvmet[i]) {
8081 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
8082 if (lpfc_nvmet_mem_alloc(phba))
8085 phba->nvmet_support = 1; /* a match */
8087 lpfc_printf_log(phba, KERN_ERR,
8089 "6017 NVME Target %016llx\n",
8092 lpfc_printf_log(phba, KERN_ERR,
8094 "6021 Can't enable NVME Target."
8095 " NVME_TARGET_FC infrastructure"
8096 " is not in kernel\n");
8098 /* Not supported for NVMET */
8099 phba->cfg_xri_rebalancing = 0;
8100 if (phba->irq_chann_mode == NHT_MODE) {
8101 phba->cfg_irq_chann =
8102 phba->sli4_hba.num_present_cpu;
8103 phba->cfg_hdw_queue =
8104 phba->sli4_hba.num_present_cpu;
8105 phba->irq_chann_mode = NORMAL_MODE;
8112 lpfc_nvme_mod_param_dep(phba);
8115 * Get sli4 parameters that override parameters from Port capabilities.
8116 * If this call fails, it isn't critical unless the SLI4 parameters come
8119 rc = lpfc_get_sli4_parameters(phba, mboxq);
8121 if_type = bf_get(lpfc_sli_intf_if_type,
8122 &phba->sli4_hba.sli_intf);
8123 if_fam = bf_get(lpfc_sli_intf_sli_family,
8124 &phba->sli4_hba.sli_intf);
8125 if (phba->sli4_hba.extents_in_use &&
8126 phba->sli4_hba.rpi_hdrs_in_use) {
8127 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8128 "2999 Unsupported SLI4 Parameters "
8129 "Extents and RPI headers enabled.\n");
8130 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8131 if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
8132 mempool_free(mboxq, phba->mbox_mem_pool);
8134 goto out_free_bsmbx;
8137 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8138 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
8139 mempool_free(mboxq, phba->mbox_mem_pool);
8141 goto out_free_bsmbx;
8146 * 1 for cmd, 1 for rsp, NVME adds an extra one
8147 * for boundary conditions in its max_sgl_segment template.
8150 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
8154 * No matter what family our adapter is in, we are
8155 * limited to 2 pages (512 SGEs) for our SGL.
8156 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
8158 max_buf_size = (2 * SLI4_PAGE_SIZE);
8161 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
8162 * used to create the sg_dma_buf_pool must be calculated.
8164 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8165 /* Both cfg_enable_bg and cfg_external_dif code paths */
8168 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
8169 * the FCP rsp, and an SGE. Since we have no control
8170 * over how many protection segments the SCSI Layer
8171 * will hand us (i.e. there could be one for every block
8172 * in the IO), just allocate enough SGEs to accommodate
8173 * our maximum, and limit lpfc_sg_seg_cnt
8174 * to minimize the risk of running out.
8176 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8177 sizeof(struct fcp_rsp) + max_buf_size;
8179 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
8180 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
8183 * If supporting DIF, reduce the seg count for scsi to
8184 * allow room for the DIF sges.
8186 if (phba->cfg_enable_bg &&
8187 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
8188 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
8190 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8194 * The scsi_buf for a regular I/O holds the FCP cmnd,
8195 * the FCP rsp, a SGE for each, and a SGE for up to
8196 * cfg_sg_seg_cnt data segments.
8198 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8199 sizeof(struct fcp_rsp) +
8200 ((phba->cfg_sg_seg_cnt + extra) *
8201 sizeof(struct sli4_sge));
8203 /* Total SGEs for scsi_sg_list */
8204 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
8205 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8208 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
8209 * need to post 1 page for the SGL.
8213 if (phba->cfg_xpsgl && !phba->nvmet_support)
8214 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
8215 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
8216 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
8218 phba->cfg_sg_dma_buf_size =
8219 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
8221 phba->border_sge_num = phba->cfg_sg_dma_buf_size /
8222 sizeof(struct sli4_sge);
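/*
 * Worked example (editor's sketch; assumes a 16-byte struct sli4_sge
 * and a 4KB SLI4_PAGE_SIZE): a page-aligned cfg_sg_dma_buf_size of
 * 8192 yields border_sge_num = 8192 / 16 = 512, consistent with the
 * NOTE above that (cfg_sg_seg_cnt + extra) <= 256 SGEs fit in a
 * single 4KB page.
 */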
8224 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
8225 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8226 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
8227 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
8228 "6300 Reducing NVME sg segment "
8230 LPFC_MAX_NVME_SEG_CNT);
8231 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
8233 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
8236 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
8237 "9087 sg_seg_cnt:%d dmabuf_size:%d "
8238 "total:%d scsi:%d nvme:%d\n",
8239 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
8240 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
8241 phba->cfg_nvme_seg_cnt);
8243 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
8244 i = phba->cfg_sg_dma_buf_size;
8248 phba->lpfc_sg_dma_buf_pool =
8249 dma_pool_create("lpfc_sg_dma_buf_pool",
8251 phba->cfg_sg_dma_buf_size,
8253 if (!phba->lpfc_sg_dma_buf_pool)
8254 goto out_free_bsmbx;
8256 phba->lpfc_cmd_rsp_buf_pool =
8257 dma_pool_create("lpfc_cmd_rsp_buf_pool",
8259 sizeof(struct fcp_cmnd) +
8260 sizeof(struct fcp_rsp),
8262 if (!phba->lpfc_cmd_rsp_buf_pool)
8263 goto out_free_sg_dma_buf;
8265 mempool_free(mboxq, phba->mbox_mem_pool);
8267 /* Verify OAS is supported */
8268 lpfc_sli4_oas_verify(phba);
8270 /* Verify RAS support on adapter */
8271 lpfc_sli4_ras_init(phba);
8273 /* Verify all the SLI4 queues */
8274 rc = lpfc_sli4_queue_verify(phba);
8276 goto out_free_cmd_rsp_buf;
8278 /* Create driver internal CQE event pool */
8279 rc = lpfc_sli4_cq_event_pool_create(phba);
8281 goto out_free_cmd_rsp_buf;
8283 /* Initialize sgl lists per host */
8284 lpfc_init_sgl_list(phba);
8286 /* Allocate and initialize active sgl array */
8287 rc = lpfc_init_active_sgl_array(phba);
8289 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8290 "1430 Failed to initialize sgl list.\n");
8291 goto out_destroy_cq_event_pool;
8293 rc = lpfc_sli4_init_rpi_hdrs(phba);
8295 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8296 "1432 Failed to initialize rpi headers.\n");
8297 goto out_free_active_sgl;
8300 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
8301 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
8302 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
8304 if (!phba->fcf.fcf_rr_bmask) {
8305 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8306 "2759 Failed allocate memory for FCF round "
8307 "robin failover bmask\n");
8309 goto out_remove_rpi_hdrs;
8312 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
8313 sizeof(struct lpfc_hba_eq_hdl),
8315 if (!phba->sli4_hba.hba_eq_hdl) {
8316 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8317 "2572 Failed allocate memory for "
8318 "fast-path per-EQ handle array\n");
8320 goto out_free_fcf_rr_bmask;
8323 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
8324 sizeof(struct lpfc_vector_map_info),
8326 if (!phba->sli4_hba.cpu_map) {
8327 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8328 "3327 Failed allocate memory for msi-x "
8329 "interrupt vector mapping\n");
8331 goto out_free_hba_eq_hdl;
8334 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
8335 if (!phba->sli4_hba.eq_info) {
8336 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8337 "3321 Failed allocation for per_cpu stats\n");
8339 goto out_free_hba_cpu_map;
8342 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
8343 sizeof(*phba->sli4_hba.idle_stat),
8345 if (!phba->sli4_hba.idle_stat) {
8346 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8347 "3390 Failed allocation for idle_stat\n");
8349 goto out_free_hba_eq_info;
8352 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8353 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
8354 if (!phba->sli4_hba.c_stat) {
8355 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8356 "3332 Failed allocating per cpu hdwq stats\n");
8358 goto out_free_hba_idle_stat;
8362 phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
8363 if (!phba->cmf_stat) {
8364 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8365 "3331 Failed allocating per cpu cgn stats\n");
8367 goto out_free_hba_hdwq_info;
8371 * Enable sr-iov virtual functions if supported and configured
8372 * through the module parameter.
8374 if (phba->cfg_sriov_nr_virtfn > 0) {
8375 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
8376 phba->cfg_sriov_nr_virtfn);
8378 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8379 "3020 Requested number of SR-IOV "
8380 "virtual functions (%d) is not "
8382 phba->cfg_sriov_nr_virtfn);
8383 phba->cfg_sriov_nr_virtfn = 0;
8389 out_free_hba_hdwq_info:
8390 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8391 free_percpu(phba->sli4_hba.c_stat);
8392 out_free_hba_idle_stat:
8394 kfree(phba->sli4_hba.idle_stat);
8395 out_free_hba_eq_info:
8396 free_percpu(phba->sli4_hba.eq_info);
8397 out_free_hba_cpu_map:
8398 kfree(phba->sli4_hba.cpu_map);
8399 out_free_hba_eq_hdl:
8400 kfree(phba->sli4_hba.hba_eq_hdl);
8401 out_free_fcf_rr_bmask:
8402 kfree(phba->fcf.fcf_rr_bmask);
8403 out_remove_rpi_hdrs:
8404 lpfc_sli4_remove_rpi_hdrs(phba);
8405 out_free_active_sgl:
8406 lpfc_free_active_sgl(phba);
8407 out_destroy_cq_event_pool:
8408 lpfc_sli4_cq_event_pool_destroy(phba);
8409 out_free_cmd_rsp_buf:
8410 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
8411 phba->lpfc_cmd_rsp_buf_pool = NULL;
8412 out_free_sg_dma_buf:
8413 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
8414 phba->lpfc_sg_dma_buf_pool = NULL;
8416 lpfc_destroy_bootstrap_mbox(phba);
8418 lpfc_mem_free(phba);
8423 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
8424 * @phba: pointer to lpfc hba data structure.
8426 * This routine is invoked to unset the driver internal resources set up
8427 * specifically for supporting the SLI-4 HBA device it is attached to.
8430 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
8432 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
8434 free_percpu(phba->sli4_hba.eq_info);
8435 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8436 free_percpu(phba->sli4_hba.c_stat);
8438 free_percpu(phba->cmf_stat);
8439 kfree(phba->sli4_hba.idle_stat);
8441 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
8442 kfree(phba->sli4_hba.cpu_map);
8443 phba->sli4_hba.num_possible_cpu = 0;
8444 phba->sli4_hba.num_present_cpu = 0;
8445 phba->sli4_hba.curr_disp_cpu = 0;
8446 cpumask_clear(&phba->sli4_hba.irq_aff_mask);
8448 /* Free memory allocated for fast-path work queue handles */
8449 kfree(phba->sli4_hba.hba_eq_hdl);
8451 /* Free the allocated rpi headers. */
8452 lpfc_sli4_remove_rpi_hdrs(phba);
8453 lpfc_sli4_remove_rpis(phba);
8455 /* Free eligible FCF index bmask */
8456 kfree(phba->fcf.fcf_rr_bmask);
8458 /* Free the ELS sgl list */
8459 lpfc_free_active_sgl(phba);
8460 lpfc_free_els_sgl_list(phba);
8461 lpfc_free_nvmet_sgl_list(phba);
8463 /* Free the completion queue EQ event pool */
8464 lpfc_sli4_cq_event_release_all(phba);
8465 lpfc_sli4_cq_event_pool_destroy(phba);
8467 /* Release resource identifiers. */
8468 lpfc_sli4_dealloc_resource_identifiers(phba);
8470 /* Free the bsmbx region. */
8471 lpfc_destroy_bootstrap_mbox(phba);
8473 /* Free the SLI Layer memory with SLI4 HBAs */
8474 lpfc_mem_free_all(phba);
8476 /* Free the current connect table */
8477 list_for_each_entry_safe(conn_entry, next_conn_entry,
8478 &phba->fcf_conn_rec_list, list) {
8479 list_del_init(&conn_entry->list);
8487 * lpfc_init_api_table_setup - Set up init api function jump table
8488 * @phba: The hba struct for which this call is being executed.
8489 * @dev_grp: The HBA PCI-Device group number.
8491 * This routine sets up the device INIT interface API function jump table
8494 * Returns: 0 - success, -ENODEV - failure.
8497 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8499 phba->lpfc_hba_init_link = lpfc_hba_init_link;
8500 phba->lpfc_hba_down_link = lpfc_hba_down_link;
8501 phba->lpfc_selective_reset = lpfc_selective_reset;
8503 case LPFC_PCI_DEV_LP:
8504 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
8505 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
8506 phba->lpfc_stop_port = lpfc_stop_port_s3;
8508 case LPFC_PCI_DEV_OC:
8509 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
8510 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
8511 phba->lpfc_stop_port = lpfc_stop_port_s4;
8514 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8515 "1431 Invalid HBA PCI-device group: 0x%x\n",
8523 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
8524 * @phba: pointer to lpfc hba data structure.
8526 * This routine is invoked to set up the driver internal resources after the
8527 * device-specific resource setup to support the HBA device it is attached to.
8531 * other values - error
8534 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
8538 /* Startup the kernel thread for this host adapter. */
8539 phba->worker_thread = kthread_run(lpfc_do_work, phba,
8540 "lpfc_worker_%d", phba->brd_no);
8541 if (IS_ERR(phba->worker_thread)) {
8542 error = PTR_ERR(phba->worker_thread);
8550 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
8551 * @phba: pointer to lpfc hba data structure.
8553 * This routine is invoked to unset the driver internal resources set up after
8554 * the device specific resource setup for supporting the HBA device it
8558 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
8561 destroy_workqueue(phba->wq);
8565 /* Stop kernel worker thread */
8566 if (phba->worker_thread)
8567 kthread_stop(phba->worker_thread);
8571 * lpfc_free_iocb_list - Free iocb list.
8572 * @phba: pointer to lpfc hba data structure.
8574 * This routine is invoked to free the driver's IOCB list and memory.
8577 lpfc_free_iocb_list(struct lpfc_hba *phba)
8579 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
8581 spin_lock_irq(&phba->hbalock);
8582 list_for_each_entry_safe(iocbq_entry, iocbq_next,
8583 &phba->lpfc_iocb_list, list) {
8584 list_del(&iocbq_entry->list);
8586 phba->total_iocbq_bufs--;
8588 spin_unlock_irq(&phba->hbalock);
8594 * lpfc_init_iocb_list - Allocate and initialize iocb list.
8595 * @phba: pointer to lpfc hba data structure.
8596 * @iocb_count: number of requested iocbs
8598 * This routine is invoked to allocate and initialize the driver's IOCB
8599 * list and set up the IOCB tag array accordingly.
8603 * other values - error
8606 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
8608 struct lpfc_iocbq *iocbq_entry = NULL;
8612 /* Initialize and populate the iocb list per host. */
8613 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
8614 for (i = 0; i < iocb_count; i++) {
8615 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
8616 if (iocbq_entry == NULL) {
8617 printk(KERN_ERR "%s: only allocated %d iocbs of "
8618 "expected %d count. Unloading driver.\n",
8619 __func__, i, iocb_count);
8620 goto out_free_iocbq;
8623 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
8626 printk(KERN_ERR "%s: failed to allocate IOTAG. "
8627 "Unloading driver.\n", __func__);
8628 goto out_free_iocbq;
8630 iocbq_entry->sli4_lxritag = NO_XRI;
8631 iocbq_entry->sli4_xritag = NO_XRI;
8633 spin_lock_irq(&phba->hbalock);
8634 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
8635 phba->total_iocbq_bufs++;
8636 spin_unlock_irq(&phba->hbalock);
8642 lpfc_free_iocb_list(phba);
8648 * lpfc_free_sgl_list - Free a given sgl list.
8649 * @phba: pointer to lpfc hba data structure.
8650 * @sglq_list: pointer to the head of sgl list.
8652 * This routine is invoked to free a given sgl list and its memory.
8655 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
8657 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8659 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
8660 list_del(&sglq_entry->list);
8661 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
8667 * lpfc_free_els_sgl_list - Free els sgl list.
8668 * @phba: pointer to lpfc hba data structure.
8670 * This routine is invoked to free the driver's els sgl list and memory.
8673 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
8675 LIST_HEAD(sglq_list);
8677 /* Retrieve all els sgls from driver list */
8678 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
8679 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
8680 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
8682 /* Now free the sgl list */
8683 lpfc_free_sgl_list(phba, &sglq_list);
8687 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
8688 * @phba: pointer to lpfc hba data structure.
8690 * This routine is invoked to free the driver's nvmet sgl list and memory.
8693 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
8695 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8696 LIST_HEAD(sglq_list);
8698 /* Retrieve all nvmet sgls from driver list */
8699 spin_lock_irq(&phba->hbalock);
8700 spin_lock(&phba->sli4_hba.sgl_list_lock);
8701 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
8702 spin_unlock(&phba->sli4_hba.sgl_list_lock);
8703 spin_unlock_irq(&phba->hbalock);
8705 /* Now free the sgl list */
8706 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
8707 list_del(&sglq_entry->list);
8708 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
8712 /* Update the nvmet_xri_cnt to reflect no current sgls.
8713 * The next initialization cycle sets the count and allocates
8714 * the sgls over again.
8716 phba->sli4_hba.nvmet_xri_cnt = 0;
8720 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
8721 * @phba: pointer to lpfc hba data structure.
8723 * This routine is invoked to allocate the driver's active sgl memory.
8724 * This array will hold the sglq_entry's for active IOs.
8727 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
8730 size = sizeof(struct lpfc_sglq *);
8731 size *= phba->sli4_hba.max_cfg_param.max_xri;
8733 phba->sli4_hba.lpfc_sglq_active_list =
8734 kzalloc(size, GFP_KERNEL);
8735 if (!phba->sli4_hba.lpfc_sglq_active_list)
8741 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
8742 * @phba: pointer to lpfc hba data structure.
8744 * This routine is invoked to walk through the array of active sglq entries
8745 * and free all of the resources.
8746 * This is just a placeholder for now.
8749 lpfc_free_active_sgl(struct lpfc_hba *phba)
8751 kfree(phba->sli4_hba.lpfc_sglq_active_list);
8755 * lpfc_init_sgl_list - Allocate and initialize sgl list.
8756 * @phba: pointer to lpfc hba data structure.
8758 * This routine is invoked to allocate and initialize the driver's sgl
8759 * list and set up the sgl xritag tag array accordingly.
8763 lpfc_init_sgl_list(struct lpfc_hba *phba)
8765 /* Initialize and populate the sglq list per host/VF. */
8766 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
8767 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8768 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
8769 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8771 /* els xri-sgl book keeping */
8772 phba->sli4_hba.els_xri_cnt = 0;
8774 /* nvme xri-buffer book keeping */
8775 phba->sli4_hba.io_xri_cnt = 0;
8779 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
8780 * @phba: pointer to lpfc hba data structure.
8782 * This routine is invoked to post rpi header templates to the
8783 * port for those SLI4 ports that do not support extents. This routine
8784 * posts a PAGE_SIZE memory region to the port to hold up to
8785 * PAGE_SIZE / 64 rpi context headers. This is an initialization routine
8786 * and should be called only when interrupts are disabled.
8790 * -ERROR - otherwise.
8793 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
8796 struct lpfc_rpi_hdr *rpi_hdr;
8798 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
8799 if (!phba->sli4_hba.rpi_hdrs_in_use)
8801 if (phba->sli4_hba.extents_in_use)
8804 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
8806 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8807 "0391 Error during rpi post operation\n");
8808 lpfc_sli4_remove_rpis(phba);
8816 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
8817 * @phba: pointer to lpfc hba data structure.
8819 * This routine is invoked to allocate a single 4KB memory region to
8820 * support rpis and stores them in the phba. This single region
8821 * provides support for up to 64 rpis. The region is used globally
8825 * A valid rpi hdr on success.
8826 * A NULL pointer on any failure.
8828 struct lpfc_rpi_hdr *
8829 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
8831 uint16_t rpi_limit, curr_rpi_range;
8832 struct lpfc_dmabuf *dmabuf;
8833 struct lpfc_rpi_hdr *rpi_hdr;
8836 * If the SLI4 port supports extents, posting the rpi header isn't
8837 * required. Set the expected maximum count and let the actual value
8838 * get set when extents are fully allocated.
8840 if (!phba->sli4_hba.rpi_hdrs_in_use)
8842 if (phba->sli4_hba.extents_in_use)
8845 /* The limit on the logical index is just the max_rpi count. */
8846 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
8848 spin_lock_irq(&phba->hbalock);
8850 * Establish the starting RPI in this header block. The starting
8851 * rpi is normalized to a zero base because the physical rpi is
8854 curr_rpi_range = phba->sli4_hba.next_rpi;
8855 spin_unlock_irq(&phba->hbalock);
8857 /* Reached full RPI range */
8858 if (curr_rpi_range == rpi_limit)
8862 * First allocate the protocol header region for the port. The
8863 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
8865 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8869 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
8870 LPFC_HDR_TEMPLATE_SIZE,
8871 &dmabuf->phys, GFP_KERNEL);
8872 if (!dmabuf->virt) {
8874 goto err_free_dmabuf;
8877 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
8879 goto err_free_coherent;
8882 /* Save the rpi header data for cleanup later. */
8883 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
8885 goto err_free_coherent;
8887 rpi_hdr->dmabuf = dmabuf;
8888 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
8889 rpi_hdr->page_count = 1;
8890 spin_lock_irq(&phba->hbalock);
8892 /* The rpi_hdr stores the logical index only. */
8893 rpi_hdr->start_rpi = curr_rpi_range;
8894 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
8895 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
8897 spin_unlock_irq(&phba->hbalock);
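/*
 * Worked example (editor's note; assumes LPFC_HDR_TEMPLATE_SIZE of 4KB
 * and LPFC_RPI_HDR_COUNT of 64): each posted region covers 64 rpi
 * context headers of 64 bytes apiece, which is why next_rpi advances
 * by LPFC_RPI_HDR_COUNT per header block created here.
 */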
8901 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
8902 dmabuf->virt, dmabuf->phys);
8909 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
8910 * @phba: pointer to lpfc hba data structure.
8912 * This routine is invoked to remove all memory resources allocated
8913 * to support rpis for SLI4 ports not supporting extents. This routine
8914 * presumes the caller has released all rpis consumed by fabric or port
8915 * logins and is prepared to have the header pages removed.
8918 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
8920 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
8922 if (!phba->sli4_hba.rpi_hdrs_in_use)
8925 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
8926 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
8927 list_del(&rpi_hdr->list);
8928 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
8929 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
8930 kfree(rpi_hdr->dmabuf);
8934 /* There are no rpis available to the port now. */
8935 phba->sli4_hba.next_rpi = 0;
8939 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
8940 * @pdev: pointer to pci device data structure.
8942 * This routine is invoked to allocate the driver hba data structure for an
8943 * HBA device. If the allocation is successful, the phba reference to the
8944 * PCI device data structure is set.
8947 * pointer to @phba - successful
8950 static struct lpfc_hba *
8951 lpfc_hba_alloc(struct pci_dev *pdev)
8953 struct lpfc_hba *phba;
8955 /* Allocate memory for HBA structure */
8956 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
8958 dev_err(&pdev->dev, "failed to allocate hba struct\n");
8962 /* Set reference to PCI device in HBA structure */
8963 phba->pcidev = pdev;
8965 /* Assign an unused board number */
8966 phba->brd_no = lpfc_get_instance();
8967 if (phba->brd_no < 0) {
8971 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
8973 spin_lock_init(&phba->ct_ev_lock);
8974 INIT_LIST_HEAD(&phba->ct_ev_waiters);
8980 * lpfc_hba_free - Free driver hba data structure with a device.
8981 * @phba: pointer to lpfc hba data structure.
8983 * This routine is invoked to free the driver hba data structure with an
8987 lpfc_hba_free(struct lpfc_hba *phba)
8989 if (phba->sli_rev == LPFC_SLI_REV4)
8990 kfree(phba->sli4_hba.hdwq);
8992 /* Release the driver assigned board number */
8993 idr_remove(&lpfc_hba_index, phba->brd_no);
8995 /* Free memory allocated with sli3 rings */
8996 kfree(phba->sli.sli3_ring);
8997 phba->sli.sli3_ring = NULL;
9004 * lpfc_create_shost - Create hba physical port with associated scsi host.
9005 * @phba: pointer to lpfc hba data structure.
9007 * This routine is invoked to create HBA physical port and associate a SCSI
9012 * other values - error
9015 lpfc_create_shost(struct lpfc_hba *phba)
9017 struct lpfc_vport *vport;
9018 struct Scsi_Host *shost;
9020 /* Initialize HBA FC structure */
9021 phba->fc_edtov = FF_DEF_EDTOV;
9022 phba->fc_ratov = FF_DEF_RATOV;
9023 phba->fc_altov = FF_DEF_ALTOV;
9024 phba->fc_arbtov = FF_DEF_ARBTOV;
9026 atomic_set(&phba->sdev_cnt, 0);
9027 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
9031 shost = lpfc_shost_from_vport(vport);
9032 phba->pport = vport;
9034 if (phba->nvmet_support) {
9035 /* Only 1 vport (pport) will support NVME target */
9036 phba->targetport = NULL;
9037 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
9038 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
9039 "6076 NVME Target Found\n");
9042 lpfc_debugfs_initialize(vport);
9043 /* Put reference to SCSI host to driver's device private data */
9044 pci_set_drvdata(phba->pcidev, shost);
9047 * At this point we are fully registered with PSA. In addition,
9048 * any initial discovery should be completed.
9050 vport->load_flag |= FC_ALLOW_FDMI;
9051 if (phba->cfg_enable_SmartSAN ||
9052 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
9054 /* Setup appropriate attribute masks */
9055 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
9056 if (phba->cfg_enable_SmartSAN)
9057 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
9059 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
9065 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
9066 * @phba: pointer to lpfc hba data structure.
9068 * This routine is invoked to destroy HBA physical port and the associated
9072 lpfc_destroy_shost(struct lpfc_hba *phba)
9074 struct lpfc_vport *vport = phba->pport;
9076 /* Destroy physical port that associated with the SCSI host */
9077 destroy_port(vport);
9083 * lpfc_setup_bg - Setup Block guard structures and debug areas.
9084 * @phba: pointer to lpfc hba data structure.
9085 * @shost: the shost to be used to detect Block guard settings.
9087 * This routine sets up the local Block guard protocol settings for @shost.
9088 * This routine also allocates memory for debugging bg buffers.
9091 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
9096 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9097 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9098 "1478 Registering BlockGuard with the "
9101 old_mask = phba->cfg_prot_mask;
9102 old_guard = phba->cfg_prot_guard;
9104 /* Only allow supported values */
9105 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
9106 SHOST_DIX_TYPE0_PROTECTION |
9107 SHOST_DIX_TYPE1_PROTECTION);
9108 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
9109 SHOST_DIX_GUARD_CRC);
9111 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
9112 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
9113 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
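/*
 * Worked example (editor's note): a cfg_prot_mask of 0x7f is filtered
 * down to 0x19 (DIF type 1 plus DIX types 0 and 1) by the masking
 * above, and a request for DIX type 1 alone is widened to include
 * DIF type 1 so type 1 protection runs end to end.
 */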
9115 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9116 if ((old_mask != phba->cfg_prot_mask) ||
9117 (old_guard != phba->cfg_prot_guard))
9118 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9119 "1475 Registering BlockGuard with the "
9120 "SCSI layer: mask %d guard %d\n",
9121 phba->cfg_prot_mask,
9122 phba->cfg_prot_guard);
9124 scsi_host_set_prot(shost, phba->cfg_prot_mask);
9125 scsi_host_set_guard(shost, phba->cfg_prot_guard);
9127 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9128 "1479 Not Registering BlockGuard with the SCSI "
9129 "layer, Bad protection parameters: %d %d\n",
9130 old_mask, old_guard);
9135 * lpfc_post_init_setup - Perform necessary device post initialization setup.
9136 * @phba: pointer to lpfc hba data structure.
9138 * This routine is invoked to perform all the necessary post initialization
9139 * setup for the device.
9142 lpfc_post_init_setup(struct lpfc_hba *phba)
9144 struct Scsi_Host *shost;
9145 struct lpfc_adapter_event_header adapter_event;
9147 /* Get the default values for Model Name and Description */
9148 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9151 * hba setup may have changed the hba_queue_depth so we need to
9152 * adjust the value of can_queue.
9154 shost = pci_get_drvdata(phba->pcidev);
9155 shost->can_queue = phba->cfg_hba_queue_depth - 10;
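/*
 * Editor's note: the fixed subtraction holds ten hba_queue_depth slots
 * back from the SCSI midlayer, presumably as headroom for internally
 * generated commands, so can_queue always trails the configured HBA
 * depth.
 */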
9157 lpfc_host_attrib_init(shost);
9159 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9160 spin_lock_irq(shost->host_lock);
9161 lpfc_poll_start_timer(phba);
9162 spin_unlock_irq(shost->host_lock);
9165 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9166 "0428 Perform SCSI scan\n");
9167 /* Send board arrival event to upper layer */
9168 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
9169 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
9170 fc_host_post_vendor_event(shost, fc_get_event_number(),
9171 sizeof(adapter_event),
9172 (char *) &adapter_event,
9178 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
9179 * @phba: pointer to lpfc hba data structure.
9181 * This routine is invoked to set up the PCI device memory space for device
9182 * with SLI-3 interface spec.
9186 * other values - error
9189 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
9191 struct pci_dev *pdev = phba->pcidev;
9192 unsigned long bar0map_len, bar2map_len;
9200 /* Set the device DMA mask size */
9201 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9203 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9208 /* Get the bus address of Bar0 and Bar2 and the number of bytes
9209 * required by each mapping.
9211 phba->pci_bar0_map = pci_resource_start(pdev, 0);
9212 bar0map_len = pci_resource_len(pdev, 0);
9214 phba->pci_bar2_map = pci_resource_start(pdev, 2);
9215 bar2map_len = pci_resource_len(pdev, 2);
9217 /* Map HBA SLIM to a kernel virtual address. */
9218 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
9219 if (!phba->slim_memmap_p) {
9220 dev_printk(KERN_ERR, &pdev->dev,
9221 "ioremap failed for SLIM memory.\n");
9225 /* Map HBA Control Registers to a kernel virtual address. */
9226 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
9227 if (!phba->ctrl_regs_memmap_p) {
9228 dev_printk(KERN_ERR, &pdev->dev,
9229 "ioremap failed for HBA control registers.\n");
9230 goto out_iounmap_slim;
9233 /* Allocate memory for SLI-2 structures */
9234 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9235 &phba->slim2p.phys, GFP_KERNEL);
9236 if (!phba->slim2p.virt)
9239 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
9240 phba->mbox_ext = (phba->slim2p.virt +
9241 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
9242 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
9243 phba->IOCBs = (phba->slim2p.virt +
9244 offsetof(struct lpfc_sli2_slim, IOCBs));
9246 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
9247 lpfc_sli_hbq_size(),
9248 &phba->hbqslimp.phys,
9250 if (!phba->hbqslimp.virt)
9253 hbq_count = lpfc_sli_hbq_count();
9254 ptr = phba->hbqslimp.virt;
9255 for (i = 0; i < hbq_count; ++i) {
9256 phba->hbqs[i].hbq_virt = ptr;
9257 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
9258 ptr += (lpfc_hbq_defs[i]->entry_count *
9259 sizeof(struct lpfc_hbq_entry));
9261 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
9262 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
9264 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
9266 phba->MBslimaddr = phba->slim_memmap_p;
9267 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
9268 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
9269 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
9270 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
9275 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9276 phba->slim2p.virt, phba->slim2p.phys);
9278 iounmap(phba->ctrl_regs_memmap_p);
9280 iounmap(phba->slim_memmap_p);
9286 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
9287 * @phba: pointer to lpfc hba data structure.
9289 * This routine is invoked to unset the PCI device memory space for device
9290 * with SLI-3 interface spec.
9293 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
9295 struct pci_dev *pdev;
9297 /* Obtain PCI device reference */
9301 pdev = phba->pcidev;
9303 /* Free coherent DMA memory allocated */
9304 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9305 phba->hbqslimp.virt, phba->hbqslimp.phys);
9306 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9307 phba->slim2p.virt, phba->slim2p.phys);
9309 /* I/O memory unmap */
9310 iounmap(phba->ctrl_regs_memmap_p);
9311 iounmap(phba->slim_memmap_p);
9317 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
9318 * @phba: pointer to lpfc hba data structure.
9320 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
9321 * done and check status.
9323 * Return 0 if successful, otherwise -ENODEV.
9326 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
9328 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
9329 struct lpfc_register reg_data;
9330 int i, port_error = 0;
9333 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
9334 memset(&reg_data, 0, sizeof(reg_data));
9335 if (!phba->sli4_hba.PSMPHRregaddr)
9338 /* Wait up to 30 seconds for the SLI Port POST done and ready */
9339 for (i = 0; i < 3000; i++) {
9340 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
9341 &portsmphr_reg.word0) ||
9342 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
9343 /* Port has a fatal POST error, break out */
9344 port_error = -ENODEV;
9347 if (LPFC_POST_STAGE_PORT_READY ==
9348 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
9354 * If there was a port error during POST, then don't proceed with
9355 * other register reads as the data may not be valid. Just exit.
9358 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9359 "1408 Port Failed POST - portsmphr=0x%x, "
9360 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
9361 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
9362 portsmphr_reg.word0,
9363 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
9364 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
9365 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
9366 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
9367 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
9368 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
9369 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
9370 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
9372 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9373 "2534 Device Info: SLIFamily=0x%x, "
9374 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
9375 "SLIHint_2=0x%x, FT=0x%x\n",
9376 bf_get(lpfc_sli_intf_sli_family,
9377 &phba->sli4_hba.sli_intf),
9378 bf_get(lpfc_sli_intf_slirev,
9379 &phba->sli4_hba.sli_intf),
9380 bf_get(lpfc_sli_intf_if_type,
9381 &phba->sli4_hba.sli_intf),
9382 bf_get(lpfc_sli_intf_sli_hint1,
9383 &phba->sli4_hba.sli_intf),
9384 bf_get(lpfc_sli_intf_sli_hint2,
9385 &phba->sli4_hba.sli_intf),
9386 bf_get(lpfc_sli_intf_func_type,
9387 &phba->sli4_hba.sli_intf));
9389 * Check for other Port errors during the initialization
9390 * process. Fail the load if the port did not come up
9393 if_type = bf_get(lpfc_sli_intf_if_type,
9394 &phba->sli4_hba.sli_intf);
9396 case LPFC_SLI_INTF_IF_TYPE_0:
9397 phba->sli4_hba.ue_mask_lo =
9398 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
9399 phba->sli4_hba.ue_mask_hi =
9400 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
9402 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
9404 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
9405 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
9406 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
9407 lpfc_printf_log(phba, KERN_ERR,
9409 "1422 Unrecoverable Error "
9410 "Detected during POST "
9411 "uerr_lo_reg=0x%x, "
9412 "uerr_hi_reg=0x%x, "
9413 "ue_mask_lo_reg=0x%x, "
9414 "ue_mask_hi_reg=0x%x\n",
9417 phba->sli4_hba.ue_mask_lo,
9418 phba->sli4_hba.ue_mask_hi);
9419 port_error = -ENODEV;
9422 case LPFC_SLI_INTF_IF_TYPE_2:
9423 case LPFC_SLI_INTF_IF_TYPE_6:
9424 /* Final checks. The port status should be clean. */
9425 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9427 (bf_get(lpfc_sliport_status_err, &reg_data) &&
9428 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
9429 phba->work_status[0] =
9430 readl(phba->sli4_hba.u.if_type2.
9432 phba->work_status[1] =
9433 readl(phba->sli4_hba.u.if_type2.
9435 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9436 "2888 Unrecoverable port error "
9437 "following POST: port status reg "
9438 "0x%x, port_smphr reg 0x%x, "
9439 "error 1=0x%x, error 2=0x%x\n",
9441 portsmphr_reg.word0,
9442 phba->work_status[0],
9443 phba->work_status[1]);
9444 port_error = -ENODEV;
9448 if (lpfc_pldv_detect &&
9449 bf_get(lpfc_sli_intf_sli_family,
9450 &phba->sli4_hba.sli_intf) ==
9451 LPFC_SLI_INTF_FAMILY_G6)
9452 pci_write_config_byte(phba->pcidev,
9453 LPFC_SLI_INTF, CFG_PLD);
9455 case LPFC_SLI_INTF_IF_TYPE_1:
9464 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
9465 * @phba: pointer to lpfc hba data structure.
9466 * @if_type: The SLI4 interface type getting configured.
9468 * This routine is invoked to set up SLI4 BAR0 PCI config space register
9472 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9475 case LPFC_SLI_INTF_IF_TYPE_0:
9476 phba->sli4_hba.u.if_type0.UERRLOregaddr =
9477 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
9478 phba->sli4_hba.u.if_type0.UERRHIregaddr =
9479 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
9480 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
9481 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
9482 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
9483 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
9484 phba->sli4_hba.SLIINTFregaddr =
9485 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9487 case LPFC_SLI_INTF_IF_TYPE_2:
9488 phba->sli4_hba.u.if_type2.EQDregaddr =
9489 phba->sli4_hba.conf_regs_memmap_p +
9490 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9491 phba->sli4_hba.u.if_type2.ERR1regaddr =
9492 phba->sli4_hba.conf_regs_memmap_p +
9493 LPFC_CTL_PORT_ER1_OFFSET;
9494 phba->sli4_hba.u.if_type2.ERR2regaddr =
9495 phba->sli4_hba.conf_regs_memmap_p +
9496 LPFC_CTL_PORT_ER2_OFFSET;
9497 phba->sli4_hba.u.if_type2.CTRLregaddr =
9498 phba->sli4_hba.conf_regs_memmap_p +
9499 LPFC_CTL_PORT_CTL_OFFSET;
9500 phba->sli4_hba.u.if_type2.STATUSregaddr =
9501 phba->sli4_hba.conf_regs_memmap_p +
9502 LPFC_CTL_PORT_STA_OFFSET;
9503 phba->sli4_hba.SLIINTFregaddr =
9504 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9505 phba->sli4_hba.PSMPHRregaddr =
9506 phba->sli4_hba.conf_regs_memmap_p +
9507 LPFC_CTL_PORT_SEM_OFFSET;
9508 phba->sli4_hba.RQDBregaddr =
9509 phba->sli4_hba.conf_regs_memmap_p +
9510 LPFC_ULP0_RQ_DOORBELL;
9511 phba->sli4_hba.WQDBregaddr =
9512 phba->sli4_hba.conf_regs_memmap_p +
9513 LPFC_ULP0_WQ_DOORBELL;
9514 phba->sli4_hba.CQDBregaddr =
9515 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
9516 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9517 phba->sli4_hba.MQDBregaddr =
9518 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
9519 phba->sli4_hba.BMBXregaddr =
9520 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9522 case LPFC_SLI_INTF_IF_TYPE_6:
9523 phba->sli4_hba.u.if_type2.EQDregaddr =
9524 phba->sli4_hba.conf_regs_memmap_p +
9525 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9526 phba->sli4_hba.u.if_type2.ERR1regaddr =
9527 phba->sli4_hba.conf_regs_memmap_p +
9528 LPFC_CTL_PORT_ER1_OFFSET;
9529 phba->sli4_hba.u.if_type2.ERR2regaddr =
9530 phba->sli4_hba.conf_regs_memmap_p +
9531 LPFC_CTL_PORT_ER2_OFFSET;
9532 phba->sli4_hba.u.if_type2.CTRLregaddr =
9533 phba->sli4_hba.conf_regs_memmap_p +
9534 LPFC_CTL_PORT_CTL_OFFSET;
9535 phba->sli4_hba.u.if_type2.STATUSregaddr =
9536 phba->sli4_hba.conf_regs_memmap_p +
9537 LPFC_CTL_PORT_STA_OFFSET;
9538 phba->sli4_hba.PSMPHRregaddr =
9539 phba->sli4_hba.conf_regs_memmap_p +
9540 LPFC_CTL_PORT_SEM_OFFSET;
9541 phba->sli4_hba.BMBXregaddr =
9542 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9544 case LPFC_SLI_INTF_IF_TYPE_1:
9546 dev_printk(KERN_ERR, &phba->pcidev->dev,
9547 "FATAL - unsupported SLI4 interface type - %d\n",
9554 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
9555 * @phba: pointer to lpfc hba data structure.
9556 * @if_type: sli if type to operate on.
9558 * This routine is invoked to set up SLI4 BAR1 register memory map.
9561 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9564 case LPFC_SLI_INTF_IF_TYPE_0:
9565 phba->sli4_hba.PSMPHRregaddr =
9566 phba->sli4_hba.ctrl_regs_memmap_p +
9567 LPFC_SLIPORT_IF0_SMPHR;
9568 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9570 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9572 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9575 case LPFC_SLI_INTF_IF_TYPE_6:
9576 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9577 LPFC_IF6_RQ_DOORBELL;
9578 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9579 LPFC_IF6_WQ_DOORBELL;
9580 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9581 LPFC_IF6_CQ_DOORBELL;
9582 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9583 LPFC_IF6_EQ_DOORBELL;
9584 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9585 LPFC_IF6_MQ_DOORBELL;
9587 case LPFC_SLI_INTF_IF_TYPE_2:
9588 case LPFC_SLI_INTF_IF_TYPE_1:
9590 dev_err(&phba->pcidev->dev,
9591 "FATAL - unsupported SLI4 interface type - %d\n",
9598 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
9599 * @phba: pointer to lpfc hba data structure.
9600 * @vf: virtual function number
9602 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
9603 * based on the given virtual function number, @vf.
9605 * Return 0 if successful, otherwise -ENODEV.
9608 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
9610 if (vf > LPFC_VIR_FUNC_MAX)
9613 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9614 vf * LPFC_VFR_PAGE_SIZE +
9615 LPFC_ULP0_RQ_DOORBELL);
9616 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9617 vf * LPFC_VFR_PAGE_SIZE +
9618 LPFC_ULP0_WQ_DOORBELL);
9619 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9620 vf * LPFC_VFR_PAGE_SIZE +
9621 LPFC_EQCQ_DOORBELL);
9622 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9623 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9624 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
9625 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9626 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
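/*
 * Worked example (editor's note): each virtual function owns a doorbell
 * page of LPFC_VFR_PAGE_SIZE bytes, so for vf = 2 the RQ doorbell
 * resolves to drbl_regs_memmap_p + 2 * LPFC_VFR_PAGE_SIZE +
 * LPFC_ULP0_RQ_DOORBELL, and likewise for the WQ/CQ/MQ/BMBX offsets
 * above.
 */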
9631 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
9632 * @phba: pointer to lpfc hba data structure.
9634 * This routine is invoked to create the bootstrap mailbox
9635 * region consistent with the SLI-4 interface spec. This
9636 * routine allocates all memory necessary to communicate
9637 * mailbox commands to the port and sets up all alignment
9638 * needs. No locks are expected to be held when calling
9643 * -ENOMEM - could not allocate memory.
9646 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
9649 struct lpfc_dmabuf *dmabuf;
9650 struct dma_address *dma_address;
9654 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9659 * The bootstrap mailbox region comprises 2 parts
9660 * plus an alignment restriction of 16 bytes.
9662 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
9663 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
9664 &dmabuf->phys, GFP_KERNEL);
9665 if (!dmabuf->virt) {
9671 * Initialize the bootstrap mailbox pointers now so that the register
9672 * operations are simple later. The mailbox dma address is required
9673 * to be 16-byte aligned. Also align the virtual memory as each
9674 * mailbox is copied into the bmbx mailbox region before issuing the
9675 * command to the port.
9677 phba->sli4_hba.bmbx.dmabuf = dmabuf;
9678 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
9680 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
9681 LPFC_ALIGN_16_BYTE);
9682 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
9683 LPFC_ALIGN_16_BYTE);
9686 * Set the high and low physical addresses now. The SLI4 alignment
9687 * requirement is 16 bytes and the mailbox is posted to the port
9688 * as two 30-bit addresses. The other data is a bit marking whether
9689 * the 30-bit address is the high or low address.
9690 * Upcast bmbx aphys to 64 bits so the shift instruction compiles
9691 * cleanly on 32-bit machines.
9693 dma_address = &phba->sli4_hba.bmbx.dma_address;
9694 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
9695 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
9696 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
9697 LPFC_BMBX_BIT1_ADDR_HI);
9699 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
9700 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
9701 LPFC_BMBX_BIT1_ADDR_LO);
9706 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
9707 * @phba: pointer to lpfc hba data structure.
9709 * This routine is invoked to teardown the bootstrap mailbox
9710 * region and release all host resources. This routine requires
9711 * the caller to ensure all mailbox commands are recovered, no
9712 * additional mailbox commands are sent, and interrupts are disabled
9713 * before calling this routine.
9717 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
9719 dma_free_coherent(&phba->pcidev->dev,
9720 phba->sli4_hba.bmbx.bmbx_size,
9721 phba->sli4_hba.bmbx.dmabuf->virt,
9722 phba->sli4_hba.bmbx.dmabuf->phys);
9724 kfree(phba->sli4_hba.bmbx.dmabuf);
9725 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
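#ifdef LPFC_EDITOR_EXAMPLE
/*
 * Editor's sketch (LPFC_EDITOR_EXAMPLE is a hypothetical guard, so this
 * never builds into the driver): the 30/30-bit bootstrap mailbox
 * address split performed in lpfc_create_bootstrap_mbox() above, in
 * isolation. Bits 63:34 of the 16-byte-aligned physical address form
 * the high posting and bits 33:4 the low one, each shifted into bits
 * 31:2 of its word with a BIT1 flag marking which half it carries.
 */
static void lpfc_example_bmbx_split(uint64_t aphys, uint32_t *hi,
				    uint32_t *lo)
{
	/* high 30 bits: address bits 63:34 */
	*hi = (uint32_t)(((aphys >> 34) & 0x3fffffff) << 2) |
	      LPFC_BMBX_BIT1_ADDR_HI;
	/* low 30 bits: address bits 33:4 (bits 3:0 are zero by alignment) */
	*lo = (uint32_t)(((aphys >> 4) & 0x3fffffff) << 2) |
	      LPFC_BMBX_BIT1_ADDR_LO;
}
#endif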
9728 static const char * const lpfc_topo_to_str[] = {
9738 #define LINK_FLAGS_DEF 0x0
9739 #define LINK_FLAGS_P2P 0x1
9740 #define LINK_FLAGS_LOOP 0x2
9742 * lpfc_map_topology - Map the topology read from READ_CONFIG
9743 * @phba: pointer to lpfc hba data structure.
9744 * @rd_config: pointer to read config data
9746 * This routine is invoked to map the topology values as read
9747 * from the read config mailbox command. If the persistent
9748 * topology feature is supported, the firmware will provide the
9749 * saved topology information to be used in INIT_LINK
9752 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
9756 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
9757 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
9758 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
9760 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9761 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
9764 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9765 "2019 FW does not support persistent topology "
9766 "Using driver parameter defined value [%s]",
9767 lpfc_topo_to_str[phba->cfg_topology]);
9770 /* FW supports persistent topology - override module parameter value */
9771 phba->hba_flag |= HBA_PERSISTENT_TOPO;
9773 /* if ASIC_GEN_NUM >= 0xC) */
9774 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
9775 LPFC_SLI_INTF_IF_TYPE_6) ||
9776 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
9777 LPFC_SLI_INTF_FAMILY_G6)) {
9779 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
9780 ? FLAGS_TOPOLOGY_MODE_LOOP
9781 : FLAGS_TOPOLOGY_MODE_PT_PT);
9783 phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
9787 /* If topology failover set - pt is '0' or '1' */
9788 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
9789 FLAGS_TOPOLOGY_MODE_LOOP_PT);
9791 phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
9792 ? FLAGS_TOPOLOGY_MODE_PT_PT
9793 : FLAGS_TOPOLOGY_MODE_LOOP);
9796 if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
9797 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9798 "2020 Using persistent topology value [%s]",
9799 lpfc_topo_to_str[phba->cfg_topology]);
9801 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9802 "2021 Invalid topology values from FW "
9803 "Using driver parameter defined value [%s]",
9804 lpfc_topo_to_str[phba->cfg_topology]);
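/*
 * Worked example (editor's note): ptv = 1 with tf = 0 and pt =
 * LINK_FLAGS_P2P selects FLAGS_TOPOLOGY_MODE_PT_PT from the persistent
 * value, while ptv = 0 falls back to the module-parameter topology
 * reported in message 2019 above.
 */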
9809 * lpfc_sli4_read_config - Get the config parameters.
9810 * @phba: pointer to lpfc hba data structure.
9812 * This routine is invoked to read the configuration parameters from the HBA.
9813 * The configuration parameters are used to set the base and maximum values
9814 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
9815 * allocation for the port.
9819 * -ENOMEM - No available memory
9820 * -EIO - The mailbox failed to complete successfully.
9823 lpfc_sli4_read_config(struct lpfc_hba *phba)
9826 struct lpfc_mbx_read_config *rd_config;
9827 union lpfc_sli4_cfg_shdr *shdr;
9828 uint32_t shdr_status, shdr_add_status;
9829 struct lpfc_mbx_get_func_cfg *get_func_cfg;
9830 struct lpfc_rsrc_desc_fcfcoe *desc;
9832 uint16_t forced_link_speed;
9833 uint32_t if_type, qmin;
9834 int length, i, rc = 0, rc2;
9836 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9838 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9839 "2011 Unable to allocate memory for issuing "
9840 "SLI_CONFIG_SPECIAL mailbox command\n");
9844 lpfc_read_config(phba, pmb);
9846 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
9847 if (rc != MBX_SUCCESS) {
9848 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9849 "2012 Mailbox failed , mbxCmd x%x "
9850 "READ_CONFIG, mbxStatus x%x\n",
9851 bf_get(lpfc_mqe_command, &pmb->u.mqe),
9852 bf_get(lpfc_mqe_status, &pmb->u.mqe));
9855 rd_config = &pmb->u.mqe.un.rd_config;
9856 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
9857 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
9858 phba->sli4_hba.lnk_info.lnk_tp =
9859 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
9860 phba->sli4_hba.lnk_info.lnk_no =
9861 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
9862 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9863 "3081 lnk_type:%d, lnk_numb:%d\n",
9864 phba->sli4_hba.lnk_info.lnk_tp,
9865 phba->sli4_hba.lnk_info.lnk_no);
9867 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9868 "3082 Mailbox (x%x) returned ldv:x0\n",
9869 bf_get(lpfc_mqe_command, &pmb->u.mqe));
9870 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
9871 phba->bbcredit_support = 1;
9872 phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
9875 phba->sli4_hba.conf_trunk =
9876 bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
9877 phba->sli4_hba.extents_in_use =
9878 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
9879 phba->sli4_hba.max_cfg_param.max_xri =
9880 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
9881 /* Reduce resource usage in kdump environment */
9882 if (is_kdump_kernel() &&
9883 phba->sli4_hba.max_cfg_param.max_xri > 512)
9884 phba->sli4_hba.max_cfg_param.max_xri = 512;
9885 phba->sli4_hba.max_cfg_param.xri_base =
9886 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
9887 phba->sli4_hba.max_cfg_param.max_vpi =
9888 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
9889 /* Limit the max we support */
9890 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
9891 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
9892 phba->sli4_hba.max_cfg_param.vpi_base =
9893 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
9894 phba->sli4_hba.max_cfg_param.max_rpi =
9895 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
9896 phba->sli4_hba.max_cfg_param.rpi_base =
9897 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
9898 phba->sli4_hba.max_cfg_param.max_vfi =
9899 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
9900 phba->sli4_hba.max_cfg_param.vfi_base =
9901 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
9902 phba->sli4_hba.max_cfg_param.max_fcfi =
9903 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
9904 phba->sli4_hba.max_cfg_param.max_eq =
9905 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
9906 phba->sli4_hba.max_cfg_param.max_rq =
9907 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
9908 phba->sli4_hba.max_cfg_param.max_wq =
9909 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
9910 phba->sli4_hba.max_cfg_param.max_cq =
9911 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
9912 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
9913 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
9914 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
9915 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
9916 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
9917 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
9918 phba->max_vports = phba->max_vpi;
9920 /* Next decide on FPIN or Signal E2E CGN support
9921 * For congestion alarms and warnings the valid combinations are:
9922 * 1. FPIN alarms / FPIN warnings
9923 * 2. Signal alarms / Signal warnings
9924 * 3. FPIN alarms / Signal warnings
9925 * 4. Signal alarms / FPIN warnings
9927 * Initialize the adapter frequency to 100 ms
9929 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
9930 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
9931 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
9933 if (lpfc_use_cgn_signal) {
9934 if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) {
9935 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
9936 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
9938 if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) {
9939 /* MUST support both alarm and warning
9940 * because EDC does not support alarm alone.
9942 if (phba->cgn_reg_signal !=
9943 EDC_CG_SIG_WARN_ONLY) {
9944 /* Must support both or none */
9945 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
9946 phba->cgn_reg_signal =
9947 EDC_CG_SIG_NOTSUPPORTED;
9949 phba->cgn_reg_signal =
9950 EDC_CG_SIG_WARN_ALARM;
9951 phba->cgn_reg_fpin =
9957 /* Set the congestion initial signal and fpin values. */
9958 phba->cgn_init_reg_fpin = phba->cgn_reg_fpin;
9959 phba->cgn_init_reg_signal = phba->cgn_reg_signal;
9961 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
9962 "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n",
9963 phba->cgn_reg_signal, phba->cgn_reg_fpin);
9965 lpfc_map_topology(phba, rd_config);
9966 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9967 "2003 cfg params Extents? %d "
9972 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
9973 phba->sli4_hba.extents_in_use,
9974 phba->sli4_hba.max_cfg_param.xri_base,
9975 phba->sli4_hba.max_cfg_param.max_xri,
9976 phba->sli4_hba.max_cfg_param.vpi_base,
9977 phba->sli4_hba.max_cfg_param.max_vpi,
9978 phba->sli4_hba.max_cfg_param.vfi_base,
9979 phba->sli4_hba.max_cfg_param.max_vfi,
9980 phba->sli4_hba.max_cfg_param.rpi_base,
9981 phba->sli4_hba.max_cfg_param.max_rpi,
9982 phba->sli4_hba.max_cfg_param.max_fcfi,
9983 phba->sli4_hba.max_cfg_param.max_eq,
9984 phba->sli4_hba.max_cfg_param.max_cq,
9985 phba->sli4_hba.max_cfg_param.max_wq,
9986 phba->sli4_hba.max_cfg_param.max_rq,
9990 * Calculate queue resources based on how
9991 * many WQ/CQ/EQs are available.
9993 qmin = phba->sli4_hba.max_cfg_param.max_wq;
9994 if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
9995 qmin = phba->sli4_hba.max_cfg_param.max_cq;
9996 if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
9997 qmin = phba->sli4_hba.max_cfg_param.max_eq;
9999 * What's left after this can go toward NVME / FCP.
10000 * The minus 4 accounts for ELS, NVME LS, MBOX
10001 * plus one extra. When configured for
10002 * NVMET, FCP io channel WQs are not created.
10003 */
10004 qmin -= 4;
10006 /* Check to see if there is enough for NVME */
10007 if ((phba->cfg_irq_chann > qmin) ||
10008 (phba->cfg_hdw_queue > qmin)) {
10009 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10010 "2005 Reducing Queues - "
10011 "FW resource limitation: "
10012 "WQ %d CQ %d EQ %d: min %d: "
10013 "IRQ %d HDWQ %d\n",
10014 phba->sli4_hba.max_cfg_param.max_wq,
10015 phba->sli4_hba.max_cfg_param.max_cq,
10016 phba->sli4_hba.max_cfg_param.max_eq,
10017 qmin, phba->cfg_irq_chann,
10018 phba->cfg_hdw_queue);
10020 if (phba->cfg_irq_chann > qmin)
10021 phba->cfg_irq_chann = qmin;
10022 if (phba->cfg_hdw_queue > qmin)
10023 phba->cfg_hdw_queue = qmin;
10030 /* Update link speed if forced link speed is supported */
10031 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10032 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10033 forced_link_speed =
10034 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
10035 if (forced_link_speed) {
10036 phba->hba_flag |= HBA_FORCED_LINK_SPEED;
10038 switch (forced_link_speed) {
10039 case LINK_SPEED_1G:
10040 phba->cfg_link_speed =
10041 LPFC_USER_LINK_SPEED_1G;
10043 case LINK_SPEED_2G:
10044 phba->cfg_link_speed =
10045 LPFC_USER_LINK_SPEED_2G;
10047 case LINK_SPEED_4G:
10048 phba->cfg_link_speed =
10049 LPFC_USER_LINK_SPEED_4G;
10051 case LINK_SPEED_8G:
10052 phba->cfg_link_speed =
10053 LPFC_USER_LINK_SPEED_8G;
10055 case LINK_SPEED_10G:
10056 phba->cfg_link_speed =
10057 LPFC_USER_LINK_SPEED_10G;
10059 case LINK_SPEED_16G:
10060 phba->cfg_link_speed =
10061 LPFC_USER_LINK_SPEED_16G;
10063 case LINK_SPEED_32G:
10064 phba->cfg_link_speed =
10065 LPFC_USER_LINK_SPEED_32G;
10067 case LINK_SPEED_64G:
10068 phba->cfg_link_speed =
10069 LPFC_USER_LINK_SPEED_64G;
10072 phba->cfg_link_speed =
10073 LPFC_USER_LINK_SPEED_AUTO;
10076 lpfc_printf_log(phba, KERN_ERR,
10077 LOG_TRACE_EVENT,
10078 "0047 Unrecognized link "
10079 "speed : x%x\n",
10080 forced_link_speed);
10081 phba->cfg_link_speed =
10082 LPFC_USER_LINK_SPEED_AUTO;
10087 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
10088 length = phba->sli4_hba.max_cfg_param.max_xri -
10089 lpfc_sli4_get_els_iocb_cnt(phba);
10090 if (phba->cfg_hba_queue_depth > length) {
10091 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10092 "3361 HBA queue depth changed from %d to %d\n",
10093 phba->cfg_hba_queue_depth, length);
10094 phba->cfg_hba_queue_depth = length;
10097 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
10098 LPFC_SLI_INTF_IF_TYPE_2)
10099 goto read_cfg_out;
10101 /* get the pf# and vf# for SLI4 if_type 2 port */
10102 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
10103 sizeof(struct lpfc_sli4_cfg_mhdr));
10104 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
10105 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
10106 length, LPFC_SLI4_MBX_EMBED);
10108 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10109 shdr = (union lpfc_sli4_cfg_shdr *)
10110 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
10111 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10112 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10113 if (rc2 || shdr_status || shdr_add_status) {
10114 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10115 "3026 Mailbox failed , mbxCmd x%x "
10116 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
10117 bf_get(lpfc_mqe_command, &pmb->u.mqe),
10118 bf_get(lpfc_mqe_status, &pmb->u.mqe));
10122 /* search for fc_fcoe resource descriptor */
10123 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
10125 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
10126 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
10127 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
10128 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
10129 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
10130 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
10131 goto read_cfg_out;
10133 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
10134 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
10135 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
10136 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
10137 phba->sli4_hba.iov.pf_number =
10138 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
10139 phba->sli4_hba.iov.vf_number =
10140 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
10145 if (i < LPFC_RSRC_DESC_MAX_NUM)
10146 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10147 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
10148 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
10149 phba->sli4_hba.iov.vf_number);
10150 else
10151 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10152 "3028 GET_FUNCTION_CONFIG: failed to find "
10153 "Resource Descriptor:x%x\n",
10154 LPFC_RSRC_DESC_TYPE_FCFCOE);
10156 read_cfg_out:
10157 mempool_free(pmb, phba->mbox_mem_pool);
10158 return rc;
10162 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
10163 * @phba: pointer to lpfc hba data structure.
10165 * This routine is invoked to setup the port-side endian order when
10166 * the port if_type is 0. This routine has no function for other
10167 * if_types.
10168 *
10169 * Return codes
10170 * 0 - successful
10171 * -ENOMEM - No available memory
10172 * -EIO - The mailbox failed to complete successfully.
10175 lpfc_setup_endian_order(struct lpfc_hba *phba)
10177 LPFC_MBOXQ_t *mboxq;
10178 uint32_t if_type, rc = 0;
10179 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
10180 HOST_ENDIAN_HIGH_WORD1};
10182 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10183 switch (if_type) {
10184 case LPFC_SLI_INTF_IF_TYPE_0:
10185 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10186 GFP_KERNEL);
10188 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10189 "0492 Unable to allocate memory for "
10190 "issuing SLI_CONFIG_SPECIAL mailbox "
10196 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
10197 * two words to contain special data values and no other data.
10199 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
10200 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
10201 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10202 if (rc != MBX_SUCCESS) {
10203 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10204 "0493 SLI_CONFIG_SPECIAL mailbox "
10205 "failed with status x%x\n",
10209 mempool_free(mboxq, phba->mbox_mem_pool);
10211 case LPFC_SLI_INTF_IF_TYPE_6:
10212 case LPFC_SLI_INTF_IF_TYPE_2:
10213 case LPFC_SLI_INTF_IF_TYPE_1:
10221 * lpfc_sli4_queue_verify - Verify and update EQ counts
10222 * @phba: pointer to lpfc hba data structure.
10224 * This routine is invoked to check the user settable queue counts for EQs.
10225 * After this routine is called the counts will be set to valid values that
10226 * adhere to the constraints of the system's interrupt vectors and the port's
10227 * queue resources.
10228 *
10229 * Return codes
10230 * 0 - successful
10231 * -ENOMEM - No available memory
10234 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
10237 * Sanity check for configured queue parameters against the run-time
10238 * device parameters
10241 if (phba->nvmet_support) {
10242 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
10243 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
10244 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
10245 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
10248 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10249 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
10250 phba->cfg_hdw_queue, phba->cfg_irq_chann,
10251 phba->cfg_nvmet_mrq);
10253 /* Get EQ depth from module parameter, fake the default for now */
10254 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10255 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10257 /* Get CQ depth from module parameter, fake the default for now */
10258 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10259 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
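/**
 * lpfc_alloc_io_wq_cq - Allocate a fast-path IO WQ/CQ pair for one HDWQ
 * @phba: pointer to lpfc hba data structure.
 * @idx: index of the hardware queue to allocate the WQ/CQ pair for.
 *
 * Allocates the IO CQ and IO WQ for hdwq[idx] on the CPU associated with
 * that hardware queue, using expanded page sizes when WQEs carry an
 * embedded cdb, and adds the WQ to the driver's global WQ list.
 */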
10264 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
10266 struct lpfc_queue *qdesc;
10267 u32 wqesize;
10268 int cpu;
10270 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
10271 /* Create Fast Path IO CQs */
10272 if (phba->enab_exp_wqcq_pages)
10273 /* Increase the CQ size when WQEs contain an embedded cdb */
10274 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10275 phba->sli4_hba.cq_esize,
10276 LPFC_CQE_EXP_COUNT, cpu);
10278 else
10279 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10280 phba->sli4_hba.cq_esize,
10281 phba->sli4_hba.cq_ecount, cpu);
10283 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10284 "0499 Failed allocate fast-path IO CQ (%d)\n",
10288 qdesc->qe_valid = 1;
10290 qdesc->chann = cpu;
10291 phba->sli4_hba.hdwq[idx].io_cq = qdesc;
10293 /* Create Fast Path IO WQs */
10294 if (phba->enab_exp_wqcq_pages) {
10295 /* Increase the WQ size when WQEs contain an embedded cdb */
10296 wqesize = (phba->fcp_embed_io) ?
10297 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
10298 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10299 wqesize,
10300 LPFC_WQE_EXP_COUNT, cpu);
10301 } else
10302 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10303 phba->sli4_hba.wq_esize,
10304 phba->sli4_hba.wq_ecount, cpu);
10307 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10308 "0503 Failed allocate fast-path IO WQ (%d)\n",
10313 qdesc->chann = cpu;
10314 phba->sli4_hba.hdwq[idx].io_wq = qdesc;
10315 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10320 * lpfc_sli4_queue_create - Create all the SLI4 queues
10321 * @phba: pointer to lpfc hba data structure.
10323 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
10324 * operation. For each SLI4 queue type, the parameters such as queue entry
10325 * count (queue depth) shall be taken from the module parameter. For now,
10326 * we just use some constant number as a placeholder.
10327 *
10328 * Return codes
10329 * 0 - successful
10330 * -ENOMEM - No available memory
10331 * -EIO - The mailbox failed to complete successfully.
10334 lpfc_sli4_queue_create(struct lpfc_hba *phba)
10336 struct lpfc_queue *qdesc;
10337 int idx, cpu, eqcpu;
10338 struct lpfc_sli4_hdw_queue *qp;
10339 struct lpfc_vector_map_info *cpup;
10340 struct lpfc_vector_map_info *eqcpup;
10341 struct lpfc_eq_intr_info *eqi;
10344 * Create HBA Record arrays.
10345 * Both NVME and FCP will share the same vectors / EQs
10347 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
10348 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
10349 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
10350 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
10351 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
10352 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
10353 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10354 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10355 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10356 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10358 if (!phba->sli4_hba.hdwq) {
10359 phba->sli4_hba.hdwq = kcalloc(
10360 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
10361 GFP_KERNEL);
10362 if (!phba->sli4_hba.hdwq) {
10363 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10364 "6427 Failed allocate memory for "
10365 "fast-path Hardware Queue array\n");
10368 /* Prepare hardware queues to take IO buffers */
10369 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10370 qp = &phba->sli4_hba.hdwq[idx];
10371 spin_lock_init(&qp->io_buf_list_get_lock);
10372 spin_lock_init(&qp->io_buf_list_put_lock);
10373 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
10374 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
10375 qp->get_io_bufs = 0;
10376 qp->put_io_bufs = 0;
10377 qp->total_io_bufs = 0;
10378 spin_lock_init(&qp->abts_io_buf_list_lock);
10379 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
10380 qp->abts_scsi_io_bufs = 0;
10381 qp->abts_nvme_io_bufs = 0;
10382 INIT_LIST_HEAD(&qp->sgl_list);
10383 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
10384 spin_lock_init(&qp->hdwq_lock);
10388 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10389 if (phba->nvmet_support) {
10390 phba->sli4_hba.nvmet_cqset = kcalloc(
10391 phba->cfg_nvmet_mrq,
10392 sizeof(struct lpfc_queue *),
10393 GFP_KERNEL);
10394 if (!phba->sli4_hba.nvmet_cqset) {
10395 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10396 "3121 Fail allocate memory for "
10397 "fast-path CQ set array\n");
10400 phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
10401 phba->cfg_nvmet_mrq,
10402 sizeof(struct lpfc_queue *),
10403 GFP_KERNEL);
10404 if (!phba->sli4_hba.nvmet_mrq_hdr) {
10405 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10406 "3122 Fail allocate memory for "
10407 "fast-path RQ set hdr array\n");
10410 phba->sli4_hba.nvmet_mrq_data = kcalloc(
10411 phba->cfg_nvmet_mrq,
10412 sizeof(struct lpfc_queue *),
10413 GFP_KERNEL);
10414 if (!phba->sli4_hba.nvmet_mrq_data) {
10415 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10416 "3124 Fail allocate memory for "
10417 "fast-path RQ set data array\n");
10423 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10425 /* Create HBA Event Queues (EQs) */
10426 for_each_present_cpu(cpu) {
10427 /* We only want to create 1 EQ per vector, even though
10428 * multiple CPUs might be using that vector, so we only
10429 * select the CPUs that are flagged LPFC_CPU_FIRST_IRQ.
10431 cpup = &phba->sli4_hba.cpu_map[cpu];
10432 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10433 continue;
10435 /* Get a ptr to the Hardware Queue associated with this CPU */
10436 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10438 /* Allocate an EQ */
10439 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10440 phba->sli4_hba.eq_esize,
10441 phba->sli4_hba.eq_ecount, cpu);
10443 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10444 "0497 Failed allocate EQ (%d)\n",
10448 qdesc->qe_valid = 1;
10449 qdesc->hdwq = cpup->hdwq;
10450 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
10451 qdesc->last_cpu = qdesc->chann;
10453 /* Save the allocated EQ in the Hardware Queue */
10454 qp->hba_eq = qdesc;
10456 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
10457 list_add(&qdesc->cpu_list, &eqi->list);
10460 /* Now we need to populate the other Hardware Queues that share
10461 * an IRQ vector, with the associated EQ ptr.
10463 for_each_present_cpu(cpu) {
10464 cpup = &phba->sli4_hba.cpu_map[cpu];
10466 /* Check for EQ already allocated in previous loop */
10467 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
10468 continue;
10470 /* Check for multiple CPUs per hdwq */
10471 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10472 if (qp->hba_eq)
10473 continue;
10475 /* We need to share an EQ for this hdwq */
10476 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
10477 eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
10478 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
10481 /* Allocate IO Path SLI4 CQ/WQs */
10482 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10483 if (lpfc_alloc_io_wq_cq(phba, idx))
10487 if (phba->nvmet_support) {
10488 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10489 cpu = lpfc_find_cpu_handle(phba, idx,
10490 LPFC_FIND_BY_HDWQ);
10491 qdesc = lpfc_sli4_queue_alloc(phba,
10492 LPFC_DEFAULT_PAGE_SIZE,
10493 phba->sli4_hba.cq_esize,
10494 phba->sli4_hba.cq_ecount,
10495 cpu);
10497 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10498 "3142 Failed allocate NVME "
10499 "CQ Set (%d)\n", idx);
10502 qdesc->qe_valid = 1;
10504 qdesc->chann = cpu;
10505 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
10510 * Create Slow Path Completion Queues (CQs)
10513 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
10514 /* Create slow-path Mailbox Command Complete Queue */
10515 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10516 phba->sli4_hba.cq_esize,
10517 phba->sli4_hba.cq_ecount, cpu);
10519 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10520 "0500 Failed allocate slow-path mailbox CQ\n");
10523 qdesc->qe_valid = 1;
10524 phba->sli4_hba.mbx_cq = qdesc;
10526 /* Create slow-path ELS Complete Queue */
10527 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10528 phba->sli4_hba.cq_esize,
10529 phba->sli4_hba.cq_ecount, cpu);
10531 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10532 "0501 Failed allocate slow-path ELS CQ\n");
10535 qdesc->qe_valid = 1;
10536 qdesc->chann = cpu;
10537 phba->sli4_hba.els_cq = qdesc;
10541 * Create Slow Path Work Queues (WQs)
10544 /* Create Mailbox Command Queue */
10546 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10547 phba->sli4_hba.mq_esize,
10548 phba->sli4_hba.mq_ecount, cpu);
10550 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10551 "0505 Failed allocate slow-path MQ\n");
10554 qdesc->chann = cpu;
10555 phba->sli4_hba.mbx_wq = qdesc;
10558 * Create ELS Work Queues
10561 /* Create slow-path ELS Work Queue */
10562 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10563 phba->sli4_hba.wq_esize,
10564 phba->sli4_hba.wq_ecount, cpu);
10566 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10567 "0504 Failed allocate slow-path ELS WQ\n");
10570 qdesc->chann = cpu;
10571 phba->sli4_hba.els_wq = qdesc;
10572 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10574 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10575 /* Create NVME LS Complete Queue */
10576 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10577 phba->sli4_hba.cq_esize,
10578 phba->sli4_hba.cq_ecount, cpu);
10580 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10581 "6079 Failed allocate NVME LS CQ\n");
10584 qdesc->chann = cpu;
10585 qdesc->qe_valid = 1;
10586 phba->sli4_hba.nvmels_cq = qdesc;
10588 /* Create NVME LS Work Queue */
10589 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10590 phba->sli4_hba.wq_esize,
10591 phba->sli4_hba.wq_ecount, cpu);
10593 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10594 "6080 Failed allocate NVME LS WQ\n");
10597 qdesc->chann = cpu;
10598 phba->sli4_hba.nvmels_wq = qdesc;
10599 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10603 * Create Receive Queue (RQ)
10606 /* Create Receive Queue for header */
10607 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10608 phba->sli4_hba.rq_esize,
10609 phba->sli4_hba.rq_ecount, cpu);
10611 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10612 "0506 Failed allocate receive HRQ\n");
10615 phba->sli4_hba.hdr_rq = qdesc;
10617 /* Create Receive Queue for data */
10618 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10619 phba->sli4_hba.rq_esize,
10620 phba->sli4_hba.rq_ecount, cpu);
10622 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10623 "0507 Failed allocate receive DRQ\n");
10626 phba->sli4_hba.dat_rq = qdesc;
10628 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
10629 phba->nvmet_support) {
10630 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10631 cpu = lpfc_find_cpu_handle(phba, idx,
10632 LPFC_FIND_BY_HDWQ);
10633 /* Create NVMET Receive Queue for header */
10634 qdesc = lpfc_sli4_queue_alloc(phba,
10635 LPFC_DEFAULT_PAGE_SIZE,
10636 phba->sli4_hba.rq_esize,
10637 LPFC_NVMET_RQE_DEF_COUNT,
10638 cpu);
10640 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10641 "3146 Failed allocate "
10646 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
10648 /* Only needed for header of RQ pair */
10649 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
10650 GFP_KERNEL,
10651 cpu_to_node(cpu));
10652 if (qdesc->rqbp == NULL) {
10653 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10654 "6131 Failed allocate "
10659 /* Put list in known state in case driver load fails. */
10660 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
10662 /* Create NVMET Receive Queue for data */
10663 qdesc = lpfc_sli4_queue_alloc(phba,
10664 LPFC_DEFAULT_PAGE_SIZE,
10665 phba->sli4_hba.rq_esize,
10666 LPFC_NVMET_RQE_DEF_COUNT,
10667 cpu);
10669 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10670 "3156 Failed allocate "
10675 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
10679 /* Clear NVME stats */
10680 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10681 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10682 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
10683 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
10687 /* Clear SCSI stats */
10688 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
10689 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10690 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
10691 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
10697 out_error:
10698 lpfc_sli4_queue_destroy(phba);
10699 return -ENOMEM;
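/**
 * __lpfc_sli4_release_queue - Free a queue and clear the caller's pointer
 * @qp: address of the queue pointer to release.
 *
 * Frees the queue referenced by *qp, if any, so that a later release
 * pass does not attempt to free it again.
 */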
10703 __lpfc_sli4_release_queue(struct lpfc_queue **qp)
10706 lpfc_sli4_queue_free(*qp);
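/**
 * lpfc_sli4_release_queues - Release an array of queues
 * @qs: address of the queue pointer array to release.
 * @max: number of entries in the array.
 *
 * Releases each queue in the array through __lpfc_sli4_release_queue()
 * and then releases the array itself.
 */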
10712 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
10719 for (idx = 0; idx < max; idx++)
10720 __lpfc_sli4_release_queue(&(*qs)[idx]);
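/**
 * lpfc_sli4_release_hdwq - Release the queues owned by the hardware queues
 * @phba: pointer to lpfc hba data structure.
 *
 * Frees the IO CQ/WQ pair of every hardware queue, along with any per-hdwq
 * SGL and cmd/rsp buffer lists, and then frees the EQ associated with each
 * IRQ vector.
 */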
10727 lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
10729 struct lpfc_sli4_hdw_queue *hdwq;
10730 struct lpfc_queue *eq;
10731 int idx;
10733 hdwq = phba->sli4_hba.hdwq;
10735 /* Loop thru all Hardware Queues */
10736 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10737 /* Free the CQ/WQ corresponding to the Hardware Queue */
10738 lpfc_sli4_queue_free(hdwq[idx].io_cq);
10739 lpfc_sli4_queue_free(hdwq[idx].io_wq);
10740 hdwq[idx].hba_eq = NULL;
10741 hdwq[idx].io_cq = NULL;
10742 hdwq[idx].io_wq = NULL;
10743 if (phba->cfg_xpsgl && !phba->nvmet_support)
10744 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
10745 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
10747 /* Loop thru all IRQ vectors */
10748 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
10749 /* Free the EQ corresponding to the IRQ vector */
10750 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
10751 lpfc_sli4_queue_free(eq);
10752 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
10757 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
10758 * @phba: pointer to lpfc hba data structure.
10760 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
10761 * operation.
10762 *
10763 * Return codes
10764 * 0 - successful
10765 * -ENOMEM - No available memory
10766 * -EIO - The mailbox failed to complete successfully.
10769 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
10772 * Set FREE_INIT before beginning to free the queues.
10773 * Wait until the users of the queues acknowledge the release
10774 * by clearing FREE_WAIT.
10776 spin_lock_irq(&phba->hbalock);
10777 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
10778 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
10779 spin_unlock_irq(&phba->hbalock);
10780 msleep(20);
10781 spin_lock_irq(&phba->hbalock);
10783 spin_unlock_irq(&phba->hbalock);
10785 lpfc_sli4_cleanup_poll_list(phba);
10787 /* Release HBA eqs */
10788 if (phba->sli4_hba.hdwq)
10789 lpfc_sli4_release_hdwq(phba);
10791 if (phba->nvmet_support) {
10792 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
10793 phba->cfg_nvmet_mrq);
10795 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
10796 phba->cfg_nvmet_mrq);
10797 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
10798 phba->cfg_nvmet_mrq);
10801 /* Release mailbox command work queue */
10802 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
10804 /* Release ELS work queue */
10805 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
10807 /* Release NVME LS work queue */
10808 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
10810 /* Release unsolicited receive queue */
10811 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
10812 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
10814 /* Release ELS complete queue */
10815 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
10817 /* Release NVME LS complete queue */
10818 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
10820 /* Release mailbox command complete queue */
10821 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
10823 /* Everything on this list has been freed */
10824 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10826 /* Done with freeing the queues */
10827 spin_lock_irq(&phba->hbalock);
10828 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
10829 spin_unlock_irq(&phba->hbalock);
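/**
 * lpfc_free_rq_buffer - Free all buffers posted to a receive queue
 * @phba: pointer to lpfc hba data structure.
 * @rq: receive queue whose posted buffers are to be freed.
 *
 * Walks the RQ's buffer list and returns each posted buffer through the
 * queue's rqb_free_buffer callback, decrementing the buffer count as it
 * goes.
 */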
10833 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
10835 struct lpfc_rqb *rqbp;
10836 struct lpfc_dmabuf *h_buf;
10837 struct rqb_dmabuf *rqb_buffer;
10839 rqbp = rq->rqbp;
10840 while (!list_empty(&rqbp->rqb_buffer_list)) {
10841 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
10842 struct lpfc_dmabuf, list);
10844 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
10845 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
10846 rqbp->buffer_count--;
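/**
 * lpfc_create_wq_cq - Create a WQ/CQ pair (or the mailbox MQ/CQ) on an EQ
 * @phba: pointer to lpfc hba data structure.
 * @eq: parent event queue.
 * @cq: completion queue to create on @eq.
 * @wq: work queue (or mailbox queue for LPFC_MBOX) to create on @cq.
 * @cq_map: if non-NULL, set to the new CQ's queue_id for fast lookup.
 * @qidx: queue index, used in log messages.
 * @qtype: queue subtype (LPFC_MBOX, LPFC_ELS, LPFC_NVME_LS, LPFC_IO, ...).
 *
 * Creates the CQ first and then the WQ (or MQ) on top of it, binding the
 * WQ to its SLI ring. On WQ/MQ creation failure the CQ is left for the
 * caller to tear down.
 */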
10852 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
10853 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
10854 int qidx, uint32_t qtype)
10856 struct lpfc_sli_ring *pring;
10857 int rc;
10859 if (!eq || !cq || !wq) {
10860 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10861 "6085 Fast-path %s (%d) not allocated\n",
10862 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
10866 /* create the Cq first */
10867 rc = lpfc_cq_create(phba, cq, eq,
10868 (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
10870 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10871 "6086 Failed setup of CQ (%d), rc = 0x%x\n",
10872 qidx, (uint32_t)rc);
10876 if (qtype != LPFC_MBOX) {
10877 /* Setup cq_map for fast lookup */
10878 if (cq_map)
10879 *cq_map = cq->queue_id;
10881 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10882 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
10883 qidx, cq->queue_id, qidx, eq->queue_id);
10885 /* create the wq */
10886 rc = lpfc_wq_create(phba, wq, cq, qtype);
10888 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10889 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
10890 qidx, (uint32_t)rc);
10891 /* no need to tear down cq - caller will do so */
10895 /* Bind this CQ/WQ to the NVME ring */
10896 pring = wq->pring;
10897 pring->sli.sli4.wqp = (void *)wq;
10900 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10901 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
10902 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
10903 } else {
10904 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
10906 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10907 "0539 Failed setup of slow-path MQ: "
10908 "rc = 0x%x\n", rc);
10909 /* no need to tear down cq - caller will do so */
10913 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10914 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
10915 phba->sli4_hba.mbx_wq->queue_id,
10916 phba->sli4_hba.mbx_cq->queue_id);
10923 * lpfc_setup_cq_lookup - Setup the CQ lookup table
10924 * @phba: pointer to lpfc hba data structure.
10926 * This routine will populate the cq_lookup table with all
10927 * available CQ queue_ids.
10930 lpfc_setup_cq_lookup(struct lpfc_hba *phba)
10932 struct lpfc_queue *eq, *childq;
10933 int qidx;
10935 memset(phba->sli4_hba.cq_lookup, 0,
10936 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
10937 /* Loop thru all IRQ vectors */
10938 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
10939 /* Get the EQ corresponding to the IRQ vector */
10940 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
10941 if (!eq)
10942 continue;
10943 /* Loop through all CQs associated with that EQ */
10944 list_for_each_entry(childq, &eq->child_list, list) {
10945 if (childq->queue_id > phba->sli4_hba.cq_max)
10946 continue;
10947 if (childq->subtype == LPFC_IO)
10948 phba->sli4_hba.cq_lookup[childq->queue_id] =
10949 childq;
10955 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
10956 * @phba: pointer to lpfc hba data structure.
10958 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
10959 * operation.
10960 *
10961 * Return codes
10962 * 0 - successful
10963 * -ENOMEM - No available memory
10964 * -EIO - The mailbox failed to complete successfully.
10967 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
10969 uint32_t shdr_status, shdr_add_status;
10970 union lpfc_sli4_cfg_shdr *shdr;
10971 struct lpfc_vector_map_info *cpup;
10972 struct lpfc_sli4_hdw_queue *qp;
10973 LPFC_MBOXQ_t *mboxq;
10974 int qidx, cpu;
10975 uint32_t length, usdelay;
10976 int rc = -ENOMEM;
10978 /* Check for dual-ULP support */
10979 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10981 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10982 "3249 Unable to allocate memory for "
10983 "QUERY_FW_CFG mailbox command\n");
10986 length = (sizeof(struct lpfc_mbx_query_fw_config) -
10987 sizeof(struct lpfc_sli4_cfg_mhdr));
10988 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
10989 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
10990 length, LPFC_SLI4_MBX_EMBED);
10992 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10994 shdr = (union lpfc_sli4_cfg_shdr *)
10995 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
10996 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10997 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10998 if (shdr_status || shdr_add_status || rc) {
10999 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11000 "3250 QUERY_FW_CFG mailbox failed with status "
11001 "x%x add_status x%x, mbx status x%x\n",
11002 shdr_status, shdr_add_status, rc);
11003 mempool_free(mboxq, phba->mbox_mem_pool);
11008 phba->sli4_hba.fw_func_mode =
11009 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
11010 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
11011 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
11012 phba->sli4_hba.physical_port =
11013 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
11014 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11015 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
11016 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
11017 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
11019 mempool_free(mboxq, phba->mbox_mem_pool);
11022 * Set up HBA Event Queues (EQs)
11024 qp = phba->sli4_hba.hdwq;
11026 /* Set up HBA event queue */
11028 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11029 "3147 Fast-path EQs not allocated\n");
11034 /* Loop thru all IRQ vectors */
11035 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11036 /* Create HBA Event Queues (EQs) in order */
11037 for_each_present_cpu(cpu) {
11038 cpup = &phba->sli4_hba.cpu_map[cpu];
11040 /* Look for the CPU that's using that vector with
11041 * LPFC_CPU_FIRST_IRQ set.
11043 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11044 continue;
11045 if (qidx != cpup->eq)
11046 continue;
11048 /* Create an EQ for that vector */
11049 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
11050 phba->cfg_fcp_imax);
11052 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11053 "0523 Failed setup of fast-path"
11054 " EQ (%d), rc = 0x%x\n",
11055 cpup->eq, (uint32_t)rc);
11059 /* Save the EQ for that vector in the hba_eq_hdl */
11060 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
11061 qp[cpup->hdwq].hba_eq;
11063 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11064 "2584 HBA EQ setup: queue[%d]-id=%d\n",
11066 qp[cpup->hdwq].hba_eq->queue_id);
11070 /* Loop thru all Hardware Queues */
11071 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11072 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
11073 cpup = &phba->sli4_hba.cpu_map[cpu];
11075 /* Create the CQ/WQ corresponding to the Hardware Queue */
11076 rc = lpfc_create_wq_cq(phba,
11077 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
11078 qp[qidx].io_cq,
11079 qp[qidx].io_wq,
11080 &phba->sli4_hba.hdwq[qidx].io_cq_map,
11081 qidx,
11082 LPFC_IO);
11084 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11085 "0535 Failed to setup fastpath "
11086 "IO WQ/CQ (%d), rc = 0x%x\n",
11087 qidx, (uint32_t)rc);
11093 * Set up Slow Path Complete Queues (CQs)
11096 /* Set up slow-path MBOX CQ/MQ */
11098 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
11099 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11100 "0528 %s not allocated\n",
11101 phba->sli4_hba.mbx_cq ?
11102 "Mailbox WQ" : "Mailbox CQ");
11107 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11108 phba->sli4_hba.mbx_cq,
11109 phba->sli4_hba.mbx_wq,
11110 NULL, 0, LPFC_MBOX);
11112 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11113 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
11117 if (phba->nvmet_support) {
11118 if (!phba->sli4_hba.nvmet_cqset) {
11119 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11120 "3165 Fast-path NVME CQ Set "
11121 "array not allocated\n");
11125 if (phba->cfg_nvmet_mrq > 1) {
11126 rc = lpfc_cq_create_set(phba,
11127 phba->sli4_hba.nvmet_cqset,
11128 qp,
11129 LPFC_WCQ, LPFC_NVMET);
11131 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11132 "3164 Failed setup of NVME CQ "
11133 "Set, rc = 0x%x\n",
11138 /* Set up NVMET Receive Complete Queue */
11139 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
11140 qp[0].hba_eq,
11141 LPFC_WCQ, LPFC_NVMET);
11143 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11144 "6089 Failed setup NVMET CQ: "
11145 "rc = 0x%x\n", (uint32_t)rc);
11148 phba->sli4_hba.nvmet_cqset[0]->chann = 0;
11150 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11151 "6090 NVMET CQ setup: cq-id=%d, "
11152 "parent eq-id=%d\n",
11153 phba->sli4_hba.nvmet_cqset[0]->queue_id,
11154 qp[0].hba_eq->queue_id);
11158 /* Set up slow-path ELS WQ/CQ */
11159 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
11160 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11161 "0530 ELS %s not allocated\n",
11162 phba->sli4_hba.els_cq ? "WQ" : "CQ");
11166 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11167 phba->sli4_hba.els_cq,
11168 phba->sli4_hba.els_wq,
11169 NULL, 0, LPFC_ELS);
11171 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11172 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
11176 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11177 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
11178 phba->sli4_hba.els_wq->queue_id,
11179 phba->sli4_hba.els_cq->queue_id);
11181 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11182 /* Set up NVME LS Complete Queue */
11183 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
11184 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11185 "6091 LS %s not allocated\n",
11186 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
11190 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11191 phba->sli4_hba.nvmels_cq,
11192 phba->sli4_hba.nvmels_wq,
11193 NULL, 0, LPFC_NVME_LS);
11195 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11196 "0526 Failed setup of NVVME LS WQ/CQ: "
11197 "rc = 0x%x\n", (uint32_t)rc);
11201 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11202 "6096 ELS WQ setup: wq-id=%d, "
11203 "parent cq-id=%d\n",
11204 phba->sli4_hba.nvmels_wq->queue_id,
11205 phba->sli4_hba.nvmels_cq->queue_id);
11209 * Create NVMET Receive Queue (RQ)
11211 if (phba->nvmet_support) {
11212 if ((!phba->sli4_hba.nvmet_cqset) ||
11213 (!phba->sli4_hba.nvmet_mrq_hdr) ||
11214 (!phba->sli4_hba.nvmet_mrq_data)) {
11215 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11216 "6130 MRQ CQ Queues not "
11221 if (phba->cfg_nvmet_mrq > 1) {
11222 rc = lpfc_mrq_create(phba,
11223 phba->sli4_hba.nvmet_mrq_hdr,
11224 phba->sli4_hba.nvmet_mrq_data,
11225 phba->sli4_hba.nvmet_cqset,
11226 LPFC_NVMET);
11228 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11229 "6098 Failed setup of NVMET "
11230 "MRQ: rc = 0x%x\n",
11236 rc = lpfc_rq_create(phba,
11237 phba->sli4_hba.nvmet_mrq_hdr[0],
11238 phba->sli4_hba.nvmet_mrq_data[0],
11239 phba->sli4_hba.nvmet_cqset[0],
11240 LPFC_NVMET);
11242 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11243 "6057 Failed setup of NVMET "
11244 "Receive Queue: rc = 0x%x\n",
11249 lpfc_printf_log(
11250 phba, KERN_INFO, LOG_INIT,
11251 "6099 NVMET RQ setup: hdr-rq-id=%d, "
11252 "dat-rq-id=%d parent cq-id=%d\n",
11253 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
11254 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
11255 phba->sli4_hba.nvmet_cqset[0]->queue_id);
11260 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
11261 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11262 "0540 Receive Queue not allocated\n");
11267 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
11268 phba->sli4_hba.els_cq, LPFC_USOL);
11270 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11271 "0541 Failed setup of Receive Queue: "
11272 "rc = 0x%x\n", (uint32_t)rc);
11276 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11277 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
11278 "parent cq-id=%d\n",
11279 phba->sli4_hba.hdr_rq->queue_id,
11280 phba->sli4_hba.dat_rq->queue_id,
11281 phba->sli4_hba.els_cq->queue_id);
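/* Convert cfg_fcp_imax (interrupts per second) into the per-EQ
 * interrupt coalescing delay, in microseconds.
 */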
11283 if (phba->cfg_fcp_imax)
11284 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
11285 else
11286 usdelay = 0;
11288 for (qidx = 0; qidx < phba->cfg_irq_chann;
11289 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
11290 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
11291 usdelay);
11293 if (phba->sli4_hba.cq_max) {
11294 kfree(phba->sli4_hba.cq_lookup);
11295 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
11296 sizeof(struct lpfc_queue *), GFP_KERNEL);
11297 if (!phba->sli4_hba.cq_lookup) {
11298 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11299 "0549 Failed setup of CQ Lookup table: "
11300 "size 0x%x\n", phba->sli4_hba.cq_max);
11304 lpfc_setup_cq_lookup(phba);
11306 return 0;
11308 out_destroy:
11309 lpfc_sli4_queue_unset(phba);
11310 return rc;
11315 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
11316 * @phba: pointer to lpfc hba data structure.
11318 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
11319 * operation.
11320 *
11321 * Return codes
11322 * 0 - successful
11323 * -ENOMEM - No available memory
11324 * -EIO - The mailbox failed to complete successfully.
11327 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
11329 struct lpfc_sli4_hdw_queue *qp;
11330 struct lpfc_queue *eq;
11331 int qidx;
11333 /* Unset mailbox command work queue */
11334 if (phba->sli4_hba.mbx_wq)
11335 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
11337 /* Unset NVME LS work queue */
11338 if (phba->sli4_hba.nvmels_wq)
11339 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
11341 /* Unset ELS work queue */
11342 if (phba->sli4_hba.els_wq)
11343 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
11345 /* Unset unsolicited receive queue */
11346 if (phba->sli4_hba.hdr_rq)
11347 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
11348 phba->sli4_hba.dat_rq);
11350 /* Unset mailbox command complete queue */
11351 if (phba->sli4_hba.mbx_cq)
11352 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
11354 /* Unset ELS complete queue */
11355 if (phba->sli4_hba.els_cq)
11356 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
11358 /* Unset NVME LS complete queue */
11359 if (phba->sli4_hba.nvmels_cq)
11360 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
11362 if (phba->nvmet_support) {
11363 /* Unset NVMET MRQ queue */
11364 if (phba->sli4_hba.nvmet_mrq_hdr) {
11365 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11366 lpfc_rq_destroy(
11367 phba,
11368 phba->sli4_hba.nvmet_mrq_hdr[qidx],
11369 phba->sli4_hba.nvmet_mrq_data[qidx]);
11372 /* Unset NVMET CQ Set complete queue */
11373 if (phba->sli4_hba.nvmet_cqset) {
11374 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11375 lpfc_cq_destroy(
11376 phba, phba->sli4_hba.nvmet_cqset[qidx]);
11380 /* Unset fast-path SLI4 queues */
11381 if (phba->sli4_hba.hdwq) {
11382 /* Loop thru all Hardware Queues */
11383 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11384 /* Destroy the CQ/WQ corresponding to Hardware Queue */
11385 qp = &phba->sli4_hba.hdwq[qidx];
11386 lpfc_wq_destroy(phba, qp->io_wq);
11387 lpfc_cq_destroy(phba, qp->io_cq);
11389 /* Loop thru all IRQ vectors */
11390 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11391 /* Destroy the EQ corresponding to the IRQ vector */
11392 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11393 lpfc_eq_destroy(phba, eq);
11397 kfree(phba->sli4_hba.cq_lookup);
11398 phba->sli4_hba.cq_lookup = NULL;
11399 phba->sli4_hba.cq_max = 0;
11403 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
11404 * @phba: pointer to lpfc hba data structure.
11406 * This routine is invoked to allocate and set up a pool of completion queue
11407 * events. The body of the completion queue event is a completion queue entry
11408 * CQE. For now, this pool is used for the interrupt service routine to queue
11409 * the following HBA completion queue events for the worker thread to process:
11410 * - Mailbox asynchronous events
11411 * - Receive queue completion unsolicited events
11412 * Later, this can be used for all the slow-path events.
11413 *
11414 * Return codes
11415 * 0 - successful
11416 * -ENOMEM - No available memory
11419 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
11421 struct lpfc_cq_event *cq_event;
11422 int i;
11424 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
11425 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
11426 if (!cq_event)
11427 goto out_pool_create_fail;
11428 list_add_tail(&cq_event->list,
11429 &phba->sli4_hba.sp_cqe_event_pool);
11433 out_pool_create_fail:
11434 lpfc_sli4_cq_event_pool_destroy(phba);
11439 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
11440 * @phba: pointer to lpfc hba data structure.
11442 * This routine is invoked to free the pool of completion queue events at
11443 * driver unload time. Note that it is the responsibility of the driver
11444 * cleanup routine to free all the outstanding completion-queue events
11445 * allocated from this pool back into the pool before invoking this routine
11446 * to destroy the pool.
11449 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
11451 struct lpfc_cq_event *cq_event, *next_cq_event;
11453 list_for_each_entry_safe(cq_event, next_cq_event,
11454 &phba->sli4_hba.sp_cqe_event_pool, list) {
11455 list_del(&cq_event->list);
11456 kfree(cq_event);
11461 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11462 * @phba: pointer to lpfc hba data structure.
11464 * This routine is the lock free version of the API invoked to allocate a
11465 * completion-queue event from the free pool.
11467 * Return: Pointer to the newly allocated completion-queue event if successful
11468 * NULL otherwise.
11470 struct lpfc_cq_event *
11471 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11473 struct lpfc_cq_event *cq_event = NULL;
11475 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
11476 struct lpfc_cq_event, list);
11477 return cq_event;
11481 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11482 * @phba: pointer to lpfc hba data structure.
11484 * This routine is the lock version of the API invoked to allocate a
11485 * completion-queue event from the free pool.
11487 * Return: Pointer to the newly allocated completion-queue event if successful
11488 * NULL otherwise.
11490 struct lpfc_cq_event *
11491 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11493 struct lpfc_cq_event *cq_event;
11494 unsigned long iflags;
11496 spin_lock_irqsave(&phba->hbalock, iflags);
11497 cq_event = __lpfc_sli4_cq_event_alloc(phba);
11498 spin_unlock_irqrestore(&phba->hbalock, iflags);
11499 return cq_event;
11503 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11504 * @phba: pointer to lpfc hba data structure.
11505 * @cq_event: pointer to the completion queue event to be freed.
11507 * This routine is the lock free version of the API invoked to release a
11508 * completion-queue event back into the free pool.
11511 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11512 struct lpfc_cq_event *cq_event)
11514 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
11518 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11519 * @phba: pointer to lpfc hba data structure.
11520 * @cq_event: pointer to the completion queue event to be freed.
11522 * This routine is the lock version of the API invoked to release a
11523 * completion-queue event back into the free pool.
11526 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11527 struct lpfc_cq_event *cq_event)
11529 unsigned long iflags;
11530 spin_lock_irqsave(&phba->hbalock, iflags);
11531 __lpfc_sli4_cq_event_release(phba, cq_event);
11532 spin_unlock_irqrestore(&phba->hbalock, iflags);
11536 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
11537 * @phba: pointer to lpfc hba data structure.
11539 * This routine frees all the pending completion-queue events back into
11540 * the free pool for device reset.
11543 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
11545 LIST_HEAD(cq_event_list);
11546 struct lpfc_cq_event *cq_event;
11547 unsigned long iflags;
11549 /* Retrieve all the pending WCQEs from pending WCQE lists */
11551 /* Pending ELS XRI abort events */
11552 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11553 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
11554 &cq_event_list);
11555 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11557 /* Pending async events */
11558 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
11559 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
11560 &cq_event_list);
11561 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
11563 while (!list_empty(&cq_event_list)) {
11564 list_remove_head(&cq_event_list, cq_event,
11565 struct lpfc_cq_event, list);
11566 lpfc_sli4_cq_event_release(phba, cq_event);
11571 * lpfc_pci_function_reset - Reset pci function.
11572 * @phba: pointer to lpfc hba data structure.
11574 * This routine is invoked to request a PCI function reset. It destroys
11575 * all resources assigned to the PCI function which originates this request.
11576 *
11577 * Return codes
11578 * 0 - successful
11579 * -ENOMEM - No available memory
11580 * -EIO - The mailbox failed to complete successfully.
11583 lpfc_pci_function_reset(struct lpfc_hba *phba)
11585 LPFC_MBOXQ_t *mboxq;
11586 uint32_t rc = 0, if_type;
11587 uint32_t shdr_status, shdr_add_status;
11588 uint32_t rdy_chk;
11589 uint32_t port_reset = 0;
11590 union lpfc_sli4_cfg_shdr *shdr;
11591 struct lpfc_register reg_data;
11592 uint16_t devid;
11594 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11595 switch (if_type) {
11596 case LPFC_SLI_INTF_IF_TYPE_0:
11597 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
11598 GFP_KERNEL);
11600 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11601 "0494 Unable to allocate memory for "
11602 "issuing SLI_FUNCTION_RESET mailbox "
11607 /* Setup PCI function reset mailbox-ioctl command */
11608 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11609 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
11610 LPFC_SLI4_MBX_EMBED);
11611 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11612 shdr = (union lpfc_sli4_cfg_shdr *)
11613 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11614 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11615 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
11617 mempool_free(mboxq, phba->mbox_mem_pool);
11618 if (shdr_status || shdr_add_status || rc) {
11619 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11620 "0495 SLI_FUNCTION_RESET mailbox "
11621 "failed with status x%x add_status x%x,"
11622 " mbx status x%x\n",
11623 shdr_status, shdr_add_status, rc);
11627 case LPFC_SLI_INTF_IF_TYPE_2:
11628 case LPFC_SLI_INTF_IF_TYPE_6:
11631 * Poll the Port Status Register and wait for RDY for
11632 * up to 30 seconds. If the port doesn't respond, treat
11633 * it as an error.
11634 */
11635 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
11636 if (lpfc_readl(phba->sli4_hba.u.if_type2.
11637 STATUSregaddr, &reg_data.word0)) {
11641 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
11646 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
11647 phba->work_status[0] = readl(
11648 phba->sli4_hba.u.if_type2.ERR1regaddr);
11649 phba->work_status[1] = readl(
11650 phba->sli4_hba.u.if_type2.ERR2regaddr);
11651 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11652 "2890 Port not ready, port status reg "
11653 "0x%x error 1=0x%x, error 2=0x%x\n",
11655 phba->work_status[0],
11656 phba->work_status[1]);
11661 if (bf_get(lpfc_sliport_status_pldv, &reg_data))
11662 lpfc_pldv_detect = true;
11666 * Reset the port now
11668 reg_data.word0 = 0;
11669 bf_set(lpfc_sliport_ctrl_end, &reg_data,
11670 LPFC_SLIPORT_LITTLE_ENDIAN);
11671 bf_set(lpfc_sliport_ctrl_ip, &reg_data,
11672 LPFC_SLIPORT_INIT_PORT);
11673 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
11674 CTRLregaddr);
11675 /* flush */
11676 pci_read_config_word(phba->pcidev,
11677 PCI_DEVICE_ID, &devid);
11682 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
11688 case LPFC_SLI_INTF_IF_TYPE_1:
11694 /* Catch the not-ready port failure after a port reset. */
11696 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11697 "3317 HBA not functional: IP Reset Failed "
11698 "try: echo fw_reset > board_mode\n");
11706 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
11707 * @phba: pointer to lpfc hba data structure.
11709 * This routine is invoked to set up the PCI device memory space for device
11710 * with SLI-4 interface spec.
11711 *
11712 * Return codes
11713 * 0 - successful
11714 * other values - error
11717 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
11719 struct pci_dev *pdev = phba->pcidev;
11720 unsigned long bar0map_len, bar1map_len, bar2map_len;
11721 int error;
11722 uint32_t if_type;
11724 if (!pdev)
11725 return -ENODEV;
11727 /* Set the device DMA mask size */
11728 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11729 if (error)
11730 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11731 if (error)
11732 return error;
11735 * The BARs and register set definitions and offset locations are
11736 * dependent on the if_type.
11738 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
11739 &phba->sli4_hba.sli_intf.word0)) {
11743 /* There is no SLI3 fallback for SLI4 devices. */
11744 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
11745 LPFC_SLI_INTF_VALID) {
11746 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11747 "2894 SLI_INTF reg contents invalid "
11748 "sli_intf reg 0x%x\n",
11749 phba->sli4_hba.sli_intf.word0);
11753 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11755 * Get the bus address of SLI4 device Bar regions and the
11756 * number of bytes required by each mapping. The mapping of the
11757 * particular PCI BAR regions depends on the type of
11760 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
11761 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
11762 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
11765 * Map SLI4 PCI Config Space Register base to a kernel virtual
11768 phba->sli4_hba.conf_regs_memmap_p =
11769 ioremap(phba->pci_bar0_map, bar0map_len);
11770 if (!phba->sli4_hba.conf_regs_memmap_p) {
11771 dev_printk(KERN_ERR, &pdev->dev,
11772 "ioremap failed for SLI4 PCI config "
11776 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
11777 /* Set up BAR0 PCI config space register memory map */
11778 lpfc_sli4_bar0_register_memmap(phba, if_type);
11779 } else {
11780 phba->pci_bar0_map = pci_resource_start(pdev, 1);
11781 bar0map_len = pci_resource_len(pdev, 1);
11782 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
11783 dev_printk(KERN_ERR, &pdev->dev,
11784 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
11787 phba->sli4_hba.conf_regs_memmap_p =
11788 ioremap(phba->pci_bar0_map, bar0map_len);
11789 if (!phba->sli4_hba.conf_regs_memmap_p) {
11790 dev_printk(KERN_ERR, &pdev->dev,
11791 "ioremap failed for SLI4 PCI config "
11795 lpfc_sli4_bar0_register_memmap(phba, if_type);
11798 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11799 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
11801 * Map SLI4 if type 0 HBA Control Register base to a
11802 * kernel virtual address and setup the registers.
11804 phba->pci_bar1_map = pci_resource_start(pdev,
11806 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11807 phba->sli4_hba.ctrl_regs_memmap_p =
11808 ioremap(phba->pci_bar1_map,
11810 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
11811 dev_err(&pdev->dev,
11812 "ioremap failed for SLI4 HBA "
11813 "control registers.\n");
11815 goto out_iounmap_conf;
11817 phba->pci_bar2_memmap_p =
11818 phba->sli4_hba.ctrl_regs_memmap_p;
11819 lpfc_sli4_bar1_register_memmap(phba, if_type);
11822 goto out_iounmap_conf;
11826 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
11827 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
11829 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
11830 * virtual address and setup the registers.
11832 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
11833 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11834 phba->sli4_hba.drbl_regs_memmap_p =
11835 ioremap(phba->pci_bar1_map, bar1map_len);
11836 if (!phba->sli4_hba.drbl_regs_memmap_p) {
11837 dev_err(&pdev->dev,
11838 "ioremap failed for SLI4 HBA doorbell registers.\n");
11840 goto out_iounmap_conf;
11842 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
11843 lpfc_sli4_bar1_register_memmap(phba, if_type);
11846 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11847 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11849 * Map SLI4 if type 0 HBA Doorbell Register base to
11850 * a kernel virtual address and setup the registers.
11852 phba->pci_bar2_map = pci_resource_start(pdev,
11854 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11855 phba->sli4_hba.drbl_regs_memmap_p =
11856 ioremap(phba->pci_bar2_map,
11858 if (!phba->sli4_hba.drbl_regs_memmap_p) {
11859 dev_err(&pdev->dev,
11860 "ioremap failed for SLI4 HBA"
11861 " doorbell registers.\n");
11863 goto out_iounmap_ctrl;
11865 phba->pci_bar4_memmap_p =
11866 phba->sli4_hba.drbl_regs_memmap_p;
11867 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
11868 if (error)
11869 goto out_iounmap_all;
11872 goto out_iounmap_all;
11876 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
11877 pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11879 * Map SLI4 if type 6 HBA DPP Register base to a kernel
11880 * virtual address and setup the registers.
11882 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
11883 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11884 phba->sli4_hba.dpp_regs_memmap_p =
11885 ioremap(phba->pci_bar2_map, bar2map_len);
11886 if (!phba->sli4_hba.dpp_regs_memmap_p) {
11887 dev_err(&pdev->dev,
11888 "ioremap failed for SLI4 HBA dpp registers.\n");
11890 goto out_iounmap_ctrl;
11892 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
11895 /* Set up the EQ/CQ register handling functions now */
11896 switch (if_type) {
11897 case LPFC_SLI_INTF_IF_TYPE_0:
11898 case LPFC_SLI_INTF_IF_TYPE_2:
11899 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
11900 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
11901 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
11903 case LPFC_SLI_INTF_IF_TYPE_6:
11904 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
11905 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
11906 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
11915 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
11917 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
11919 iounmap(phba->sli4_hba.conf_regs_memmap_p);
11925 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
11926 * @phba: pointer to lpfc hba data structure.
11928 * This routine is invoked to unset the PCI device memory space for device
11929 * with SLI-4 interface spec.
11932 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
11935 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11937 switch (if_type) {
11938 case LPFC_SLI_INTF_IF_TYPE_0:
11939 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
11940 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
11941 iounmap(phba->sli4_hba.conf_regs_memmap_p);
11943 case LPFC_SLI_INTF_IF_TYPE_2:
11944 iounmap(phba->sli4_hba.conf_regs_memmap_p);
11946 case LPFC_SLI_INTF_IF_TYPE_6:
11947 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
11948 iounmap(phba->sli4_hba.conf_regs_memmap_p);
11949 if (phba->sli4_hba.dpp_regs_memmap_p)
11950 iounmap(phba->sli4_hba.dpp_regs_memmap_p);
11952 case LPFC_SLI_INTF_IF_TYPE_1:
11954 dev_printk(KERN_ERR, &phba->pcidev->dev,
11955 "FATAL - unsupported SLI4 interface type - %d\n",
11962 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
11963 * @phba: pointer to lpfc hba data structure.
11965 * This routine is invoked to enable the MSI-X interrupt vectors to device
11966 * with SLI-3 interface specs.
11967 *
11968 * Return codes
11969 * 0 - successful
11970 * other values - error
11973 lpfc_sli_enable_msix(struct lpfc_hba *phba)
11978 /* Set up MSI-X multi-message vectors */
11979 rc = pci_alloc_irq_vectors(phba->pcidev,
11980 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
11982 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11983 "0420 PCI enable MSI-X failed (%d)\n", rc);
11988 * Assign MSI-X vectors to interrupt handlers
11991 /* vector-0 is associated to slow-path handler */
11992 rc = request_irq(pci_irq_vector(phba->pcidev, 0),
11993 &lpfc_sli_sp_intr_handler, 0,
11994 LPFC_SP_DRIVER_HANDLER_NAME, phba);
11996 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11997 "0421 MSI-X slow-path request_irq failed "
12002 /* vector-1 is associated to fast-path handler */
12003 rc = request_irq(pci_irq_vector(phba->pcidev, 1),
12004 &lpfc_sli_fp_intr_handler, 0,
12005 LPFC_FP_DRIVER_HANDLER_NAME, phba);
12008 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12009 "0429 MSI-X fast-path request_irq failed "
12015 * Configure the HBA's attention conditions to use MSI-X messages
12017 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12021 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12022 "0474 Unable to allocate memory for issuing "
12023 "MBOX_CONFIG_MSI command\n");
12026 rc = lpfc_config_msi(phba, pmb);
12029 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
12030 if (rc != MBX_SUCCESS) {
12031 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
12032 "0351 Config MSI mailbox command failed, "
12033 "mbxCmd x%x, mbxStatus x%x\n",
12034 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
12038 /* Free memory allocated for mailbox command */
12039 mempool_free(pmb, phba->mbox_mem_pool);
12043 /* Free memory allocated for mailbox command */
12044 mempool_free(pmb, phba->mbox_mem_pool);
12047 /* free the irq already requested */
12048 free_irq(pci_irq_vector(phba->pcidev, 1), phba);
12051 /* free the irq already requested */
12052 free_irq(pci_irq_vector(phba->pcidev, 0), phba);
12055 /* Unconfigure MSI-X capability structure */
12056 pci_free_irq_vectors(phba->pcidev);
12063 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
12064 * @phba: pointer to lpfc hba data structure.
12066 * This routine is invoked to enable the MSI interrupt mode to device with
12067 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
12068 * enable the MSI vector. The device driver is responsible for calling the
12069 * request_irq() to register the MSI vector with the interrupt handler, which
12070 * is done in this function.
12071 *
12072 * Return codes
12073 * 0 - successful
12074 * other values - error
12077 lpfc_sli_enable_msi(struct lpfc_hba *phba)
12081 rc = pci_enable_msi(phba->pcidev);
12083 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12084 "0462 PCI enable MSI mode success.\n");
12086 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12087 "0471 PCI enable MSI mode failed (%d)\n", rc);
12091 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12092 0, LPFC_DRIVER_NAME, phba);
12094 pci_disable_msi(phba->pcidev);
12095 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12096 "0478 MSI request_irq failed (%d)\n", rc);
/**
 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
 * spec. Depending on the interrupt mode configured for the driver, the
 * driver will try to fall back from the configured interrupt mode to an
 * interrupt mode which is supported by the platform, kernel, and device in
 * the order of:
 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
static uint32_t
lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval;

	/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
	retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
	if (retval)
		return intr_mode;
	phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;

	if (cfg_mode == 2) {
		/* Now, try to enable MSI-X interrupt mode */
		retval = lpfc_sli_enable_msix(phba);
		if (!retval) {
			/* Indicate initialization to MSI-X mode */
			phba->intr_type = MSIX;
			intr_mode = 2;
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
		}
	}
	return intr_mode;
}
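/*
 * Worked example (hypothetical configuration): with cfg_mode == 2 the
 * routine attempts MSI-X first; if that allocation fails it falls back to
 * MSI, and finally to a shared INTx line.  The returned intr_mode mirrors
 * whichever mode stuck (2, 1, or 0), so the caller can run an
 * active-interrupt test and, on failure, retry one level lower.
 */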
/**
 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate the
 * driver's interrupt handler(s) from interrupt vector(s) to device with
 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
 * release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli_disable_intr(struct lpfc_hba *phba)
{
	int nr_irqs, i;

	if (phba->intr_type == MSIX)
		nr_irqs = LPFC_MSIX_VECTORS;
	else
		nr_irqs = 1;

	for (i = 0; i < nr_irqs; i++)
		free_irq(pci_irq_vector(phba->pcidev, i), phba);
	pci_free_irq_vectors(phba->pcidev);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;
}
/**
 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
 * @phba: pointer to lpfc hba data structure.
 * @id: EQ vector index or Hardware Queue index
 * @match: LPFC_FIND_BY_EQ = match by EQ
 *         LPFC_FIND_BY_HDWQ = match by Hardware Queue
 * Return the CPU that matches the selection criteria
 **/
static uint16_t
lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
{
	struct lpfc_vector_map_info *cpup;
	int cpu;

	/* Loop through all CPUs */
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* If we are matching by EQ, there may be multiple CPUs
		 * using the same vector, so select the one with
		 * LPFC_CPU_FIRST_IRQ set.
		 */
		if ((match == LPFC_FIND_BY_EQ) &&
		    (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
		    (cpup->eq == id))
			return cpu;

		/* If matching by HDWQ, select the first CPU that matches */
		if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
			return cpu;
	}
	return 0;
}
/**
 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
 * @phba: pointer to lpfc hba data structure.
 * @cpu: CPU map index
 * @phys_id: CPU package physical id
 * @core_id: CPU core id
 **/
static int
lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
		uint16_t phys_id, uint16_t core_id)
{
	struct lpfc_vector_map_info *cpup;
	int idx;

	for_each_present_cpu(idx) {
		cpup = &phba->sli4_hba.cpu_map[idx];
		/* Does the cpup match the one we are looking for */
		if ((cpup->phys_id == phys_id) &&
		    (cpup->core_id == core_id) &&
		    (cpu != idx))
			return 1;
	}
	return 0;
}
/**
 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
 * @phba: pointer to lpfc hba data structure.
 * @eqidx: index for eq and irq vector
 * @flag: flags to set for vector_map structure
 * @cpu: cpu used to index vector_map structure
 *
 * The routine assigns eq info into vector_map structure
 **/
static inline void
lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
			unsigned int cpu)
{
	struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
	struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);

	cpup->eq = eqidx;
	cpup->flag |= flag;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
			cpu, eqhdl->irq, cpup->eq, cpup->flag);
}
/**
 * lpfc_cpu_map_array_init - Initialize cpu_map structure
 * @phba: pointer to lpfc hba data structure.
 *
 * The routine initializes the cpu_map array structure
 **/
static void
lpfc_cpu_map_array_init(struct lpfc_hba *phba)
{
	struct lpfc_vector_map_info *cpup;
	struct lpfc_eq_intr_info *eqi;
	int cpu;

	for_each_possible_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];
		cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
		cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
		cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
		cpup->eq = LPFC_VECTOR_MAP_EMPTY;
		cpup->flag = 0;
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
		INIT_LIST_HEAD(&eqi->list);
		eqi->icnt = 0;
	}
}
/**
 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
 * @phba: pointer to lpfc hba data structure.
 *
 * The routine initializes the hba_eq_hdl array structure
 **/
static void
lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
{
	struct lpfc_hba_eq_hdl *eqhdl;
	int i;

	for (i = 0; i < phba->cfg_irq_chann; i++) {
		eqhdl = lpfc_get_eq_hdl(i);
		eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
		eqhdl->phba = phba;
	}
}
/**
 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
 * @phba: pointer to lpfc hba data structure.
 * @vectors: number of msix vectors allocated.
 *
 * The routine will figure out the CPU affinity assignment for every
 * MSI-X vector allocated for the HBA.
 * In addition, the CPU to IO channel mapping will be calculated
 * and the phba->sli4_hba.cpu_map array will reflect this.
 */
static void
lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
{
	int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
	int max_phys_id, min_phys_id;
	int max_core_id, min_core_id;
	struct lpfc_vector_map_info *cpup;
	struct lpfc_vector_map_info *new_cpup;
#ifdef CONFIG_X86
	struct cpuinfo_x86 *cpuinfo;
#endif
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	struct lpfc_hdwq_stat *c_stat;
#endif

	max_phys_id = 0;
	min_phys_id = LPFC_VECTOR_MAP_EMPTY;
	max_core_id = 0;
	min_core_id = LPFC_VECTOR_MAP_EMPTY;

	/* Update CPU map with physical id and core id of each CPU */
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];
#ifdef CONFIG_X86
		cpuinfo = &cpu_data(cpu);
		cpup->phys_id = cpuinfo->phys_proc_id;
		cpup->core_id = cpuinfo->cpu_core_id;
		if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
			cpup->flag |= LPFC_CPU_MAP_HYPER;
#else
		/* No distinction between CPUs for other platforms */
		cpup->phys_id = 0;
		cpup->core_id = cpu;
#endif

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3328 CPU %d physid %d coreid %d flag x%x\n",
				cpu, cpup->phys_id, cpup->core_id, cpup->flag);

		if (cpup->phys_id > max_phys_id)
			max_phys_id = cpup->phys_id;
		if (cpup->phys_id < min_phys_id)
			min_phys_id = cpup->phys_id;

		if (cpup->core_id > max_core_id)
			max_core_id = cpup->core_id;
		if (cpup->core_id < min_core_id)
			min_core_id = cpup->core_id;
	}

	/* After looking at each irq vector assigned to this pcidev, it's
	 * possible to see that not ALL CPUs have been accounted for.
	 * Next we will set any unassigned (unaffinitized) cpu map
	 * entries to an IRQ on the same phys_id.
	 */
	first_cpu = cpumask_first(cpu_present_mask);
	start_cpu = first_cpu;

	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Is this CPU entry unassigned */
		if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
			/* Mark CPU as IRQ not assigned by the kernel */
			cpup->flag |= LPFC_CPU_MAP_UNASSIGN;

			/* If so, find a new_cpup that's on the SAME
			 * phys_id as cpup. start_cpu will start where we
			 * left off so all unassigned entries don't get
			 * assigned the IRQ of the first entry.
			 */
			new_cpu = start_cpu;
			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
				if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
				    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
				    (new_cpup->phys_id == cpup->phys_id))
					goto found_same;
				new_cpu = cpumask_next(
					new_cpu, cpu_present_mask);
				if (new_cpu == nr_cpumask_bits)
					new_cpu = first_cpu;
			}
			/* At this point, we leave the CPU as unassigned */
			continue;
found_same:
			/* We found a matching phys_id, so copy the IRQ info */
			cpup->eq = new_cpup->eq;

			/* Bump start_cpu to the next slot to minimize the
			 * chance of having multiple unassigned CPU entries
			 * selecting the same IRQ.
			 */
			start_cpu = cpumask_next(new_cpu, cpu_present_mask);
			if (start_cpu == nr_cpumask_bits)
				start_cpu = first_cpu;

			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3337 Set Affinity: CPU %d "
					"eq %d from peer cpu %d same "
					"phys_id (%d)\n",
					cpu, cpup->eq, new_cpu,
					cpup->phys_id);
		}
	}

	/* Set any unassigned cpu map entries to an IRQ on any phys_id */
	start_cpu = first_cpu;

	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Is this entry unassigned */
		if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
			/* Mark it as IRQ not assigned by the kernel */
			cpup->flag |= LPFC_CPU_MAP_UNASSIGN;

			/* If so, find a new_cpup that's on ANY phys_id
			 * as the cpup. start_cpu will start where we
			 * left off so all unassigned entries don't get
			 * assigned the IRQ of the first entry.
			 */
			new_cpu = start_cpu;
			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
				if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
				    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
					goto found_any;
				new_cpu = cpumask_next(
					new_cpu, cpu_present_mask);
				if (new_cpu == nr_cpumask_bits)
					new_cpu = first_cpu;
			}
			/* We should never leave an entry unassigned */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3339 Set Affinity: CPU %d "
					"eq %d UNASSIGNED\n",
					cpu, cpup->eq);
			continue;
found_any:
			/* We found an available entry, copy the IRQ info */
			cpup->eq = new_cpup->eq;

			/* Bump start_cpu to the next slot to minimize the
			 * chance of having multiple unassigned CPU entries
			 * selecting the same IRQ.
			 */
			start_cpu = cpumask_next(new_cpu, cpu_present_mask);
			if (start_cpu == nr_cpumask_bits)
				start_cpu = first_cpu;

			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3338 Set Affinity: CPU %d "
					"eq %d from peer cpu %d (%d/%d)\n",
					cpu, cpup->eq, new_cpu,
					new_cpup->phys_id, new_cpup->core_id);
		}
	}

	/* Assign hdwq indices that are unique across all cpus in the map
	 * that are also FIRST_CPUs.
	 */
	idx = 0;
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Only FIRST IRQs get a hdwq index assignment. */
		if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
			continue;

		/* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
		cpup->hdwq = idx;
		idx++;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3333 Set Affinity: CPU %d (phys %d core %d): "
				"hdwq %d eq %d flg x%x\n",
				cpu, cpup->phys_id, cpup->core_id,
				cpup->hdwq, cpup->eq, cpup->flag);
	}
	/* Associate a hdwq with each cpu_map entry
	 * This will be 1 to 1 - hdwq to cpu, unless there are fewer
	 * hardware queues than CPUs. For that case we will just round-robin
	 * the available hardware queues as they get assigned to CPUs.
	 * The next_idx is the idx from the FIRST_CPU loop above to account
	 * for irq_chann < hdwq. The idx is used for round-robin assignments
	 * and needs to start at 0.
	 */
	next_idx = idx;
	start_cpu = 0;
	idx = 0;
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* FIRST cpus are already mapped. */
		if (cpup->flag & LPFC_CPU_FIRST_IRQ)
			continue;

		/* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
		 * of the unassigned cpus to the next idx so that all
		 * hdw queues are fully utilized.
		 */
		if (next_idx < phba->cfg_hdw_queue) {
			cpup->hdwq = next_idx;
			next_idx++;
			continue;
		}

		/* Not a First CPU and all hdw_queues are used. Reuse a
		 * Hardware Queue for another CPU, so be smart about it
		 * and pick one that has its IRQ/EQ mapped to the same phys_id
		 * (CPU package) and core_id.
		 */
		new_cpu = start_cpu;
		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
			new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
			if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
			    new_cpup->phys_id == cpup->phys_id &&
			    new_cpup->core_id == cpup->core_id) {
				goto found_hdwq;
			}
			new_cpu = cpumask_next(new_cpu, cpu_present_mask);
			if (new_cpu == nr_cpumask_bits)
				new_cpu = first_cpu;
		}

		/* If we can't match both phys_id and core_id,
		 * settle for just a phys_id match.
		 */
		new_cpu = start_cpu;
		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
			new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
			if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
			    new_cpup->phys_id == cpup->phys_id)
				goto found_hdwq;

			new_cpu = cpumask_next(new_cpu, cpu_present_mask);
			if (new_cpu == nr_cpumask_bits)
				new_cpu = first_cpu;
		}

		/* Otherwise just round robin on cfg_hdw_queue */
		cpup->hdwq = idx % phba->cfg_hdw_queue;
		idx++;
		goto logit;
found_hdwq:
		/* We found an available entry, copy the IRQ info */
		start_cpu = cpumask_next(new_cpu, cpu_present_mask);
		if (start_cpu == nr_cpumask_bits)
			start_cpu = first_cpu;
		cpup->hdwq = new_cpup->hdwq;
logit:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3335 Set Affinity: CPU %d (phys %d core %d): "
				"hdwq %d eq %d flg x%x\n",
				cpu, cpup->phys_id, cpup->core_id,
				cpup->hdwq, cpup->eq, cpup->flag);
	}

	/*
	 * Initialize the cpu_map slots for not-present cpus in case
	 * a cpu is hot-added. Perform a simple hdwq round robin assignment.
	 */
	idx = 0;
	for_each_possible_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
		c_stat->hdwq_no = cpup->hdwq;
#endif
		if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
			continue;

		cpup->hdwq = idx++ % phba->cfg_hdw_queue;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		c_stat->hdwq_no = cpup->hdwq;
#endif
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3340 Set Affinity: not present "
				"CPU %d hdwq %d\n",
				cpu, cpup->hdwq);
	}

	/* The cpu_map array will be used later during initialization
	 * when EQ / CQ / WQs are allocated and configured.
	 */
	return;
}
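/*
 * Illustrative mapping (hypothetical single-package system with 8 present
 * cpus and cfg_irq_chann == cfg_hdw_queue == 4): the four
 * LPFC_CPU_FIRST_IRQ cpus take hdwq 0-3 in the FIRST_CPU loop above, and
 * each remaining cpu is then matched to a peer on the same
 * phys_id/core_id, so every EQ ends up servicing two cpus and no hardware
 * queue is left idle.
 */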
/**
 * lpfc_cpuhp_get_eq
 *
 * @phba:   pointer to lpfc hba data structure.
 * @cpu:    cpu going offline
 * @eqlist: eq list to append to
 */
static int
lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
		  struct list_head *eqlist)
{
	const struct cpumask *maskp;
	struct lpfc_queue *eq;
	struct cpumask *tmp;
	u16 idx;

	tmp = kzalloc(cpumask_size(), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
		maskp = pci_irq_get_affinity(phba->pcidev, idx);
		if (!maskp)
			continue;
		/*
		 * If the irq is not affinitized to the cpu going offline,
		 * then we don't need to poll the eq attached to it.
		 */
		if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
			continue;
		/* Get the cpus that are online and are affinitized to
		 * this irq vector. If the count is more than 1 then
		 * cpuhp is not going to shut down this vector. Since
		 * this cpu has not gone offline yet, we need >1.
		 */
		cpumask_and(tmp, maskp, cpu_online_mask);
		if (cpumask_weight(tmp) > 1)
			continue;

		/* Now that we have an irq to shutdown, get the eq
		 * mapped to this irq. Note: multiple hdwq's in
		 * the software can share an eq, but eventually
		 * only one eq will be mapped to this vector.
		 */
		eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
		list_add(&eq->_poll_list, eqlist);
	}
	kfree(tmp);
	return 0;
}
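/*
 * Note on the cpumask_weight() test above (an observation on the code, not
 * original commentary): at teardown time the departing cpu is still set in
 * cpu_online_mask, so a weight of exactly 1 means this vector's affinity
 * set is about to lose its only online cpu, and its EQ must be handed to
 * the software poll loop.
 */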
static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
{
	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
					    &phba->cpuhp);
	/*
	 * unregistering the instance doesn't stop the polling
	 * timer. Wait for the poll timer to retire.
	 */
	synchronize_rcu();
	del_timer_sync(&phba->cpuhp_poll_timer);
}
static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
{
	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		return;

	__lpfc_cpuhp_remove(phba);
}
static void lpfc_cpuhp_add(struct lpfc_hba *phba)
{
	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	rcu_read_lock();

	if (!list_empty(&phba->poll_list))
		mod_timer(&phba->cpuhp_poll_timer,
			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));

	rcu_read_unlock();

	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
					 &phba->cpuhp);
}
static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
{
	if (phba->pport->load_flag & FC_UNLOADING) {
		*retval = -EAGAIN;
		return true;
	}

	if (phba->sli_rev != LPFC_SLI_REV4) {
		*retval = 0;
		return true;
	}

	/* proceed with the hotplug */
	return false;
}
/**
 * lpfc_irq_set_aff - set IRQ affinity
 * @eqhdl: EQ handle
 * @cpu: cpu to set affinity
 *
 **/
static inline void
lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
{
	cpumask_clear(&eqhdl->aff_mask);
	cpumask_set_cpu(cpu, &eqhdl->aff_mask);
	irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
	irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask);
}
/**
 * lpfc_irq_clear_aff - clear IRQ affinity
 * @eqhdl: EQ handle
 *
 **/
static inline void
lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
{
	cpumask_clear(&eqhdl->aff_mask);
	irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
}
/**
 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
 * @phba: pointer to HBA context object.
 * @cpu: cpu going offline/online
 * @offline: true, cpu is going offline. false, cpu is coming online.
 *
 * If cpu is going offline, we'll try our best effort to find the next
 * online cpu on the phba's original_mask and migrate all offlining IRQ
 * affinities.
 *
 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
 *
 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
 *	 PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
 *
 **/
static void
lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
{
	struct lpfc_vector_map_info *cpup;
	struct cpumask *aff_mask;
	unsigned int cpu_select, cpu_next, idx;
	const struct cpumask *orig_mask;

	if (phba->irq_chann_mode == NORMAL_MODE)
		return;

	orig_mask = &phba->sli4_hba.irq_aff_mask;

	if (!cpumask_test_cpu(cpu, orig_mask))
		return;

	cpup = &phba->sli4_hba.cpu_map[cpu];

	if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
		return;

	if (offline) {
		/* Find next online CPU on original mask */
		cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
		cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);

		/* Found a valid CPU */
		if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
			/* Go through each eqhdl and ensure offlining
			 * cpu aff_mask is migrated
			 */
			for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
				aff_mask = lpfc_get_aff_mask(idx);

				/* Migrate affinity */
				if (cpumask_test_cpu(cpu, aff_mask))
					lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
							 cpu_select);
			}
		} else {
			/* Rely on irqbalance if no online CPUs left on NUMA */
			for (idx = 0; idx < phba->cfg_irq_chann; idx++)
				lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
		}
	} else {
		/* Migrate affinity back to this CPU */
		lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
	}
}
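/*
 * Example scenario (hypothetical): in NUMA mode with irq_aff_mask covering
 * cpus 8-15, offlining cpu 9 migrates every vector whose aff_mask contains
 * cpu 9 to the next online cpu in the mask (say cpu 10).  If cpus 8-15 are
 * all going or gone offline, the vectors are simply unpinned and left to
 * irqbalance; when cpu 9 later comes back, its vector is pinned to it again.
 */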
static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
	struct lpfc_queue *eq, *next;
	LIST_HEAD(eqlist);
	int retval;

	WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());

	if (!phba)
		return 0;

	if (__lpfc_cpuhp_checks(phba, &retval))
		return retval;

	lpfc_irq_rebalance(phba, cpu, true);

	retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
	if (retval)
		return retval;

	/* start polling on these eq's */
	list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
		list_del_init(&eq->_poll_list);
		lpfc_sli4_start_polling(eq);
	}

	return 0;
}
static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
	struct lpfc_queue *eq, *next;
	unsigned int n;
	int retval;

	WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());

	if (!phba)
		return 0;

	if (__lpfc_cpuhp_checks(phba, &retval))
		return retval;

	lpfc_irq_rebalance(phba, cpu, false);

	list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
		n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
		if (n == cpu)
			lpfc_sli4_stop_polling(eq);
	}

	return 0;
}
/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them
 * to cpus on the system.
 *
 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
 * the number of cpus on the same numa node as this adapter. The vectors are
 * allocated without requesting OS affinity mapping. A vector will be
 * allocated and assigned to each online and offline cpu. If the cpu is
 * online, then affinity will be set to that cpu. If the cpu is offline, then
 * affinity will be set to the nearest peer cpu within the numa node that is
 * online. If there are no online cpus within the numa node, affinity is not
 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
 * is consistent with the way cpu online/offline is handled when cfg_irq_numa
 * is enabled.
 *
 * If numa mode is not enabled and there is more than 1 vector allocated, then
 * the driver relies on the managed irq interface where the OS assigns vector
 * to cpu affinity. The driver will then use that affinity mapping to set up
 * its cpu mapping table.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
static int
lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
	int vectors, rc, index;
	char *name;
	const struct cpumask *aff_mask = NULL;
	unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
	struct lpfc_vector_map_info *cpup;
	struct lpfc_hba_eq_hdl *eqhdl;
	const struct cpumask *maskp;
	unsigned int flags = PCI_IRQ_MSIX;

	/* Set up MSI-X multi-message vectors */
	vectors = phba->cfg_irq_chann;

	if (phba->irq_chann_mode != NORMAL_MODE)
		aff_mask = &phba->sli4_hba.irq_aff_mask;

	if (aff_mask) {
		cpu_cnt = cpumask_weight(aff_mask);
		vectors = min(phba->cfg_irq_chann, cpu_cnt);

		/* cpu: iterates over aff_mask including offline or online
		 * cpu_select: iterates over online aff_mask to set affinity
		 */
		cpu = cpumask_first(aff_mask);
		cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
	} else {
		flags |= PCI_IRQ_AFFINITY;
	}

	rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
	if (rc < 0) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0484 PCI enable MSI-X failed (%d)\n", rc);
		goto vec_fail_out;
	}
	vectors = rc;

	/* Assign MSI-X vectors to interrupt handlers */
	for (index = 0; index < vectors; index++) {
		eqhdl = lpfc_get_eq_hdl(index);
		name = eqhdl->handler_name;
		memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
		snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
			 LPFC_DRIVER_HANDLER_NAME"%d", index);

		eqhdl->idx = index;
		rc = request_irq(pci_irq_vector(phba->pcidev, index),
				 &lpfc_sli4_hba_intr_handler, 0,
				 name, eqhdl);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0486 MSI-X fast-path (%d) "
					"request_irq failed (%d)\n", index, rc);
			goto cfg_fail_out;
		}

		eqhdl->irq = pci_irq_vector(phba->pcidev, index);

		if (aff_mask) {
			/* If found a neighboring online cpu, set affinity */
			if (cpu_select < nr_cpu_ids)
				lpfc_irq_set_aff(eqhdl, cpu_select);

			/* Assign EQ to cpu_map */
			lpfc_assign_eq_map_info(phba, index,
						LPFC_CPU_FIRST_IRQ,
						cpu);

			/* Iterate to next offline or online cpu in aff_mask */
			cpu = cpumask_next(cpu, aff_mask);

			/* Find next online cpu in aff_mask to set affinity */
			cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
		} else if (vectors == 1) {
			cpu = cpumask_first(cpu_present_mask);
			lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
						cpu);
		} else {
			maskp = pci_irq_get_affinity(phba->pcidev, index);

			/* Loop through all CPUs associated with vector index */
			for_each_cpu_and(cpu, maskp, cpu_present_mask) {
				cpup = &phba->sli4_hba.cpu_map[cpu];

				/* If this is the first CPU that's assigned to
				 * this vector, set LPFC_CPU_FIRST_IRQ.
				 *
				 * With certain platforms it's possible that irq
				 * vectors are affinitized to all the cpus.
				 * This can result in each cpu_map.eq being set
				 * to the last vector, resulting in overwrite
				 * of all the previous cpu_map.eq. Ensure that
				 * each vector receives a place in cpu_map.
				 * A later call to lpfc_cpu_affinity_check will
				 * ensure we are nicely balanced out.
				 */
				if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
					continue;
				lpfc_assign_eq_map_info(phba, index,
							LPFC_CPU_FIRST_IRQ,
							cpu);
				break;
			}
		}
	}

	if (vectors != phba->cfg_irq_chann) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3238 Reducing IO channels to match number of "
				"MSI-X vectors, requested %d got %d\n",
				phba->cfg_irq_chann, vectors);
		if (phba->cfg_irq_chann > vectors)
			phba->cfg_irq_chann = vectors;
	}

	return rc;

cfg_fail_out:
	/* free the irq already requested */
	for (--index; index >= 0; index--) {
		eqhdl = lpfc_get_eq_hdl(index);
		lpfc_irq_clear_aff(eqhdl);
		free_irq(eqhdl->irq, eqhdl);
	}

	/* Unconfigure MSI-X capability structure */
	pci_free_irq_vectors(phba->pcidev);

vec_fail_out:
	return rc;
}
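/*
 * Illustration (hypothetical 16-cpu host, cfg_irq_chann = 4, managed
 * affinity path): pci_alloc_irq_vectors() spreads the four vectors across
 * the present cpus, and the for_each_cpu_and() walk above marks exactly one
 * present cpu per vector with LPFC_CPU_FIRST_IRQ; lpfc_cpu_affinity_check()
 * later fills in the remaining cpu_map entries from those anchors.
 */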
/**
 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
 * called to enable the MSI vector. The device driver is responsible for
 * calling request_irq() to register the MSI vector with an interrupt
 * handler, which is done in this function.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
static int
lpfc_sli4_enable_msi(struct lpfc_hba *phba)
{
	int rc, index;
	unsigned int cpu;
	struct lpfc_hba_eq_hdl *eqhdl;

	rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
				   PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
	if (rc > 0)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0487 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0488 PCI enable MSI mode failed (%d)\n", rc);
		return rc ? rc : -1;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
			 0, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_free_irq_vectors(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0490 MSI request_irq failed (%d)\n", rc);
		return rc;
	}

	eqhdl = lpfc_get_eq_hdl(0);
	eqhdl->irq = pci_irq_vector(phba->pcidev, 0);

	cpu = cpumask_first(cpu_present_mask);
	lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);

	for (index = 0; index < phba->cfg_irq_chann; index++) {
		eqhdl = lpfc_get_eq_hdl(index);
		eqhdl->idx = index;
	}

	return 0;
}
/**
 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
 * interface spec. Depending on the interrupt mode configured for the
 * driver, the driver will try to fall back from the configured interrupt
 * mode to an interrupt mode which is supported by the platform, kernel,
 * and device in the order of:
 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
static uint32_t
lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval, idx;

	if (cfg_mode == 2) {
		/* Preparation before conf_msi mbox cmd */
		retval = 0;
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli4_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli4_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			struct lpfc_hba_eq_hdl *eqhdl;
			unsigned int cpu;

			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;

			eqhdl = lpfc_get_eq_hdl(0);
			eqhdl->irq = pci_irq_vector(phba->pcidev, 0);

			cpu = cpumask_first(cpu_present_mask);
			lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
						cpu);
			for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
				eqhdl = lpfc_get_eq_hdl(idx);
				eqhdl->idx = idx;
			}
		}
	}
	return intr_mode;
}
/**
 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate
 * the driver's interrupt handler(s) from interrupt vector(s) to device
 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
 * will release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli4_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX) {
		int index;
		struct lpfc_hba_eq_hdl *eqhdl;

		/* Free up MSI-X multi-message vectors */
		for (index = 0; index < phba->cfg_irq_chann; index++) {
			eqhdl = lpfc_get_eq_hdl(index);
			lpfc_irq_clear_aff(eqhdl);
			free_irq(eqhdl->irq, eqhdl);
		}
	} else {
		free_irq(phba->pcidev->irq, phba);
	}

	pci_free_irq_vectors(phba->pcidev);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;
}
/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps to
 * a device with SLI-3 interface spec.
 **/
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);

	phba->pport->work_port_events = 0;

	lpfc_sli_hba_down(phba);

	lpfc_sli_brdrestart(phba);

	lpfc_sli_disable_intr(phba);

	return;
}
/**
 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to wait for completion
 * of device's XRIs exchange busy. It will check the XRI exchange busy
 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
 * that, it will check the XRI exchange busy on outstanding FCP and ELS
 * I/Os every 30 seconds, log error message, and wait forever. Only when
 * all XRI exchange busy complete, the driver unload shall proceed with
 * invoking the function reset ioctl mailbox command to the CNA and
 * the rest of the driver unload resource release.
 **/
static void
lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	int idx, ccnt;
	int wait_time = 0;
	int io_xri_cmpl = 1;
	int nvmet_xri_cmpl = 1;
	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	/* Driver just aborted IOs during the hba_unset process. Pause
	 * here to give the HBA time to complete the IO and get entries
	 * into the abts lists.
	 */
	msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);

	/* Wait for NVME pending IO to flush back to transport. */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_nvme_wait_for_io_drain(phba);

	ccnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];
		io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
		if (!io_xri_cmpl) /* if list is NOT empty */
			ccnt++;
	}
	if (ccnt)
		io_xri_cmpl = 0;

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		nvmet_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	}

	while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
			if (!nvmet_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6424 NVMET XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			if (!io_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6100 IO XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			if (!els_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"2878 ELS XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
		} else {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
		}

		ccnt = 0;
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			qp = &phba->sli4_hba.hdwq[idx];
			io_xri_cmpl = list_empty(
			    &qp->lpfc_abts_io_buf_list);
			if (!io_xri_cmpl) /* if list is NOT empty */
				ccnt++;
		}
		if (ccnt)
			io_xri_cmpl = 0;

		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			nvmet_xri_cmpl = list_empty(
				&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
		}
		els_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
	}
}
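/*
 * Timeline sketch (assuming the usual constants, T1 = 10ms, T2 = 30s,
 * TMO = 10s, as described in the kernel-doc above): the routine naps 50ms
 * up front, polls the abort lists every 10ms for the first 10 seconds,
 * then logs the "exchange busy" messages and drops to a 30-second poll,
 * waiting indefinitely until the port has retired every outstanding XRI.
 */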
/**
 * lpfc_sli4_hba_unset - Unset the fcoe hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to reset the HBA's FCoE
 * function. The caller is not required to hold any lock. This routine
 * issues PCI function reset mailbox command to reset the FCoE function.
 * At the end of the function, it calls lpfc_hba_down_post function to
 * free any pending commands.
 **/
static void
lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
	int wait_cnt = 0;
	LPFC_MBOXQ_t *mboxq;
	struct pci_dev *pdev = phba->pcidev;

	lpfc_stop_hba_timers(phba);
	hrtimer_cancel(&phba->cmf_timer);

	if (phba->pport)
		phba->sli4_hba.intr_enable = 0;

	/*
	 * Gracefully wait out the potential current outstanding asynchronous
	 * mailbox command.
	 */

	/* First, block any pending async mailbox command from posted */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);
	/* Now, trying to wait it out if we can */
	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			break;
	}
	/* Forcefully release the outstanding mailbox command if timed out */
	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		mboxq = phba->sli.mbox_active;
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		spin_unlock_irq(&phba->hbalock);
	}

	/* Abort all iocbs associated with the hba */
	lpfc_sli_hba_iocb_abort(phba);

	if (!pci_channel_offline(phba->pcidev))
		/* Wait for completion of device XRI exchange busy */
		lpfc_sli4_xri_exchange_busy_wait(phba);

	/* per-phba callback de-registration for hotplug event */
	if (phba->pport)
		lpfc_cpuhp_remove(phba);

	/* Disable PCI subsystem interrupt */
	lpfc_sli4_disable_intr(phba);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);

	/* Disable FW logging to host memory */
	lpfc_ras_stop_fwlog(phba);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);

	/* release all queue allocated resources. */
	lpfc_sli4_queue_destroy(phba);

	/* Free RAS DMA memory */
	if (phba->ras_fwlog.ras_enabled)
		lpfc_sli4_ras_dma_free(phba);

	/* Stop the SLI4 device port */
	if (phba->pport)
		phba->pport->work_port_events = 0;
}
static uint32_t
lpfc_cgn_crc32(uint32_t crc, u8 byte)
{
	uint32_t msb = 0;
	uint32_t bit;

	for (bit = 0; bit < 8; bit++) {
		msb = (crc >> 31) & 1;
		crc <<= 1;

		if (msb ^ (byte & 1)) {
			crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER;
			crc |= 1;
		}
		byte >>= 1;
	}
	return crc;
}
static uint32_t
lpfc_cgn_reverse_bits(uint32_t wd)
{
	uint32_t result = 0;
	uint32_t i;

	for (i = 0; i < 32; i++) {
		result <<= 1;
		result |= (1 & (wd >> i));
	}
	return result;
}
/*
 * The routine corresponds with the algorithm the HBA firmware
 * uses to validate the data integrity.
 */
uint32_t
lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc)
{
	uint32_t  i;
	uint32_t result;
	uint8_t  *data = (uint8_t *)ptr;

	for (i = 0; i < byteLen; ++i)
		crc = lpfc_cgn_crc32(crc, data[i]);

	result = ~lpfc_cgn_reverse_bits(crc);
	return result;
}
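/*
 * Sketch of the scheme (descriptive only): lpfc_cgn_crc32() consumes each
 * byte LSB-first against a 32-bit shift register, and the final value is
 * bit-reversed and inverted, i.e. a reflected-output CRC-32 seeded with
 * LPFC_CGN_CRC32_SEED -- matching the arithmetic the firmware applies when
 * it validates cgn_info_crc, per the comment above.
 */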
void
lpfc_init_congestion_buf(struct lpfc_hba *phba)
{
	struct lpfc_cgn_info *cp;
	struct timespec64 cmpl_time;
	struct tm broken;
	uint16_t size;
	uint32_t crc;

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"6235 INIT Congestion Buffer %p\n", phba->cgn_i);

	if (!phba->cgn_i)
		return;
	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;

	atomic_set(&phba->cgn_fabric_warn_cnt, 0);
	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
	atomic_set(&phba->cgn_sync_alarm_cnt, 0);
	atomic_set(&phba->cgn_sync_warn_cnt, 0);

	atomic_set(&phba->cgn_driver_evt_cnt, 0);
	atomic_set(&phba->cgn_latency_evt_cnt, 0);
	atomic64_set(&phba->cgn_latency_evt, 0);
	phba->cgn_evt_minute = 0;
	phba->hba_flag &= ~HBA_CGN_DAY_WRAP;

	memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat));
	cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
	cp->cgn_info_version = LPFC_CGN_INFO_V3;

	/* cgn parameters */
	cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
	cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
	cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
	cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;

	ktime_get_real_ts64(&cmpl_time);
	time64_to_tm(cmpl_time.tv_sec, 0, &broken);

	cp->cgn_info_month = broken.tm_mon + 1;
	cp->cgn_info_day = broken.tm_mday;
	cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */
	cp->cgn_info_hour = broken.tm_hour;
	cp->cgn_info_minute = broken.tm_min;
	cp->cgn_info_second = broken.tm_sec;

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
			"2643 CGNInfo Init: Start Time "
			"%d/%d/%d %d:%d:%d\n",
			cp->cgn_info_day, cp->cgn_info_month,
			cp->cgn_info_year, cp->cgn_info_hour,
			cp->cgn_info_minute, cp->cgn_info_second);

	/* Fill in default LUN qdepth */
	if (phba->pport) {
		size = (uint16_t)(phba->pport->cfg_lun_queue_depth);
		cp->cgn_lunq = cpu_to_le16(size);
	}

	/* last used Index initialized to 0xff already */

	cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
	cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
	cp->cgn_info_crc = cpu_to_le32(crc);

	phba->cgn_evt_timestamp = jiffies +
		msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
}
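/*
 * Example (hypothetical date): an init on 15 March 2022 records
 * cgn_info_month = 3, cgn_info_day = 15 and cgn_info_year = 22, since the
 * year byte is kept relative to 2000 (tm_year counts from 1900, hence the
 * "- 100" above).
 */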
void
lpfc_init_congestion_stat(struct lpfc_hba *phba)
{
	struct lpfc_cgn_info *cp;
	struct timespec64 cmpl_time;
	struct tm broken;
	uint32_t crc;

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"6236 INIT Congestion Stat %p\n", phba->cgn_i);

	if (!phba->cgn_i)
		return;

	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
	memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat));

	ktime_get_real_ts64(&cmpl_time);
	time64_to_tm(cmpl_time.tv_sec, 0, &broken);

	cp->cgn_stat_month = broken.tm_mon + 1;
	cp->cgn_stat_day = broken.tm_mday;
	cp->cgn_stat_year = broken.tm_year - 100; /* relative to 2000 */
	cp->cgn_stat_hour = broken.tm_hour;
	cp->cgn_stat_minute = broken.tm_min;

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
			"2647 CGNstat Init: Start Time "
			"%d/%d/%d %d:%d\n",
			cp->cgn_stat_day, cp->cgn_stat_month,
			cp->cgn_stat_year, cp->cgn_stat_hour,
			cp->cgn_stat_minute);

	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
	cp->cgn_info_crc = cpu_to_le32(crc);
}
/**
 * __lpfc_reg_congestion_buf - register congestion info buffer with HBA
 * @phba: Pointer to hba context object.
 * @reg: flag to determine register or unregister.
 */
static int
__lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg)
{
	struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf;
	union  lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	LPFC_MBOXQ_t *mboxq;
	int length, rc;

	if (!phba->cgn_i)
		return -ENXIO;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2641 REG_CONGESTION_BUF mbox allocation fail: "
				"HBA state x%x reg %d\n",
				phba->pport->port_state, reg);
		return -ENOMEM;
	}

	length = (sizeof(struct lpfc_mbx_reg_congestion_buf) -
		sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length,
			 LPFC_SLI4_MBX_EMBED);
	reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf;
	bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1);
	if (reg > 0)
		bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1);
	else
		bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0);
	reg_congestion_buf->length = sizeof(struct lpfc_cgn_info);
	reg_congestion_buf->addr_lo =
		putPaddrLow(phba->cgn_i->phys);
	reg_congestion_buf->addr_hi =
		putPaddrHigh(phba->cgn_i->phys);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
				 &shdr->response);
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2642 REG_CONGESTION_BUF mailbox "
				"failed with status x%x add_status x%x,"
				" mbx status x%x reg %d\n",
				shdr_status, shdr_add_status, rc, reg);
		return -ENXIO;
	}
	return 0;
}

int
lpfc_unreg_congestion_buf(struct lpfc_hba *phba)
{
	lpfc_cmf_stop(phba);
	return __lpfc_reg_congestion_buf(phba, 0);
}

int
lpfc_reg_congestion_buf(struct lpfc_hba *phba)
{
	return __lpfc_reg_congestion_buf(phba, 1);
}
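/*
 * Usage note (derived from the code above, not original commentary): the
 * reg argument drives only the lpfc_mbx_reg_cgn_buf_cnt field -- a count
 * of 1 registers the DMA'd lpfc_cgn_info buffer with the port, while a
 * count of 0 tears the registration down, which is why the unregister
 * wrapper also stops CMF first via lpfc_cmf_stop().
 */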
/**
 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 **/
int
lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe = &mboxq->u.mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;
	int length;
	bool exp_wqcq_pages = true;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;

	/*
	 * By default, the driver assumes the SLI4 port requires RPI
	 * header postings. The SLI4_PARAM response will correct this
	 * assumption.
	 */
	phba->sli4_hba.rpi_hdrs_in_use = 1;

	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}
	if (unlikely(rc))
		return rc;
	sli4_params = &phba->sli4_hba.pc_sli4_params;
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
					     mbx_sli4_parameters);
	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
					     mbx_sli4_parameters);
	if (bf_get(cfg_phwq, mbx_sli4_parameters))
		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
	else
		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
	sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
					   mbx_sli4_parameters);
	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
	sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
	sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
	sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
	sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
	sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
					    mbx_sli4_parameters);
	sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
					   mbx_sli4_parameters);
	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);

	/* Check for Extended Pre-Registered SGL support */
	phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);

	/* Check for firmware nvme support */
	rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
	      bf_get(cfg_xib, mbx_sli4_parameters));

	if (rc) {
		/* Save this to indicate the Firmware supports NVME */
		sli4_params->nvme = 1;

		/* Firmware NVME support, check driver FC4 NVME support */
		if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
					"6133 Disabling NVME support: "
					"FC4 type not supported: x%x\n",
					phba->cfg_enable_fc4_type);
			goto fcponly;
		}
	} else {
		/* No firmware NVME support, check driver FC4 NVME support */
		sli4_params->nvme = 0;
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
					"6101 Disabling NVME support: Not "
					"supported by firmware (%d %d) x%x\n",
					bf_get(cfg_nvme, mbx_sli4_parameters),
					bf_get(cfg_xib, mbx_sli4_parameters),
					phba->cfg_enable_fc4_type);
fcponly:
			phba->nvmet_support = 0;
			phba->cfg_nvmet_mrq = 0;
			phba->cfg_nvme_seg_cnt = 0;

			/* If no FC4 type support, move to just SCSI support */
			if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
				return -ENODEV;
			phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
		}
	}

	/* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
	 * accommodate 512K and 1M IOs in a single nvme buf.
	 */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;

	/* Enable embedded Payload BDE if support is indicated */
	if (bf_get(cfg_pbde, mbx_sli4_parameters))
		phba->cfg_enable_pbde = 1;
	else
		phba->cfg_enable_pbde = 0;

	/*
	 * To support Suppress Response feature we must satisfy 3 conditions.
	 * lpfc_suppress_rsp module parameter must be set (default).
	 * In SLI4-Parameters Descriptor:
	 * Extended Inline Buffers (XIB) must be supported.
	 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
	 * (double negative).
	 */
	if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
	    !(bf_get(cfg_nosr, mbx_sli4_parameters)))
		phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
	else
		phba->cfg_suppress_rsp = 0;

	if (bf_get(cfg_eqdr, mbx_sli4_parameters))
		phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	/*
	 * Check whether the adapter supports an embedded copy of the
	 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
	 * to use this option, 128-byte WQEs must be used.
	 */
	if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
		phba->fcp_embed_io = 1;
	else
		phba->fcp_embed_io = 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
			"6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
			bf_get(cfg_xib, mbx_sli4_parameters),
			phba->cfg_enable_pbde,
			phba->fcp_embed_io, sli4_params->nvme,
			phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);

	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_2) &&
	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
		 LPFC_SLI_INTF_FAMILY_LNCR_A0))
		exp_wqcq_pages = false;

	if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
	    (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
	    exp_wqcq_pages &&
	    (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
		phba->enab_exp_wqcq_pages = 1;
	else
		phba->enab_exp_wqcq_pages = 0;
	/*
	 * Check if the SLI port supports MDS Diagnostics
	 */
	if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
		phba->mds_diags_support = 1;
	else
		phba->mds_diags_support = 0;

	/*
	 * Check if the SLI port supports NSLER
	 */
	if (bf_get(cfg_nsler, mbx_sli4_parameters))
		phba->nsler = 1;
	else
		phba->nsler = 0;

	return 0;
}
/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver state that it can
 * support this kind of device. If the match is successful, the driver core
 * invokes this routine. If this routine determines it can claim the HBA, it
 * does all the initialization that it needs to do to handle the HBA properly.
 *
 * Return code
 *   0 - driver can claim the device
 *   negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba   *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host  *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */

	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
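/*
 * Flow sketch (illustrative): the probe loop above starts at the user's
 * cfg_use_msi level and, on a failed active-interrupt test, disables the
 * current mode and retries one level lower (MSI-X -> MSI -> INTx), so a
 * platform that delivers no MSI-X interrupts still comes up in a degraded
 * interrupt mode rather than failing the probe outright.
 */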
/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to detach a device with SLI-3 interface
 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba   *phba = vport->phba;
	int i;

	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Clean up all nodes, mailboxes and IOs. */
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	lpfc_debugfs_terminate(vport);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_free_iocb_list(phba);

	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
14117 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
14118 * @dev_d: pointer to device
* This routine is to be called from the kernel's PCI subsystem to support
* system Power Management (PM) for devices with the SLI-3 interface spec.
* When PM invokes this method, it quiesces the device by stopping the
* driver's worker thread for the device, turning off the device's interrupt
* and DMA, and bringing the device offline. Note that the driver implements
* only the minimum PM requirements for a power-aware driver: all possible
* PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() method call are
* treated as SUSPEND, and the driver fully reinitializes its device during
* the resume() method call. The driver therefore sets the device to the
* PCI_D3hot state in PCI config space instead of setting it according to
* the @msg provided by the PM.
14133 * 0 - driver suspended the device
14136 static int __maybe_unused
14137 lpfc_pci_suspend_one_s3(struct device *dev_d)
14139 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14140 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14142 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14143 "0473 PCI device Power Management suspend.\n");
14145 /* Bring down the device */
14146 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14147 lpfc_offline(phba);
14148 kthread_stop(phba->worker_thread);
14150 /* Disable interrupt from device */
14151 lpfc_sli_disable_intr(phba);
14157 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
14158 * @dev_d: pointer to device
* This routine is to be called from the kernel's PCI subsystem to support
* system Power Management (PM) for devices with the SLI-3 interface spec.
* When PM invokes this method, it restores the device's PCI config space
* state and fully reinitializes the device and brings it online. Note that
* the driver implements only the minimum PM requirements for a power-aware
* driver: all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the
* suspend() method call are treated as SUSPEND, and the driver fully
* reinitializes its device during the resume() method call. The device is
* therefore set to PCI_D0 directly in PCI config space before restoring
* the state.
*
* Return code
*	0 - driver resumed the device
14175 static int __maybe_unused
14176 lpfc_pci_resume_one_s3(struct device *dev_d)
14178 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14179 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;
14183 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14184 "0452 PCI device Power Management resume.\n");
14186 /* Startup the kernel thread for this host adapter. */
14187 phba->worker_thread = kthread_run(lpfc_do_work, phba,
14188 "lpfc_worker_%d", phba->brd_no);
14189 if (IS_ERR(phba->worker_thread)) {
14190 error = PTR_ERR(phba->worker_thread);
14191 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14192 "0434 PM resume failed to start worker "
14193 "thread: error=x%x.\n", error);
14197 /* Init cpu_map array */
14198 lpfc_cpu_map_array_init(phba);
14199 /* Init hba_eq_hdl array */
14200 lpfc_hba_eq_hdl_array_init(phba);
14201 /* Configure and enable interrupt */
14202 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14203 if (intr_mode == LPFC_INTR_ERROR) {
14204 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14205 "0430 PM resume Failed to enable interrupt\n");
14208 phba->intr_mode = intr_mode;
14210 /* Restart HBA and bring it online */
14211 lpfc_sli_brdrestart(phba);
14214 /* Log the current active interrupt mode */
14215 lpfc_log_intr_mode(phba, phba->intr_mode);
14221 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
14222 * @phba: pointer to lpfc hba data structure.
14224 * This routine is called to prepare the SLI3 device for PCI slot recover. It
14225 * aborts all the outstanding SCSI I/Os to the pci device.
14228 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
14230 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14231 "2723 PCI channel I/O abort preparing for recovery\n");
14234 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
* and let the SCSI mid-layer retry them to recover.
14237 lpfc_sli_abort_fcp_rings(phba);
14241 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
14242 * @phba: pointer to lpfc hba data structure.
14244 * This routine is called to prepare the SLI3 device for PCI slot reset. It
* disables the device interrupt and pci device, and aborts the internal FCP
* pending I/Os.
14249 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
14251 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14252 "2710 PCI channel disable preparing for reset\n");
14254 /* Block any management I/Os to the device */
14255 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
14257 /* Block all SCSI devices' I/Os on the host */
14258 lpfc_scsi_dev_block(phba);
14260 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
14261 lpfc_sli_flush_io_rings(phba);
14263 /* stop all timers */
14264 lpfc_stop_hba_timers(phba);
14266 /* Disable interrupt and pci device */
14267 lpfc_sli_disable_intr(phba);
14268 pci_disable_device(phba->pcidev);
14272 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
14273 * @phba: pointer to lpfc hba data structure.
14275 * This routine is called to prepare the SLI3 device for PCI slot permanently
* disabling. It blocks the SCSI transport layer traffic and flushes the FCP
* pending I/Os.
14280 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
14282 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14283 "2711 PCI channel permanent disable for failure\n");
14284 /* Block all SCSI devices' I/Os on the host */
14285 lpfc_scsi_dev_block(phba);
14288 /* stop all timers */
14289 lpfc_stop_hba_timers(phba);
14291 /* Clean up all driver's outstanding SCSI I/Os */
14292 lpfc_sli_flush_io_rings(phba);
14296 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
14297 * @pdev: pointer to PCI device.
14298 * @state: the current PCI connection state.
14300 * This routine is called from the PCI subsystem for I/O error handling to
14301 * device with SLI-3 interface spec. This function is called by the PCI
14302 * subsystem after a PCI bus error affecting this device has been detected.
14303 * When this function is invoked, it will need to stop all the I/Os and
14304 * interrupt(s) to the device. Once that is done, it will return
14305 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
14309 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
14310 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
14311 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14313 static pci_ers_result_t
14314 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
14316 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14317 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	switch (state) {
	case pci_channel_io_normal:
14321 /* Non-fatal error, prepare for recovery */
14322 lpfc_sli_prep_dev_for_recover(phba);
14323 return PCI_ERS_RESULT_CAN_RECOVER;
14324 case pci_channel_io_frozen:
14325 /* Fatal error, prepare for slot reset */
14326 lpfc_sli_prep_dev_for_reset(phba);
14327 return PCI_ERS_RESULT_NEED_RESET;
14328 case pci_channel_io_perm_failure:
14329 /* Permanent failure, prepare for device down */
14330 lpfc_sli_prep_dev_for_perm_failure(phba);
14331 return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
14334 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14335 "0472 Unknown PCI error state: x%x\n", state);
14336 lpfc_sli_prep_dev_for_reset(phba);
14337 return PCI_ERS_RESULT_NEED_RESET;
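/*
 * For context: on an AER event the PCI core drives these handlers in
 * sequence -- error_detected() -> (optionally) slot_reset() -> resume() --
 * according to the pci_ers_result_t returned at each step; see the
 * lpfc_err_handler definition near the end of this file.
 */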
14342 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
14343 * @pdev: pointer to PCI device.
14345 * This routine is called from the PCI subsystem for error handling to
14346 * device with SLI-3 interface spec. This is called after PCI bus has been
14347 * reset to restart the PCI card from scratch, as if from a cold-boot.
14348 * During the PCI subsystem error recovery, after driver returns
14349 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
14350 * recovery and then call this routine before calling the .resume method
14351 * to recover the device. This function will initialize the HBA device,
14352 * enable the interrupt, but it will just put the HBA to offline state
14353 * without passing any I/O traffic.
14356 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
14357 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14359 static pci_ers_result_t
14360 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
14362 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14363 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14364 struct lpfc_sli *psli = &phba->sli;
14365 uint32_t intr_mode;
14367 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
14368 if (pci_enable_device_mem(pdev)) {
14369 printk(KERN_ERR "lpfc: Cannot re-enable "
14370 "PCI device after reset.\n");
14371 return PCI_ERS_RESULT_DISCONNECT;
14374 pci_restore_state(pdev);
	/*
	 * pci_restore_state() clears the device's saved_state flag, so the
	 * restored state must be saved again before any later restore.
	 */
14380 pci_save_state(pdev);
14382 if (pdev->is_busmaster)
14383 pci_set_master(pdev);
14385 spin_lock_irq(&phba->hbalock);
14386 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
14387 spin_unlock_irq(&phba->hbalock);
14389 /* Configure and enable interrupt */
14390 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14391 if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
14395 return PCI_ERS_RESULT_DISCONNECT;
14397 phba->intr_mode = intr_mode;
14399 /* Take device offline, it will perform cleanup */
14400 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14401 lpfc_offline(phba);
14402 lpfc_sli_brdrestart(phba);
14404 /* Log the current active interrupt mode */
14405 lpfc_log_intr_mode(phba, phba->intr_mode);
14407 return PCI_ERS_RESULT_RECOVERED;
14411 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
14412 * @pdev: pointer to PCI device
14414 * This routine is called from the PCI subsystem for error handling to device
14415 * with SLI-3 interface spec. It is called when kernel error recovery tells
14416 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
14417 * error recovery. After this call, traffic can start to flow from this device
14421 lpfc_io_resume_s3(struct pci_dev *pdev)
14423 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14424 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	/* Bring device online; it will be a no-op for non-fatal error resume */
	lpfc_online(phba);
14431 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
14432 * @phba: pointer to lpfc hba data structure.
14434 * returns the number of ELS/CT IOCBs to reserve
14437 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
14439 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (max_xri <= 100)
			return 10;
		else if (max_xri <= 256)
			return 25;
		else if (max_xri <= 512)
			return 50;
		else if (max_xri <= 1024)
			return 100;
		else if (max_xri <= 1536)
			return 150;
		else if (max_xri <= 2048)
			return 200;
		else
			return 250;
	} else
		return 0;
}
14461 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
14462 * @phba: pointer to lpfc hba data structure.
14464 * returns the number of ELS/CT + NVMET IOCBs to reserve
14467 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
14469 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
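	/*
	 * An NVMET target posts additional receive buffers
	 * (LPFC_NVMET_BUF_POST), so reserve IOCBs for those as well.
	 */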
14471 if (phba->nvmet_support)
14472 max_xri += LPFC_NVMET_BUF_POST;
static int
lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
14479 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
14480 const struct firmware *fw)
14485 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
14486 /* Three cases: (1) FW was not supported on the detected adapter.
14487 * (2) FW update has been locked out administratively.
14488 * (3) Some other error during FW update.
14489 * In each case, an unmaskable message is written to the console
14490 * for admin diagnosis.
14492 if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
14493 (sli_family == LPFC_SLI_INTF_FAMILY_G6 &&
14494 magic_number != MAGIC_NUMBER_G6) ||
14495 (sli_family == LPFC_SLI_INTF_FAMILY_G7 &&
14496 magic_number != MAGIC_NUMBER_G7) ||
14497 (sli_family == LPFC_SLI_INTF_FAMILY_G7P &&
14498 magic_number != MAGIC_NUMBER_G7P)) {
14499 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14500 "3030 This firmware version is not supported on"
14501 " this HBA model. Device:%x Magic:%x Type:%x "
14502 "ID:%x Size %d %zd\n",
				phba->pcidev->device, magic_number, ftype, fid,
				fsize, fw->size);
14506 } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
14507 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14508 "3021 Firmware downloads have been prohibited "
14509 "by a system configuration setting on "
14510 "Device:%x Magic:%x Type:%x ID:%x Size %d "
				"%zd\n",
				phba->pcidev->device, magic_number, ftype, fid,
				fsize, fw->size);
14516 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14517 "3022 FW Download failed. Add Status x%x "
14518 "Device:%x Magic:%x Type:%x ID:%x Size %d "
				"%zd\n",
				offset, phba->pcidev->device, magic_number,
				ftype, fid, fsize, fw->size);
14528 * lpfc_write_firmware - attempt to write a firmware image to the port
14529 * @fw: pointer to firmware image returned from request_firmware.
* @context: opaque context from request_firmware_nowait(); points to the
* lpfc hba data structure.
14534 lpfc_write_firmware(const struct firmware *fw, void *context)
14536 struct lpfc_hba *phba = (struct lpfc_hba *)context;
14537 char fwrev[FW_REV_STR_SIZE];
14538 struct lpfc_grp_hdr *image;
14539 struct list_head dma_buffer_list;
	struct lpfc_dmabuf *dmabuf, *next;
	int i, rc = 0;
	uint32_t offset = 0, temp_offset = 0;
14543 uint32_t magic_number, ftype, fid, fsize;
14545 /* It can be null in no-wait mode, sanity check */
14550 image = (struct lpfc_grp_hdr *)fw->data;
14552 magic_number = be32_to_cpu(image->magic_number);
14553 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
14554 fid = bf_get_be32(lpfc_grp_hdr_id, image);
14555 fsize = be32_to_cpu(image->size);
14557 INIT_LIST_HEAD(&dma_buffer_list);
14558 lpfc_decode_firmware_rev(phba, fwrev, 1);
14559 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
14560 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14561 "3023 Updating Firmware, Current Version:%s "
14562 "New Version:%s\n",
14563 fwrev, image->revision);
14564 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
14565 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
14571 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14575 if (!dmabuf->virt) {
14580 list_add_tail(&dmabuf->list, &dma_buffer_list);
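		/*
		 * The loop below feeds the image through this small DMA
		 * buffer list in SLI4_PAGE_SIZE chunks; each lpfc_wr_object()
		 * call writes one window and advances @offset until the whole
		 * image has been sent.
		 */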
14582 while (offset < fw->size) {
14583 temp_offset = offset;
14584 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
14585 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
14586 memcpy(dmabuf->virt,
14587 fw->data + temp_offset,
14588 fw->size - temp_offset);
14589 temp_offset = fw->size;
14592 memcpy(dmabuf->virt, fw->data + temp_offset,
14594 temp_offset += SLI4_PAGE_SIZE;
14596 rc = lpfc_wr_object(phba, &dma_buffer_list,
14597 (fw->size - offset), &offset);
14599 rc = lpfc_log_write_firmware_error(phba, offset,
14610 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14611 "3029 Skipped Firmware update, Current "
14612 "Version:%s New Version:%s\n",
14613 fwrev, image->revision);
14616 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
14617 list_del(&dmabuf->list);
14618 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
14619 dmabuf->virt, dmabuf->phys);
14622 release_firmware(fw);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3062 Firmware update error, status %d.\n", rc);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3024 Firmware update success: size %d.\n", rc);
}
14633 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
14634 * @phba: pointer to lpfc hba data structure.
14635 * @fw_upgrade: which firmware to update.
* This routine is called to perform a Linux generic firmware upgrade on
* devices that support this feature.
14641 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
14643 uint8_t file_name[ELX_MODEL_NAME_SIZE];
14645 const struct firmware *fw;
14647 /* Only supported on SLI4 interface type 2 for now */
14648 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
14649 LPFC_SLI_INTF_IF_TYPE_2)
14652 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
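	/*
	 * The image is requested by model name; e.g., a port whose ModelName
	 * were "LPe32000" (hypothetical example) would look up "LPe32000.grp"
	 * through the kernel firmware loader.
	 */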
14654 if (fw_upgrade == INT_FW_UPGRADE) {
14655 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
14656 file_name, &phba->pcidev->dev,
14657 GFP_KERNEL, (void *)phba,
14658 lpfc_write_firmware);
14659 } else if (fw_upgrade == RUN_FW_UPGRADE) {
14660 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
14662 lpfc_write_firmware(fw, (void *)phba);
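	/*
	 * INT_FW_UPGRADE is asynchronous: request_firmware_nowait() returns
	 * immediately and lpfc_write_firmware() runs later as the completion
	 * callback. RUN_FW_UPGRADE blocks in request_firmware() and then
	 * writes the image synchronously.
	 */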
14671 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
14672 * @pdev: pointer to PCI device
14673 * @pid: pointer to PCI device identifier
* This routine is called from the kernel's PCI subsystem to register a
* device with the SLI-4 interface spec. When an Emulex HBA with SLI-4
* interface spec is presented on the PCI bus, the kernel PCI subsystem
* looks at the PCI device-specific information of the device and driver
* to see if the driver can support this kind of device. If the match is
* successful, the driver core invokes this routine. If this routine
* determines it can claim the HBA, it does all the initialization that
* it needs to do to handle the HBA properly.
14685 * 0 - driver can claim the device
* negative value - driver cannot claim the device
14689 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
14691 struct lpfc_hba *phba;
14692 struct lpfc_vport *vport = NULL;
14693 struct Scsi_Host *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;
14697 /* Allocate memory for HBA structure */
14698 phba = lpfc_hba_alloc(pdev);
14702 INIT_LIST_HEAD(&phba->poll_list);
14704 /* Perform generic PCI device enabling operation */
14705 error = lpfc_enable_pci_dev(phba);
14707 goto out_free_phba;
14709 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
14710 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
14712 goto out_disable_pci_dev;
14714 /* Set up SLI-4 specific device PCI memory space */
14715 error = lpfc_sli4_pci_mem_setup(phba);
14717 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14718 "1410 Failed to set up pci memory space.\n");
14719 goto out_disable_pci_dev;
14722 /* Set up SLI-4 Specific device driver resources */
14723 error = lpfc_sli4_driver_resource_setup(phba);
14725 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14726 "1412 Failed to set up driver resource.\n");
14727 goto out_unset_pci_mem_s4;
14730 INIT_LIST_HEAD(&phba->active_rrq_list);
14731 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
14733 /* Set up common device driver resources */
14734 error = lpfc_setup_driver_resource_phase2(phba);
14736 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14737 "1414 Failed to set up driver resource.\n");
14738 goto out_unset_driver_resource_s4;
14741 /* Get the default values for Model Name and Description */
14742 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
14744 /* Now, trying to enable interrupt and bring up the device */
14745 cfg_mode = phba->cfg_use_msi;
14747 /* Put device to a known state before enabling interrupt */
14748 phba->pport = NULL;
14749 lpfc_stop_port(phba);
14751 /* Init cpu_map array */
14752 lpfc_cpu_map_array_init(phba);
14754 /* Init hba_eq_hdl array */
14755 lpfc_hba_eq_hdl_array_init(phba);
14757 /* Configure and enable interrupt */
14758 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
14759 if (intr_mode == LPFC_INTR_ERROR) {
14760 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14761 "0426 Failed to enable interrupt.\n");
14763 goto out_unset_driver_resource;
14765 /* Default to single EQ for non-MSI-X */
14766 if (phba->intr_type != MSIX) {
14767 phba->cfg_irq_chann = 1;
14768 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14769 if (phba->nvmet_support)
14770 phba->cfg_nvmet_mrq = 1;
14773 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
14775 /* Create SCSI host to the physical port */
14776 error = lpfc_create_shost(phba);
14778 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14779 "1415 Failed to create scsi host.\n");
14780 goto out_disable_intr;
14782 vport = phba->pport;
14783 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
14785 /* Configure sysfs attributes */
14786 error = lpfc_alloc_sysfs_attr(vport);
14788 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14789 "1416 Failed to allocate sysfs attr\n");
14790 goto out_destroy_shost;
14793 /* Set up SLI-4 HBA */
14794 if (lpfc_sli4_hba_setup(phba)) {
14795 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14796 "1421 Failed to set up hba\n");
14798 goto out_free_sysfs_attr;
14801 /* Log the current active interrupt mode */
14802 phba->intr_mode = intr_mode;
14803 lpfc_log_intr_mode(phba, intr_mode);
14805 /* Perform post initialization setup */
14806 lpfc_post_init_setup(phba);
	/* The FC4 type was already reconciled with FW-reported NVME support
	 * earlier in the driver load, so no separate nvme_support check is
	 * needed here.
	 */
14811 if (phba->nvmet_support == 0) {
14812 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14813 /* Create NVME binding with nvme_fc_transport. This
14814 * ensures the vport is initialized. If the localport
14815 * create fails, it should not unload the driver to
14816 * support field issues.
14818 error = lpfc_nvme_create_localport(vport);
14820 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14821 "6004 NVME registration "
14822 "failed, error x%x\n",
14828 /* check for firmware upgrade or downgrade */
14829 if (phba->cfg_request_firmware_upgrade)
14830 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
14832 /* Check if there are static vports to be created. */
14833 lpfc_create_static_vport(phba);
14835 /* Enable RAS FW log support */
14836 lpfc_sli4_ras_setup(phba);
14838 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
14839 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
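	/*
	 * Register this HBA as an instance of the driver's dynamic CPU
	 * hotplug state allocated in lpfc_init(); the _nocalls variant
	 * defers the online/offline callbacks until a CPU actually changes
	 * state.
	 */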
14843 out_free_sysfs_attr:
14844 lpfc_free_sysfs_attr(vport);
14846 lpfc_destroy_shost(phba);
14848 lpfc_sli4_disable_intr(phba);
14849 out_unset_driver_resource:
14850 lpfc_unset_driver_resource_phase2(phba);
14851 out_unset_driver_resource_s4:
14852 lpfc_sli4_driver_resource_unset(phba);
14853 out_unset_pci_mem_s4:
14854 lpfc_sli4_pci_mem_unset(phba);
14855 out_disable_pci_dev:
14856 lpfc_disable_pci_dev(phba);
14858 scsi_host_put(shost);
14860 lpfc_hba_free(phba);
14865 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
14866 * @pdev: pointer to PCI device
* This routine is called from the kernel's PCI subsystem to detach a device
* with the SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
14870 * removed from PCI bus, it performs all the necessary cleanup for the HBA
14871 * device to be removed from the PCI subsystem properly.
14874 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
14876 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14877 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
14878 struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;
14882 /* Mark the device unloading flag */
14883 spin_lock_irq(&phba->hbalock);
14884 vport->load_flag |= FC_UNLOADING;
14885 spin_unlock_irq(&phba->hbalock);
14887 lpfc_unreg_congestion_buf(phba);
14889 lpfc_free_sysfs_attr(vport);
14891 /* Release all the vports against this physical port */
14892 vports = lpfc_create_vport_work_array(phba);
14893 if (vports != NULL)
14894 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
14899 lpfc_destroy_vport_work_array(phba, vports);
14901 /* Remove FC host with the physical port */
14902 fc_remove_host(shost);
14903 scsi_remove_host(shost);
	/* Perform ndlp cleanup on the physical port. The nvme and nvmet
	 * localports are destroyed afterwards to clean up all transport
	 * memory.
	 */
14908 lpfc_cleanup(vport);
14909 lpfc_nvmet_destroy_targetport(phba);
14910 lpfc_nvme_destroy_localport(vport);
14912 /* De-allocate multi-XRI pools */
14913 if (phba->cfg_xri_rebalancing)
14914 lpfc_destroy_multixri_pools(phba);
14917 * Bring down the SLI Layer. This step disables all interrupts,
14918 * clears the rings, discards all mailbox commands, and resets
14919 * the HBA FCoE function.
14921 lpfc_debugfs_terminate(vport);
14923 lpfc_stop_hba_timers(phba);
14924 spin_lock_irq(&phba->port_list_lock);
14925 list_del_init(&vport->listentry);
14926 spin_unlock_irq(&phba->port_list_lock);
14928 /* Perform scsi free before driver resource_unset since scsi
14929 * buffers are released to their corresponding pools here.
14931 lpfc_io_free(phba);
14932 lpfc_free_iocb_list(phba);
14933 lpfc_sli4_hba_unset(phba);
14935 lpfc_unset_driver_resource_phase2(phba);
14936 lpfc_sli4_driver_resource_unset(phba);
14938 /* Unmap adapter Control and Doorbell registers */
14939 lpfc_sli4_pci_mem_unset(phba);
14941 /* Release PCI resources and disable device's PCI function */
14942 scsi_host_put(shost);
14943 lpfc_disable_pci_dev(phba);
14945 /* Finally, free the driver's device data structure */
14946 lpfc_hba_free(phba);
14952 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
14953 * @dev_d: pointer to device
* This routine is called from the kernel's PCI subsystem to support system
* Power Management (PM) for devices with the SLI-4 interface spec. When PM
* invokes this method, it quiesces the device by stopping the driver's
* worker thread for the device, turning off the device's interrupt and
* DMA, and bringing the device offline. Note that the driver implements
* only the minimum PM requirements for a power-aware driver: all possible
* PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() method call
* are treated as SUSPEND, and the driver fully reinitializes its device
* during the resume() method call. The driver therefore sets the device
* to the PCI_D3hot state in PCI config space instead of setting it
* according to the @msg provided by the PM.
14968 * 0 - driver suspended the device
14971 static int __maybe_unused
14972 lpfc_pci_suspend_one_s4(struct device *dev_d)
14974 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14975 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14977 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14978 "2843 PCI device Power Management suspend.\n");
14980 /* Bring down the device */
14981 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14982 lpfc_offline(phba);
14983 kthread_stop(phba->worker_thread);
14985 /* Disable interrupt from device */
14986 lpfc_sli4_disable_intr(phba);
14987 lpfc_sli4_queue_destroy(phba);
14993 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
14994 * @dev_d: pointer to device
* This routine is called from the kernel's PCI subsystem to support system
* Power Management (PM) for devices with the SLI-4 interface spec. When PM
* invokes this method, it restores the device's PCI config space state and
* fully reinitializes the device and brings it online. Note that the
* driver implements only the minimum PM requirements for a power-aware
* driver: all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the
* suspend() method call are treated as SUSPEND, and the driver fully
* reinitializes its device during the resume() method call. The device is
* therefore set to PCI_D0 directly in PCI config space before restoring
* the state.
*
* Return code
*	0 - driver resumed the device
15011 static int __maybe_unused
15012 lpfc_pci_resume_one_s4(struct device *dev_d)
15014 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
15015 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;
15019 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15020 "0292 PCI device Power Management resume.\n");
15022 /* Startup the kernel thread for this host adapter. */
15023 phba->worker_thread = kthread_run(lpfc_do_work, phba,
15024 "lpfc_worker_%d", phba->brd_no);
15025 if (IS_ERR(phba->worker_thread)) {
15026 error = PTR_ERR(phba->worker_thread);
15027 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15028 "0293 PM resume failed to start worker "
15029 "thread: error=x%x.\n", error);
15033 /* Configure and enable interrupt */
15034 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
15035 if (intr_mode == LPFC_INTR_ERROR) {
15036 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15037 "0294 PM resume Failed to enable interrupt\n");
15040 phba->intr_mode = intr_mode;
15042 /* Restart HBA and bring it online */
15043 lpfc_sli_brdrestart(phba);
15046 /* Log the current active interrupt mode */
15047 lpfc_log_intr_mode(phba, phba->intr_mode);
15053 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
15054 * @phba: pointer to lpfc hba data structure.
15056 * This routine is called to prepare the SLI4 device for PCI slot recover. It
15057 * aborts all the outstanding SCSI I/Os to the pci device.
15060 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
15062 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15063 "2828 PCI channel I/O abort preparing for recovery\n");
15065 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
* and let the SCSI mid-layer retry them to recover.
15068 lpfc_sli_abort_fcp_rings(phba);
15072 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
15073 * @phba: pointer to lpfc hba data structure.
15075 * This routine is called to prepare the SLI4 device for PCI slot reset. It
* disables the device interrupt and pci device, and aborts the internal FCP
* pending I/Os.
15080 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
15082 int offline = pci_channel_offline(phba->pcidev);
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset offline"
			" %d\n", offline);
15088 /* Block any management I/Os to the device */
15089 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
	if (!offline) {
		/* HBA_PCI_ERR was set in lpfc_io_error_detected() */
15093 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
15094 /* Flush all driver's outstanding I/Os as we are to reset */
15095 lpfc_sli_flush_io_rings(phba);
		lpfc_offline(phba);
	}
15098 /* stop all timers */
15099 lpfc_stop_hba_timers(phba);
15101 lpfc_sli4_queue_destroy(phba);
15102 /* Disable interrupt and pci device */
15103 lpfc_sli4_disable_intr(phba);
15104 pci_disable_device(phba->pcidev);
15108 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
15109 * @phba: pointer to lpfc hba data structure.
15111 * This routine is called to prepare the SLI4 device for PCI slot permanently
* disabling. It blocks the SCSI transport layer traffic and flushes the FCP
* pending I/Os.
15116 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
15118 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15119 "2827 PCI channel permanent disable for failure\n");
15121 /* Block all SCSI devices' I/Os on the host */
15122 lpfc_scsi_dev_block(phba);
15124 /* stop all timers */
15125 lpfc_stop_hba_timers(phba);
15127 /* Clean up all driver's outstanding I/Os */
15128 lpfc_sli_flush_io_rings(phba);
15132 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
15133 * @pdev: pointer to PCI device.
15134 * @state: the current PCI connection state.
15136 * This routine is called from the PCI subsystem for error handling to device
15137 * with SLI-4 interface spec. This function is called by the PCI subsystem
15138 * after a PCI bus error affecting this device has been detected. When this
15139 * function is invoked, it will need to stop all the I/Os and interrupt(s)
15140 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
15141 * for the PCI subsystem to perform proper recovery as desired.
15144 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15145 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15147 static pci_ers_result_t
15148 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
15150 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15151 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	switch (state) {
	case pci_channel_io_normal:
15156 /* Non-fatal error, prepare for recovery */
15157 lpfc_sli4_prep_dev_for_recover(phba);
15158 return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
		/* Fatal error, prepare for slot reset */
		if (!hba_pci_err)
			lpfc_sli4_prep_dev_for_reset(phba);
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2832 Already handling PCI error "
					"state: x%x\n", state);
		return PCI_ERS_RESULT_NEED_RESET;
15169 case pci_channel_io_perm_failure:
15170 set_bit(HBA_PCI_ERR, &phba->bit_flags);
15171 /* Permanent failure, prepare for device down */
15172 lpfc_sli4_prep_dev_for_perm_failure(phba);
15173 return PCI_ERS_RESULT_DISCONNECT;
	default:
		hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2825 Unknown PCI error state: x%x\n", state);
		if (!hba_pci_err)
			lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
15187 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
15188 * @pdev: pointer to PCI device.
15190 * This routine is called from the PCI subsystem for error handling to device
15191 * with SLI-4 interface spec. It is called after PCI bus has been reset to
15192 * restart the PCI card from scratch, as if from a cold-boot. During the
15193 * PCI subsystem error recovery, after the driver returns
15194 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
15195 * recovery and then call this routine before calling the .resume method to
15196 * recover the device. This function will initialize the HBA device, enable
15197 * the interrupt, but it will just put the HBA to offline state without
15198 * passing any I/O traffic.
15201 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
15202 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15204 static pci_ers_result_t
15205 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
15207 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15208 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15209 struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;
	bool hba_pci_err;
15213 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
15214 if (pci_enable_device_mem(pdev)) {
15215 printk(KERN_ERR "lpfc: Cannot re-enable "
15216 "PCI device after reset.\n");
15217 return PCI_ERS_RESULT_DISCONNECT;
15220 pci_restore_state(pdev);
15222 hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags);
15224 dev_info(&pdev->dev,
15225 "hba_pci_err was not set, recovering slot reset.\n");
	/*
	 * pci_restore_state() clears the device's saved_state flag, so the
	 * restored state must be saved again before any later restore.
	 */
15230 pci_save_state(pdev);
15232 if (pdev->is_busmaster)
15233 pci_set_master(pdev);
15235 spin_lock_irq(&phba->hbalock);
15236 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
15237 spin_unlock_irq(&phba->hbalock);
15239 /* Init cpu_map array */
15240 lpfc_cpu_map_array_init(phba);
15241 /* Configure and enable interrupt */
15242 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
15243 if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
15247 return PCI_ERS_RESULT_DISCONNECT;
15249 phba->intr_mode = intr_mode;
15250 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
15252 /* Log the current active interrupt mode */
15253 lpfc_log_intr_mode(phba, phba->intr_mode);
15255 return PCI_ERS_RESULT_RECOVERED;
15259 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
15260 * @pdev: pointer to PCI device
15262 * This routine is called from the PCI subsystem for error handling to device
15263 * with SLI-4 interface spec. It is called when kernel error recovery tells
15264 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
15265 * error recovery. After this call, traffic can start to flow from this device
15269 lpfc_io_resume_s4(struct pci_dev *pdev)
15271 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15272 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	 * After a slot reset, the function reset is performed here in the
	 * io-resume phase because it is issued through a mailbox command,
	 * which requires DMA to be enabled. Taking the device offline will
	 * perform the necessary cleanup.
	 */
15280 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
15281 /* Perform device reset */
15282 lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}
15289 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
15290 * @pdev: pointer to PCI device
15291 * @pid: pointer to PCI device identifier
15293 * This routine is to be registered to the kernel's PCI subsystem. When an
15294 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
* at the PCI device-specific information of the device and driver to see
* if the driver can support this kind of device. If the match is
15297 * successful, the driver core invokes this routine. This routine dispatches
15298 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
15299 * do all the initialization that it needs to do to handle the HBA device
15303 * 0 - driver can claim the device
* negative value - driver cannot claim the device
15307 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
	int rc = -ENODEV;
	struct lpfc_sli_intf intf;
	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;
15315 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
15316 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
15317 rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);
15325 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
15326 * @pdev: pointer to PCI device
15328 * This routine is to be registered to the kernel's PCI subsystem. When an
15329 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
15330 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
15331 * remove routine, which will perform all the necessary cleanup for the
15332 * device to be removed from the PCI subsystem properly.
15335 lpfc_pci_remove_one(struct pci_dev *pdev)
15337 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15338 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15340 switch (phba->pci_dev_grp) {
15341 case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
15344 case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15349 "1424 Invalid PCI device group: 0x%x\n",
15350 phba->pci_dev_grp);
15357 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
15358 * @dev: pointer to device
15360 * This routine is to be registered to the kernel's PCI subsystem to support
15361 * system Power Management (PM). When PM invokes this method, it dispatches
15362 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
15363 * suspend the device.
15366 * 0 - driver suspended the device
15369 static int __maybe_unused
15370 lpfc_pci_suspend_one(struct device *dev)
15372 struct Scsi_Host *shost = dev_get_drvdata(dev);
15373 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15376 switch (phba->pci_dev_grp) {
15377 case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(dev);
		break;
15380 case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(dev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15385 "1425 Invalid PCI device group: 0x%x\n",
15386 phba->pci_dev_grp);
15393 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
15394 * @dev: pointer to device
15396 * This routine is to be registered to the kernel's PCI subsystem to support
15397 * system Power Management (PM). When PM invokes this method, it dispatches
15398 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
15399 * resume the device.
*	0 - driver resumed the device
15405 static int __maybe_unused
15406 lpfc_pci_resume_one(struct device *dev)
15408 struct Scsi_Host *shost = dev_get_drvdata(dev);
15409 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15412 switch (phba->pci_dev_grp) {
15413 case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(dev);
		break;
15416 case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(dev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15421 "1426 Invalid PCI device group: 0x%x\n",
15422 phba->pci_dev_grp);
15429 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
15430 * @pdev: pointer to PCI device.
15431 * @state: the current PCI connection state.
15433 * This routine is registered to the PCI subsystem for error handling. This
15434 * function is called by the PCI subsystem after a PCI bus error affecting
15435 * this device has been detected. When this routine is invoked, it dispatches
15436 * the action to the proper SLI-3 or SLI-4 device error detected handling
15437 * routine, which will perform the proper error detected operation.
15440 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15441 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15443 static pci_ers_result_t
15444 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
15446 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15447 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15448 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15450 if (phba->link_state == LPFC_HBA_ERROR &&
15451 phba->hba_flag & HBA_IOQ_FLUSH)
15452 return PCI_ERS_RESULT_NEED_RESET;
15454 switch (phba->pci_dev_grp) {
15455 case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
15458 case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15463 "1427 Invalid PCI device group: 0x%x\n",
15464 phba->pci_dev_grp);
15471 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
15472 * @pdev: pointer to PCI device.
15474 * This routine is registered to the PCI subsystem for error handling. This
15475 * function is called after PCI bus has been reset to restart the PCI card
15476 * from scratch, as if from a cold-boot. When this routine is invoked, it
15477 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
15478 * routine, which will perform the proper device reset.
15481 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
15482 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15484 static pci_ers_result_t
15485 lpfc_io_slot_reset(struct pci_dev *pdev)
15487 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15488 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15489 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15491 switch (phba->pci_dev_grp) {
15492 case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
15495 case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15500 "1428 Invalid PCI device group: 0x%x\n",
15501 phba->pci_dev_grp);
15508 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
15509 * @pdev: pointer to PCI device
15511 * This routine is registered to the PCI subsystem for error handling. It
15512 * is called when kernel error recovery tells the lpfc driver that it is
15513 * OK to resume normal PCI operation after PCI bus error recovery. When
15514 * this routine is invoked, it dispatches the action to the proper SLI-3
15515 * or SLI-4 device io_resume routine, which will resume the device operation.
15518 lpfc_io_resume(struct pci_dev *pdev)
15520 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15521 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15523 switch (phba->pci_dev_grp) {
15524 case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
15527 case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15532 "1429 Invalid PCI device group: 0x%x\n",
15533 phba->pci_dev_grp);
15540 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
15541 * @phba: pointer to lpfc hba data structure.
15543 * This routine checks to see if OAS is supported for this adapter. If
* supported, the configure Flash Optimized Fabric flag is set. Otherwise,
* the enable oas flag is cleared and the pool created for OAS device data
* is destroyed.
15550 lpfc_sli4_oas_verify(struct lpfc_hba *phba)
	if (!phba->cfg_EnableXLane)
		return;
	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
		phba->cfg_fof = 1;
	} else {
		phba->cfg_fof = 0;
		mempool_destroy(phba->device_data_mem_pool);
		phba->device_data_mem_pool = NULL;
	}
}
15568 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
15569 * @phba: pointer to lpfc hba data structure.
15571 * This routine checks to see if RAS is supported by the adapter. Check the
15572 * function through which RAS support enablement is to be done.
15575 lpfc_sli4_ras_init(struct lpfc_hba *phba)
	/* if ASIC_GEN_NUM >= 0xC */
15578 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
15579 LPFC_SLI_INTF_IF_TYPE_6) ||
15580 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
15581 LPFC_SLI_INTF_FAMILY_G6)) {
15582 phba->ras_fwlog.ras_hwsupport = true;
15583 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
15584 phba->cfg_ras_fwlog_buffsize)
15585 phba->ras_fwlog.ras_enabled = true;
		else
			phba->ras_fwlog.ras_enabled = false;
	} else {
		phba->ras_fwlog.ras_hwsupport = false;
	}
15594 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
15596 static const struct pci_error_handlers lpfc_err_handler = {
15597 .error_detected = lpfc_io_error_detected,
15598 .slot_reset = lpfc_io_slot_reset,
15599 .resume = lpfc_io_resume,
15602 static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
15603 lpfc_pci_suspend_one,
15604 lpfc_pci_resume_one);
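/*
 * SIMPLE_DEV_PM_OPS wires the same suspend/resume pair into all system
 * sleep transitions (suspend, hibernate, freeze), matching the note in the
 * per-SLI suspend/resume kerneldoc above.
 */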
15606 static struct pci_driver lpfc_driver = {
15607 .name = LPFC_DRIVER_NAME,
15608 .id_table = lpfc_id_table,
15609 .probe = lpfc_pci_probe_one,
15610 .remove = lpfc_pci_remove_one,
15611 .shutdown = lpfc_pci_remove_one,
15612 .driver.pm = &lpfc_pci_pm_ops_one,
15613 .err_handler = &lpfc_err_handler,
15616 static const struct file_operations lpfc_mgmt_fop = {
15617 .owner = THIS_MODULE,
15620 static struct miscdevice lpfc_mgmt_dev = {
15621 .minor = MISC_DYNAMIC_MINOR,
15622 .name = "lpfcmgmt",
15623 .fops = &lpfc_mgmt_fop,
15627 * lpfc_init - lpfc module initialization routine
15629 * This routine is to be invoked when the lpfc module is loaded into the
15630 * kernel. The special kernel macro module_init() is used to indicate the
15631 * role of this routine to the kernel as lpfc module entry point.
*	0 - success
*	-ENOMEM - FC attach transport failed
15636 * all others - failed
15643 pr_info(LPFC_MODULE_DESC "\n");
15644 pr_info(LPFC_COPYRIGHT "\n");
15646 error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
15649 "misc_register returned with status %d", error);
15652 lpfc_transport_functions.vport_create = lpfc_vport_create;
15653 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
15654 lpfc_transport_template =
15655 fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		goto unregister;
15658 lpfc_vport_transport_template =
15659 fc_attach_transport(&lpfc_vport_transport_functions);
15660 if (lpfc_vport_transport_template == NULL) {
		fc_release_transport(lpfc_transport_template);
		goto unregister;
	}
15664 lpfc_wqe_cmd_template();
15665 lpfc_nvmet_cmd_template();
15667 /* Initialize in case vector mapping is needed */
15668 lpfc_present_cpu = num_present_cpus();
15670 lpfc_pldv_detect = false;
15672 error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
15673 "lpfc/sli4:online",
15674 lpfc_cpu_online, lpfc_cpu_offline);
	if (error < 0)
		goto cpuhp_failure;
15677 lpfc_cpuhp_state = error;
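	/*
	 * With CPUHP_AP_ONLINE_DYN, a non-negative return value from
	 * cpuhp_setup_state_multi() is the dynamically allocated hotplug
	 * state id, which is why the value in 'error' is saved here.
	 */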
	error = pci_register_driver(&lpfc_driver);
	if (error)
		goto unwind;
	return error;

unwind:
	cpuhp_remove_multi_state(lpfc_cpuhp_state);
cpuhp_failure:
	fc_release_transport(lpfc_transport_template);
15689 fc_release_transport(lpfc_vport_transport_template);
unregister:
	misc_deregister(&lpfc_mgmt_dev);
15696 void lpfc_dmp_dbg(struct lpfc_hba *phba)
15698 unsigned int start_idx;
15699 unsigned int dbg_cnt;
15700 unsigned int temp_idx;
15703 unsigned long rem_nsec, iflags;
15704 bool log_verbose = false;
15705 struct lpfc_vport *port_iterator;
15707 /* Don't dump messages if we explicitly set log_verbose for the
15708 * physical port or any vport.
	if (phba->cfg_log_verbose)
		return;
15713 spin_lock_irqsave(&phba->port_list_lock, iflags);
15714 list_for_each_entry(port_iterator, &phba->port_list, listentry) {
		if (port_iterator->load_flag & FC_UNLOADING)
			continue;
		if (scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
			if (port_iterator->cfg_log_verbose)
				log_verbose = true;
			scsi_host_put(lpfc_shost_from_vport(port_iterator));
			if (log_verbose) {
				spin_unlock_irqrestore(&phba->port_list_lock,
						       iflags);
				return;
			}
		}
	}
	spin_unlock_irqrestore(&phba->port_list_lock, iflags);
	if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
		return;
15735 start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
15736 dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
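	/*
	 * Compute the dump window from the ring state: start_idx is the next
	 * write slot and dbg_cnt the number of buffered entries; the
	 * arithmetic below walks back dbg_cnt slots, wrapping modulo
	 * DBG_LOG_SZ, so the oldest entry is printed first.
	 */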
15739 temp_idx = start_idx;
15740 if (dbg_cnt >= DBG_LOG_SZ) {
15741 dbg_cnt = DBG_LOG_SZ;
15744 if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
15745 temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
15747 if (start_idx < dbg_cnt)
15748 start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
15750 start_idx -= dbg_cnt;
15753 dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
15754 start_idx, temp_idx, dbg_cnt);
15756 for (i = 0; i < dbg_cnt; i++) {
15757 if ((start_idx + i) < DBG_LOG_SZ)
15758 temp_idx = (start_idx + i) % DBG_LOG_SZ;
15761 rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
		dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
			 temp_idx,
			 (unsigned long)phba->dbg_log[temp_idx].t_ns,
			 rem_nsec / 1000,
			 phba->dbg_log[temp_idx].log);
15769 atomic_set(&phba->dbg_log_cnt, 0);
15770 atomic_set(&phba->dbg_log_dmping, 0);
15774 void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
15778 int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
15779 struct va_format vaf;
15782 va_start(args, fmt);
15783 if (unlikely(dbg_dmping)) {
		vaf.fmt = fmt;
		vaf.va = &args;
		dev_info(&phba->pcidev->dev, "%pV", &vaf);
	idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
	      DBG_LOG_SZ;
15793 atomic_inc(&phba->dbg_log_cnt);
15795 vscnprintf(phba->dbg_log[idx].log,
15796 sizeof(phba->dbg_log[idx].log), fmt, args);
15799 phba->dbg_log[idx].t_ns = local_clock();
15803 * lpfc_exit - lpfc module removal routine
15805 * This routine is invoked when the lpfc module is removed from the kernel.
15806 * The special kernel macro module_exit() is used to indicate the role of
15807 * this routine to the kernel as lpfc module exit point.
15812 misc_deregister(&lpfc_mgmt_dev);
15813 pci_unregister_driver(&lpfc_driver);
15814 cpuhp_remove_multi_state(lpfc_cpuhp_state);
15815 fc_release_transport(lpfc_transport_template);
15816 fc_release_transport(lpfc_vport_transport_template);
15817 idr_destroy(&lpfc_hba_index);
15820 module_init(lpfc_init);
15821 module_exit(lpfc_exit);
15822 MODULE_LICENSE("GPL");
15823 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
15824 MODULE_AUTHOR("Broadcom");
15825 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);