/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/of.h>
#include <linux/if_vlan.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "thunder_bgx.h"

#define DRV_NAME	"nicpf"
#define DRV_VERSION	"1.0"

#define NIC_VF_PER_MBX_REG 64

struct hw_info {
	u8	bgx_cnt;
	u8	chans_per_lmac;
	u8	chans_per_bgx;	/* Rx/Tx chans */
	u8	chans_per_rgx;
	u8	chans_per_lbk;
	u16	cpi_cnt;
	u16	rssi_cnt;
	u16	rss_ind_tbl_size;
	u16	tl4_cnt;
	u16	tl3_cnt;
	u8	tl2_cnt;
	u8	tl1_cnt;
	bool	tl1_per_bgx;	/* TL1 per BGX or per LMAC */
};

struct nicpf {
	struct pci_dev		*pdev;
	struct hw_info		*hw;
	u8			node;
	unsigned int		flags;
	u8			num_vf_en;	/* No of VFs enabled */
	bool			vf_enabled[MAX_NUM_VFS_SUPPORTED];
	void __iomem		*reg_base;	/* Register start address */
	u8			num_sqs_en;	/* Secondary qsets enabled */
	u64			nicvf[MAX_NUM_VFS_SUPPORTED];
	u8			vf_sqs[MAX_NUM_VFS_SUPPORTED][MAX_SQS_PER_VF];
	u8			pqs_vf[MAX_NUM_VFS_SUPPORTED];
	bool			sqs_used[MAX_NUM_VFS_SUPPORTED];
	struct pkind_cfg	pkind;
#define	NIC_SET_VF_LMAC_MAP(bgx, lmac)		(((bgx & 0xF) << 4) | (lmac & 0xF))
#define	NIC_GET_BGX_FROM_VF_LMAC_MAP(map)	((map >> 4) & 0xF)
#define	NIC_GET_LMAC_FROM_VF_LMAC_MAP(map)	(map & 0xF)
	u8			*vf_lmac_map;
	u16			cpi_base[MAX_NUM_VFS_SUPPORTED];
	u16			rssi_base[MAX_NUM_VFS_SUPPORTED];

	/* MSI-X */
	u8			num_vec;
	bool			irq_allocated[NIC_PF_MSIX_VECTORS];
	char			irq_name[NIC_PF_MSIX_VECTORS][20];
};

/* Supported devices */
static const struct pci_device_id nic_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Physical Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, nic_id_table);

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation. All accesses to the
 * device registers on this platform are implicitly strongly ordered with
 * respect to memory accesses. So writeq_relaxed() and readq_relaxed() are
 * safe to use with no memory barriers in this driver. The readq()/writeq()
 * functions add explicit ordering operations, which in this case are
 * redundant and only add overhead.
 */

/* Register read/write APIs */
static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val)
{
	writeq_relaxed(val, nic->reg_base + offset);
}

static u64 nic_reg_read(struct nicpf *nic, u64 offset)
{
	return readq_relaxed(nic->reg_base + offset);
}

/* PF -> VF mailbox communication APIs */
static void nic_enable_mbx_intr(struct nicpf *nic)
{
	int vf_cnt = pci_sriov_get_totalvfs(nic->pdev);

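/* Build a per-register interrupt mask: one bit per VF, saturating to all
 * 64 bits once a register's full complement of VFs is reached.
 */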
#define INTR_MASK(vfs) ((vfs < 64) ? (BIT_ULL(vfs) - 1) : (~0ull))

	/* Clear it, to avoid spurious interrupts (if any) */
	nic_reg_write(nic, NIC_PF_MAILBOX_INT, INTR_MASK(vf_cnt));

	/* Enable mailbox interrupt for all VFs */
	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, INTR_MASK(vf_cnt));
	/* One mailbox intr enable reg per 64 VFs */
	if (vf_cnt > 64) {
		nic_reg_write(nic, NIC_PF_MAILBOX_INT + sizeof(u64),
			      INTR_MASK(vf_cnt - 64));
		nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64),
			      INTR_MASK(vf_cnt - 64));
	}
}

static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
{
	nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), BIT_ULL(vf));
}

static u64 nic_get_mbx_addr(int vf)
{
	return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT);
}

/* Send a mailbox message to VF
 * @vf: VF to which this message is to be sent
 * @mbx: Message to be sent
 */
static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
{
	void __iomem *mbx_addr = nic->reg_base + nic_get_mbx_addr(vf);
	u64 *msg = (u64 *)mbx;

	/* In first revision HW, mbox interrupt is triggered
	 * when PF writes to MBOX(1), in next revisions when
	 * PF writes to MBOX(0)
	 */
	if (pass1_silicon(nic->pdev)) {
		/* see the comment for nic_reg_write()/nic_reg_read()
		 * functions above
		 */
		writeq_relaxed(msg[0], mbx_addr);
		writeq_relaxed(msg[1], mbx_addr + 8);
	} else {
		writeq_relaxed(msg[1], mbx_addr + 8);
		writeq_relaxed(msg[0], mbx_addr);
	}
}

/* Responds to VF's READY message with VF's
 * ID, node, MAC address etc.
 * @vf: VF which sent the READY message
 */
static void nic_mbx_send_ready(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	int bgx_idx, lmac;
	const u8 *mac;

	mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
	mbx.nic_cfg.vf_id = vf;

	mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;

	if (vf < nic->num_vf_en) {
		bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

		mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
		if (mac)
			ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
	}
	mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false;
	mbx.nic_cfg.node_id = nic->node;

	mbx.nic_cfg.loopback_supported = vf < nic->num_vf_en;

	nic_send_msg_to_vf(nic, vf, &mbx);
}

/* ACKs VF's mailbox message
 * @vf: VF to which the ACK is to be sent
 */
static void nic_mbx_send_ack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_ACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/* NACKs VF's mailbox message, signalling that PF is not able to
 * complete the action
 * @vf: VF to which the NACK is to be sent
 */
static void nic_mbx_send_nack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_NACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/* Flush all in-flight receive packets to memory and
 * bring down an active RQ
 */
static int nic_rcv_queue_sw_sync(struct nicpf *nic)
{
	u16 timeout = ~0x00;

	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
	/* Wait till sync cycle is finished */
	while (timeout) {
		if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
			break;
		timeout--;
	}
	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
	if (!timeout) {
		dev_err(&nic->pdev->dev, "Receive queue software sync failed");
		return 1;
	}
	return 0;
}

/* Get BGX Rx/Tx stats and respond to VF's request */
static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
{
	int bgx_idx, lmac;
	union nic_mbx mbx = {};

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = bgx->vf_id;
	mbx.bgx_stats.rx = bgx->rx;
	mbx.bgx_stats.idx = bgx->idx;
	if (bgx->rx)
		mbx.bgx_stats.stats = bgx_get_rx_stats(nic->node, bgx_idx,
						       lmac, bgx->idx);
	else
		mbx.bgx_stats.stats = bgx_get_tx_stats(nic->node, bgx_idx,
						       lmac, bgx->idx);
	nic_send_msg_to_vf(nic, bgx->vf_id, &mbx);
}

/* Update hardware min/max frame size */
static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
{
	int bgx, lmac, lmac_cnt;
	u64 lmac_credits;

	if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS))
		return 1;

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
	lmac += bgx * MAX_LMAC_PER_BGX;

	new_frs += VLAN_ETH_HLEN + ETH_FCS_LEN + 4;

	/* Update corresponding LMAC credits */
	lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
	lmac_credits = nic_reg_read(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8));
	lmac_credits &= ~(0xFFFFFULL << 12);
	lmac_credits |= (((((48 * 1024) / lmac_cnt) - new_frs) / 16) << 12);
	nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8), lmac_credits);

	/* Enforce MTU in HW.
	 * This config is supported only from 88xx pass 2.0 onwards.
	 */
	if (!pass1_silicon(nic->pdev))
		nic_reg_write(nic,
			      NIC_PF_LMAC_0_7_CFG2 + (lmac * 8), new_frs);
	return 0;
}

/* Set minimum transmit packet size */
static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
{
	int lmac, max_lmac;
	u16 sdevid;
	u64 lmac_cfg;

	/* There is an issue in HW wherein, while sending GSO sized
	 * pkts as part of TSO, if the pkt len falls below this size
	 * the NIC will zero-pad the packet and also update the IP total
	 * length. Hence set this value to less than the min pkt size of
	 * MAC+IP+TCP headers; BGX will do the padding to transmit a
	 * 64 byte pkt.
	 */
	if (size > 52)
		size = 52;

	pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
	/* 81xx's RGX has only one LMAC */
	if (sdevid == PCI_SUBSYS_DEVID_81XX_NIC_PF)
		max_lmac = ((nic->hw->bgx_cnt - 1) * MAX_LMAC_PER_BGX) + 1;
	else
		max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX;

	for (lmac = 0; lmac < max_lmac; lmac++) {
		lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
		lmac_cfg &= ~(0xF << 2);
		lmac_cfg |= ((size / 4) << 2);
		nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
	}
}

/* Function to check number of LMACs present and set VF::LMAC mapping.
 * Mapping will be used while initializing channels.
 */
static void nic_set_lmac_vf_mapping(struct nicpf *nic)
{
	unsigned bgx_map = bgx_get_map(nic->node);
	int bgx, next_bgx_lmac = 0;
	int lmac, lmac_cnt = 0;
	u64 lmac_credit;

	nic->num_vf_en = 0;

	for (bgx = 0; bgx < nic->hw->bgx_cnt; bgx++) {
		if (!(bgx_map & (1 << bgx)))
			continue;
		lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
		for (lmac = 0; lmac < lmac_cnt; lmac++)
			nic->vf_lmac_map[next_bgx_lmac++] =
						NIC_SET_VF_LMAC_MAP(bgx, lmac);
		nic->num_vf_en += lmac_cnt;

		/* Program LMAC credits */
		lmac_credit = (1ull << 1); /* channel credit enable */
		lmac_credit |= (0x1ff << 2); /* Max outstanding pkt count */
		/* 48KB BGX Tx buffer size, each unit is of size 16 bytes */
		lmac_credit |= (((((48 * 1024) / lmac_cnt) -
				NIC_HW_MAX_FRS) / 16) << 12);
		lmac = bgx * MAX_LMAC_PER_BGX;
		for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++)
			nic_reg_write(nic,
				      NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
				      lmac_credit);

		/* On CN81XX there are only 8 VFs but the max possible no of
		 * interfaces is 9.
		 */
		if (nic->num_vf_en >= pci_sriov_get_totalvfs(nic->pdev)) {
			nic->num_vf_en = pci_sriov_get_totalvfs(nic->pdev);
			break;
		}
	}
}

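/* Fill in per-SoC hardware parameters (BGX count, channel counts, RSS
 * indirection table size, Tx scheduler topology) based on the PCI
 * subsystem device ID.
 */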
static void nic_get_hw_info(struct nicpf *nic)
{
	u16 sdevid;
	struct hw_info *hw = nic->hw;

	pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);

	switch (sdevid) {
	case PCI_SUBSYS_DEVID_88XX_NIC_PF:
		hw->bgx_cnt = MAX_BGX_PER_CN88XX;
		hw->chans_per_lmac = 16;
		hw->chans_per_bgx = 128;
		hw->cpi_cnt = 2048;
		hw->rssi_cnt = 4096;
		hw->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
		hw->tl3_cnt = 256;
		hw->tl2_cnt = 64;
		hw->tl1_cnt = 2;
		hw->tl1_per_bgx = true;
		break;
	case PCI_SUBSYS_DEVID_81XX_NIC_PF:
		hw->bgx_cnt = MAX_BGX_PER_CN81XX;
		hw->chans_per_lmac = 8;
		hw->chans_per_bgx = 32;
		hw->chans_per_rgx = 8;
		hw->chans_per_lbk = 24;
		hw->cpi_cnt = 512;
		hw->rssi_cnt = 256;
		hw->rss_ind_tbl_size = 32; /* Max RSSI / Max interfaces */
		hw->tl3_cnt = 64;
		hw->tl2_cnt = 16;
		hw->tl1_cnt = 10;
		hw->tl1_per_bgx = false;
		break;
	case PCI_SUBSYS_DEVID_83XX_NIC_PF:
		hw->bgx_cnt = MAX_BGX_PER_CN83XX;
		hw->chans_per_lmac = 8;
		hw->chans_per_bgx = 32;
		hw->chans_per_lbk = 64;
		hw->cpi_cnt = 2048;
		hw->rssi_cnt = 1024;
		hw->rss_ind_tbl_size = 64; /* Max RSSI / Max interfaces */
		hw->tl3_cnt = 256;
		hw->tl2_cnt = 64;
		hw->tl1_cnt = 18;
		hw->tl1_per_bgx = false;
		break;
	}
	hw->tl4_cnt = MAX_QUEUES_PER_QSET * pci_sriov_get_totalvfs(nic->pdev);
}

#define BGX0_BLOCK 8
#define BGX1_BLOCK 9

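/* One-time NIC block initialization: enable the block and backpressure,
 * set default PKIND parsing parameters, minimum Tx packet size, VLAN
 * stripping and the CQM drop level.
 */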
static void nic_init_hw(struct nicpf *nic)
{
	int i;
	u64 cqm_cfg;

	/* Enable NIC HW block */
	nic_reg_write(nic, NIC_PF_CFG, 0x3);

	/* Enable backpressure */
	nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);

	/* TNS and TNS bypass modes are present only on 88xx
	 * Also offset of this CSR has changed in 81xx and 83xx.
	 */
	if (nic->pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF) {
		/* Disable TNS mode on both interfaces */
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
			      (NIC_TNS_BYPASS_MODE << 7) |
			      BGX0_BLOCK | (1ULL << 16));
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
			      (NIC_TNS_BYPASS_MODE << 7) |
			      BGX1_BLOCK | (1ULL << 16));
	}

	/* Configure timestamp generation timeout to 10us */
	for (i = 0; i < nic->hw->bgx_cnt; i++)
		nic_reg_write(nic, NIC_PF_INTFX_SEND_CFG | (i << 3),
			      (0x6ULL << 60));

	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
		      (1ULL << 63) | BGX0_BLOCK);
	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
		      (1ULL << 63) | BGX1_BLOCK);

	/* PKIND configuration */
	nic->pkind.minlen = 0;
	nic->pkind.maxlen = NIC_HW_MAX_FRS + VLAN_ETH_HLEN + ETH_FCS_LEN + 4;
	nic->pkind.lenerr_en = 1;
	nic->pkind.rx_hdr = 0;
	nic->pkind.hdr_sl = 0;

	for (i = 0; i < NIC_MAX_PKIND; i++)
		nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3),
			      *(u64 *)&nic->pkind);

	nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);

	/* Timer config */
	nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);

	/* Enable VLAN ethertype matching and stripping */
	nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
		      (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);

	/* Check if HW expected value is higher (could be in future chips) */
	cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
	if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
		nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);
}

/* Channel parse index configuration */
static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
{
	struct hw_info *hw = nic->hw;
	u32 vnic, bgx, lmac, chan;
	u32 padd, cpi_count = 0;
	u64 cpi_base, cpi, rssi_base, rssi;
	u8  qset, rq_idx = 0;

	vnic = cfg->vf_id;
	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);

	chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
	cpi_base = vnic * NIC_MAX_CPI_PER_LMAC;
	rssi_base = vnic * hw->rss_ind_tbl_size;

	/* Rx channel configuration */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
		      (1ull << 63) | (vnic << 0));
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
		      ((u64)cfg->cpi_alg << 62) | (cpi_base << 48));

	if (cfg->cpi_alg == CPI_ALG_NONE)
		cpi_count = 1;
	else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
		cpi_count = 8;
	else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
		cpi_count = 16;
	else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6 bits DSCP */
		cpi_count = NIC_MAX_CPI_PER_LMAC;

	/* RSS Qset, Qidx mapping */
	qset = cfg->vf_id;
	rssi = rssi_base;
	for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
			      (qset << 3) | rq_idx);
		rq_idx++;
	}

	rssi = 0;
	cpi = cpi_base;
	for (; cpi < (cpi_base + cpi_count); cpi++) {
		/* Determine port to channel adder */
		if (cfg->cpi_alg != CPI_ALG_DIFF)
			padd = cpi % cpi_count;
		else
			padd = cpi % 8; /* 3 bits CS out of 6 bits DSCP */

		/* Leave RSS_SIZE as '0' to disable RSS */
		if (pass1_silicon(nic->pdev)) {
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
				      (vnic << 24) | (padd << 16) |
				      (rssi_base + rssi));
		} else {
			/* Set MPI_ALG to '0' to disable MCAM parsing */
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
				      (padd << 16));
			/* MPI index is same as CPI if MPI_ALG is not enabled */
			nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
				      (vnic << 24) | (rssi_base + rssi));
		}

		if ((rssi + 1) >= cfg->rq_cnt)
			continue;

		if (cfg->cpi_alg == CPI_ALG_VLAN)
			rssi++;
		else if (cfg->cpi_alg == CPI_ALG_VLAN16)
			rssi = ((cpi - cpi_base) & 0xe) >> 1;
		else if (cfg->cpi_alg == CPI_ALG_DIFF)
			rssi = ((cpi - cpi_base) & 0x38) >> 3;
	}
	nic->cpi_base[cfg->vf_id] = cpi_base;
	nic->rssi_base[cfg->vf_id] = rssi_base;
}

/* Responds to VF with its RSS indirection table size */
static void nic_send_rss_size(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.ind_tbl_size = nic->hw->rss_ind_tbl_size;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/* Receive side scaling configuration
 * configure:
 * - RSS index
 * - indir table i.e. hash::RQ mapping
 * - no of hash bits to consider
 */
static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
{
	u8  qset, idx = 0;
	u64 cpi_cfg, cpi_base, rssi_base, rssi;
	u64 idx_addr;

	rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset;

	rssi = rssi_base;

	for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
		u8 svf = cfg->ind_tbl[idx] >> 3;

		if (svf)
			qset = nic->vf_sqs[cfg->vf_id][svf - 1];
		else
			qset = cfg->vf_id;
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
			      (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
		idx++;
	}

	cpi_base = nic->cpi_base[cfg->vf_id];
	if (pass1_silicon(nic->pdev))
		idx_addr = NIC_PF_CPI_0_2047_CFG;
	else
		idx_addr = NIC_PF_MPI_0_2047_CFG;
	cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3));
	cpi_cfg &= ~(0xFULL << 20);
	cpi_cfg |= (cfg->hash_bits << 20);
	nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg);
}

/* 4 level transmit side scheduler configuration
 * for TNS bypass mode
 *
 * Sample configuration for SQ0 on 88xx
 * VNIC0-SQ0 -> TL4(0)   -> TL3[0]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC1-SQ0 -> TL4(8)   -> TL3[2]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC2-SQ0 -> TL4(16)  -> TL3[4]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC3-SQ0 -> TL4(24)  -> TL3[6]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
 * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
 */
static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
			       struct sq_cfg_msg *sq)
{
	struct hw_info *hw = nic->hw;
	u32 bgx, lmac, chan;
	u32 tl2, tl3, tl4;
	u32 rr_quantum;
	u8 sq_idx = sq->sq_num;
	u8 pqs_vnic;
	int svf;

	if (sq->sqs_mode)
		pqs_vnic = nic->pqs_vf[vnic];
	else
		pqs_vnic = vnic;

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);

	/* 24 bytes for FCS, IPG and preamble */
	rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);

	/* For 88xx, TL4s 0-511 transmit via BGX0 and
	 * TL4s 512-1023 transmit via BGX1.
	 */
	if (hw->tl1_per_bgx) {
		tl4 = bgx * (hw->tl4_cnt / hw->bgx_cnt);
		if (!sq->sqs_mode) {
			tl4 += (lmac * MAX_QUEUES_PER_QSET);
		} else {
			for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
				if (nic->vf_sqs[pqs_vnic][svf] == vnic)
					break;
			}
			tl4 += (MAX_LMAC_PER_BGX * MAX_QUEUES_PER_QSET);
			tl4 += (lmac * MAX_QUEUES_PER_QSET * MAX_SQS_PER_VF);
			tl4 += (svf * MAX_QUEUES_PER_QSET);
		}
	} else {
		tl4 = (vnic * MAX_QUEUES_PER_QSET);
	}
	tl4 += sq_idx;

	tl3 = tl4 / (hw->tl4_cnt / hw->tl3_cnt);
	nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
		      ((u64)vnic << NIC_QS_ID_SHIFT) |
		      ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
	nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
		      ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);

	nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);

	/* On 88xx channels 0-127 are for BGX0 and
	 * channels 128-255 for BGX1.
	 *
	 * On 81xx/83xx the TL3_CHAN reg should be configured with the channel
	 * within the LMAC i.e. 0-7 and not the actual channel number as on 88xx.
	 */
	chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
	if (hw->tl1_per_bgx)
		nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
	else
		nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), 0);

	/* Enable backpressure on the channel */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);

	tl2 = tl3 >> 2;
	nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
	nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
	/* No priorities as of now */
	nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);

	/* Unlike 88xx, where TL2s 0-31 transmit to TL1 '0' and the rest to
	 * TL1 '1', on 81xx/83xx TL2 needs to be configured to transmit to
	 * one of the possible LMACs.
	 *
	 * This register doesn't exist on 88xx.
	 */
	if (!hw->tl1_per_bgx)
		nic_reg_write(nic, NIC_PF_TL2_LMAC | (tl2 << 3),
			      lmac + (bgx * MAX_LMAC_PER_BGX));
}

/* Send primary nicvf pointer to secondary QS's VF */
static void nic_send_pnicvf(struct nicpf *nic, int sqs)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
	mbx.nicvf.nicvf = nic->nicvf[nic->pqs_vf[sqs]];
	nic_send_msg_to_vf(nic, sqs, &mbx);
}

/* Send SQS's nicvf pointer to primary QS's VF */
static void nic_send_snicvf(struct nicpf *nic, struct nicvf_ptr *nicvf)
{
	union nic_mbx mbx = {};
	int sqs_id = nic->vf_sqs[nicvf->vf_id][nicvf->sqs_id];

	mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
	mbx.nicvf.sqs_id = nicvf->sqs_id;
	mbx.nicvf.nicvf = nic->nicvf[sqs_id];
	nic_send_msg_to_vf(nic, nicvf->vf_id, &mbx);
}

/* Find next available Qset that can be assigned as a
 * secondary Qset to a VF.
 */
static int nic_nxt_avail_sqs(struct nicpf *nic)
{
	int sqs;

	for (sqs = 0; sqs < nic->num_sqs_en; sqs++) {
		if (nic->sqs_used[sqs])
			continue;
		nic->sqs_used[sqs] = true;
		return sqs + nic->num_vf_en;
	}
	return -1;
}

/* Allocate additional Qsets for requested VF */
static void nic_alloc_sqs(struct nicpf *nic, struct sqs_alloc *sqs)
{
	union nic_mbx mbx = {};
	int idx, alloc_qs = 0;
	int sqs_id;

	if (!nic->num_sqs_en)
		goto send_mbox;

	for (idx = 0; idx < sqs->qs_count; idx++) {
		sqs_id = nic_nxt_avail_sqs(nic);
		if (sqs_id < 0)
			break;
		nic->vf_sqs[sqs->vf_id][idx] = sqs_id;
		nic->pqs_vf[sqs_id] = sqs->vf_id;
		alloc_qs++;
	}

send_mbox:
	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
	mbx.sqs_alloc.vf_id = sqs->vf_id;
	mbx.sqs_alloc.qs_count = alloc_qs;
	nic_send_msg_to_vf(nic, sqs->vf_id, &mbx);
}

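/* Put the BGX LMAC backing this VF into or out of internal loopback,
 * as requested via NIC_MBOX_MSG_LOOPBACK.
 */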
static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
{
	int bgx_idx, lmac_idx;

	if (lbk->vf_id >= nic->num_vf_en)
		return -1;

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
	lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);

	bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);

	/* Enable moving average calculation.
	 * Keep the LVL/AVG delay to the HW enforced minimum so that not too
	 * many packets sneak in between average calculations.
	 */
	nic_reg_write(nic, NIC_PF_CQ_AVG_CFG,
		      (BIT_ULL(20) | 0x2ull << 14 | 0x1));
	nic_reg_write(nic, NIC_PF_RRM_AVG_CFG,
		      (BIT_ULL(20) | 0x3ull << 14 | 0x1));

	return 0;
}

/* Reset statistics counters */
static int nic_reset_stat_counters(struct nicpf *nic,
				   int vf, struct reset_stat_cfg *cfg)
{
	int i, stat, qnum;
	u64 reg_addr;

	for (i = 0; i < RX_STATS_ENUM_LAST; i++) {
		if (cfg->rx_stat_mask & BIT(i)) {
			reg_addr = NIC_PF_VNIC_0_127_RX_STAT_0_13 |
				   (vf << NIC_QS_ID_SHIFT) |
				   (i << 3);
			nic_reg_write(nic, reg_addr, 0);
		}
	}

	for (i = 0; i < TX_STATS_ENUM_LAST; i++) {
		if (cfg->tx_stat_mask & BIT(i)) {
			reg_addr = NIC_PF_VNIC_0_127_TX_STAT_0_4 |
				   (vf << NIC_QS_ID_SHIFT) |
				   (i << 3);
			nic_reg_write(nic, reg_addr, 0);
		}
	}

	for (i = 0; i <= 15; i++) {
		qnum = i >> 1;
		stat = i & 1 ? 1 : 0;
		reg_addr = (vf << NIC_QS_ID_SHIFT) |
			   (qnum << NIC_Q_NUM_SHIFT) | (stat << 3);
		if (cfg->rq_stat_mask & BIT(i)) {
			reg_addr |= NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1;
			nic_reg_write(nic, reg_addr, 0);
		}
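		/* Recompute the address from scratch: OR-ing the SQ stat
		 * base into the RQ-adjusted reg_addr above would yield a
		 * bogus offset when both masks have this bit set.
		 */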
		if (cfg->sq_stat_mask & BIT(i)) {
			reg_addr = (vf << NIC_QS_ID_SHIFT) |
				   (qnum << NIC_Q_NUM_SHIFT) | (stat << 3) |
				   NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1;
			nic_reg_write(nic, reg_addr, 0);
		}
	}

	return 0;
}

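/* Configure the RX parser to recognize GENEVE, NVGRE and VXLAN tunnel
 * headers (ports and protocol defaults).
 */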
static void nic_enable_tunnel_parsing(struct nicpf *nic, int vf)
{
	u64 prot_def = (IPV6_PROT << 32) | (IPV4_PROT << 16) | ET_PROT;
	u64 vxlan_prot_def = (IPV6_PROT_DEF << 32) |
			     ((IPV4_PROT_DEF) << 16) | ET_PROT_DEF;

	/* Configure tunnel parsing parameters */
	nic_reg_write(nic, NIC_PF_RX_GENEVE_DEF,
		      (1ULL << 63 | UDP_GENEVE_PORT_NUM));
	nic_reg_write(nic, NIC_PF_RX_GENEVE_PROT_DEF,
		      ((7ULL << 61) | prot_def));
	nic_reg_write(nic, NIC_PF_RX_NVGRE_PROT_DEF,
		      ((7ULL << 61) | prot_def));
	nic_reg_write(nic, NIC_PF_RX_VXLAN_DEF_0_1,
		      ((1ULL << 63) | UDP_VXLAN_PORT_NUM));
	nic_reg_write(nic, NIC_PF_RX_VXLAN_PROT_DEF,
		      ((0xfULL << 60) | vxlan_prot_def));
}

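/* Mark a VF enabled/disabled and, for primary VFs, enable or disable
 * Rx/Tx on the LMAC it maps to.
 */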
static void nic_enable_vf(struct nicpf *nic, int vf, bool enable)
{
	int bgx, lmac;

	nic->vf_enabled[vf] = enable;

	if (vf >= nic->num_vf_en)
		return;

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

	bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable);
}

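/* Handle a NIC_MBOX_MSG_PFC request: either report the LMAC's current
 * pause frame settings back to the VF or apply the requested ones.
 */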
static void nic_pause_frame(struct nicpf *nic, int vf, struct pfc *cfg)
{
	int bgx, lmac;
	struct pfc pfc;
	union nic_mbx mbx = {};

	if (vf >= nic->num_vf_en)
		return;
	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

	if (cfg->get) {
		bgx_lmac_get_pfc(nic->node, bgx, lmac, &pfc);
		mbx.pfc.msg = NIC_MBOX_MSG_PFC;
		mbx.pfc.autoneg = pfc.autoneg;
		mbx.pfc.fc_rx = pfc.fc_rx;
		mbx.pfc.fc_tx = pfc.fc_tx;
		nic_send_msg_to_vf(nic, vf, &mbx);
	} else {
		bgx_lmac_set_pfc(nic->node, bgx, lmac, cfg);
		nic_mbx_send_ack(nic, vf);
	}
}

/* Enable or disable HW timestamping by BGX for pkts received on an LMAC */
static void nic_config_timestamp(struct nicpf *nic, int vf, struct set_ptp *ptp)
{
	struct pkind_cfg *pkind;
	u8 lmac, bgx_idx;
	u64 pkind_val, pkind_idx;

	if (vf >= nic->num_vf_en)
		return;

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

	pkind_idx = lmac + bgx_idx * MAX_LMAC_PER_BGX;
	pkind_val = nic_reg_read(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3));
	pkind = (struct pkind_cfg *)&pkind_val;

	if (ptp->enable && !pkind->hdr_sl) {
		/* Skiplen to exclude the 8 byte timestamp while parsing pkt.
		 * If not configured, will result in L2 errors.
		 */
		pkind->hdr_sl = 4; /* 8 bytes, in units of 2 bytes */
		/* Adjust max packet length allowed */
		pkind->maxlen += (pkind->hdr_sl * 2);
		bgx_config_timestamping(nic->node, bgx_idx, lmac, true);
		nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7 | (1 << 3),
			      (ETYPE_ALG_ENDPARSE << 16) | ETH_P_1588);
	} else if (!ptp->enable && pkind->hdr_sl) {
		pkind->maxlen -= (pkind->hdr_sl * 2);
		pkind->hdr_sl = 0;
		bgx_config_timestamping(nic->node, bgx_idx, lmac, false);
		nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7 | (1 << 3),
			      (ETYPE_ALG_SKIP << 16) | ETH_P_8021Q);
	}

	nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val);
}

/* Get BGX LMAC link status and update the corresponding VF.
 * Valid only if an internal L2 switch is not present; otherwise the
 * VF link is always treated as up.
 */
static void nic_link_status_get(struct nicpf *nic, u8 vf)
{
	union nic_mbx mbx = {};
	struct bgx_link_status link;
	u8 bgx, lmac;

	mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;

	/* Get BGX, LMAC indices for the VF */
	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

	/* Get interface link status */
	bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);

	/* Fill in current link status */
	mbx.link_status.link_up = link.link_up;
	mbx.link_status.duplex = link.duplex;
	mbx.link_status.speed = link.speed;
	mbx.link_status.mac_type = link.mac_type;

	/* reply with link status */
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/* Interrupt handler to handle mailbox messages from VFs */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	u64 *mbx_data;
	u64 mbx_addr;
	u64 reg_addr;
	u64 cfg;
	int bgx, lmac;
	int i;
	int ret = 0;

	mbx_addr = nic_get_mbx_addr(vf);
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nic_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	dev_dbg(&nic->pdev->dev, "%s: Mailbox msg 0x%02x from VF%d\n",
		__func__, mbx.msg.msg, vf);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic_mbx_send_ready(nic, vf);
		return;
	case NIC_MBOX_MSG_QS_CFG:
		reg_addr = NIC_PF_QSET_0_127_CFG |
			   (mbx.qs.num << NIC_QS_ID_SHIFT);
		cfg = mbx.qs.cfg;
		/* Check if it's a secondary Qset */
		if (vf >= nic->num_vf_en) {
			cfg = cfg & (~0x7FULL);
			/* Assign this Qset to primary Qset's VF */
			cfg |= nic->pqs_vf[vf];
		}
		nic_reg_write(nic, reg_addr, cfg);
		break;
	case NIC_MBOX_MSG_RQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		/* Enable CQE_RX2_S extension in CQE_RX descriptor.
		 * This gets appended by default on 81xx/83xx chips,
		 * for consistency enable the same on 88xx pass2
		 * where this is introduced.
		 */
		if (pass2_silicon(nic->pdev))
			nic_reg_write(nic, NIC_PF_RX_CFG, 0x01);
		if (!pass1_silicon(nic->pdev))
			nic_enable_tunnel_parsing(nic, vf);
		break;
	case NIC_MBOX_MSG_RQ_BP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_RQ_SW_SYNC:
		ret = nic_rcv_queue_sw_sync(nic);
		break;
	case NIC_MBOX_MSG_RQ_DROP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_SQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
			   (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.sq.cfg);
		nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
		break;
	case NIC_MBOX_MSG_SET_MAC:
		if (vf >= nic->num_vf_en) {
			ret = -1; /* NACK */
			break;
		}
		lmac = mbx.mac.vf_id;
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
		break;
	case NIC_MBOX_MSG_SET_MAX_FRS:
		ret = nic_update_hw_frs(nic, mbx.frs.max_frs,
					mbx.frs.vf_id);
		break;
	case NIC_MBOX_MSG_CPI_CFG:
		nic_config_cpi(nic, &mbx.cpi_cfg);
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic_send_rss_size(nic, vf);
		return;
	case NIC_MBOX_MSG_RSS_CFG:
	case NIC_MBOX_MSG_RSS_CFG_CONT:
		nic_config_rss(nic, &mbx.rss_cfg);
		break;
	case NIC_MBOX_MSG_CFG_DONE:
		/* Last message of VF config msg sequence */
		nic_enable_vf(nic, vf, true);
		break;
	case NIC_MBOX_MSG_SHUTDOWN:
		/* First msg in VF teardown sequence */
		if (vf >= nic->num_vf_en)
			nic->sqs_used[vf - nic->num_vf_en] = false;
		nic->pqs_vf[vf] = 0;
		nic_enable_vf(nic, vf, false);
		break;
	case NIC_MBOX_MSG_ALLOC_SQS:
		nic_alloc_sqs(nic, &mbx.sqs_alloc);
		return;
	case NIC_MBOX_MSG_NICVF_PTR:
		nic->nicvf[vf] = mbx.nicvf.nicvf;
		break;
	case NIC_MBOX_MSG_PNICVF_PTR:
		nic_send_pnicvf(nic, vf);
		return;
	case NIC_MBOX_MSG_SNICVF_PTR:
		nic_send_snicvf(nic, &mbx.nicvf);
		return;
	case NIC_MBOX_MSG_BGX_STATS:
		nic_get_bgx_stats(nic, &mbx.bgx_stats);
		return;
	case NIC_MBOX_MSG_LOOPBACK:
		ret = nic_config_loopback(nic, &mbx.lbk);
		break;
	case NIC_MBOX_MSG_RESET_STAT_COUNTER:
		ret = nic_reset_stat_counters(nic, vf, &mbx.reset_stat);
		break;
	case NIC_MBOX_MSG_PFC:
		nic_pause_frame(nic, vf, &mbx.pfc);
		return;
	case NIC_MBOX_MSG_PTP_CFG:
		nic_config_timestamp(nic, vf, &mbx.ptp);
		break;
	case NIC_MBOX_MSG_RESET_XCAST:
		if (vf >= nic->num_vf_en) {
			ret = -1; /* NACK */
			break;
		}
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		bgx_reset_xcast_mode(nic->node, bgx, lmac,
				     vf < NIC_VF_PER_MBX_REG ? vf :
				     vf - NIC_VF_PER_MBX_REG);
		break;

	case NIC_MBOX_MSG_ADD_MCAST:
		if (vf >= nic->num_vf_en) {
			ret = -1; /* NACK */
			break;
		}
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		bgx_set_dmac_cam_filter(nic->node, bgx, lmac,
					mbx.xcast.mac,
					vf < NIC_VF_PER_MBX_REG ? vf :
					vf - NIC_VF_PER_MBX_REG);
		break;

	case NIC_MBOX_MSG_SET_XCAST:
		if (vf >= nic->num_vf_en) {
			ret = -1; /* NACK */
			break;
		}
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.mode);
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		if (vf >= nic->num_vf_en) {
			ret = -1; /* NACK */
			break;
		}
		nic_link_status_get(nic, vf);
		return;
	default:
		dev_err(&nic->pdev->dev,
			"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
		break;
	}

	if (!ret) {
		nic_mbx_send_ack(nic, vf);
	} else if (mbx.msg.msg != NIC_MBOX_MSG_READY) {
		dev_err(&nic->pdev->dev, "NACK for MBOX 0x%02x from VF %d\n",
			mbx.msg.msg, vf);
		nic_mbx_send_nack(nic, vf);
	}
}

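/* Per-vector MSI-X handler: each mailbox interrupt register covers
 * NIC_VF_PER_MBX_REG (64) VFs; service and clear every pending bit.
 */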
static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
{
	struct nicpf *nic = (struct nicpf *)nic_irq;
	int mbx;
	u64 intr;
	u8  vf;

	if (irq == pci_irq_vector(nic->pdev, NIC_PF_INTR_ID_MBOX0))
		mbx = 0;
	else
		mbx = 1;

	intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
	dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
	for (vf = 0; vf < NIC_VF_PER_MBX_REG; vf++) {
		if (intr & (1ULL << vf)) {
			dev_dbg(&nic->pdev->dev, "Intr from VF %d\n",
				vf + (mbx * NIC_VF_PER_MBX_REG));

			nic_handle_mbx_intr(nic, vf +
					    (mbx * NIC_VF_PER_MBX_REG));
			nic_clear_mbx_intr(nic, vf, mbx);
		}
	}
	return IRQ_HANDLED;
}

static void nic_free_all_interrupts(struct nicpf *nic)
{
	int irq;

	for (irq = 0; irq < nic->num_vec; irq++) {
		if (nic->irq_allocated[irq])
			free_irq(pci_irq_vector(nic->pdev, irq), nic);
		nic->irq_allocated[irq] = false;
	}
}

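/* Allocate MSI-X vectors and hook up one mailbox handler per vector */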
static int nic_register_interrupts(struct nicpf *nic)
{
	int i, ret;

	nic->num_vec = pci_msix_vec_count(nic->pdev);

	/* Enable MSI-X */
	ret = pci_alloc_irq_vectors(nic->pdev, nic->num_vec, nic->num_vec,
				    PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(&nic->pdev->dev,
			"Request for #%d msix vectors failed, returned %d\n",
			nic->num_vec, ret);
		return ret;
	}

	/* Register mailbox interrupt handler */
	for (i = NIC_PF_INTR_ID_MBOX0; i < nic->num_vec; i++) {
		sprintf(nic->irq_name[i],
			"NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0));

		ret = request_irq(pci_irq_vector(nic->pdev, i),
				  nic_mbx_intr_handler, 0,
				  nic->irq_name[i], nic);
		if (ret)
			goto fail;

		nic->irq_allocated[i] = true;
	}

	/* Enable mailbox interrupt */
	nic_enable_mbx_intr(nic);
	return 0;

fail:
	dev_err(&nic->pdev->dev, "Request irq failed\n");
	nic_free_all_interrupts(nic);
	pci_free_irq_vectors(nic->pdev);
	nic->num_vec = 0;
	return ret;
}

static void nic_unregister_interrupts(struct nicpf *nic)
{
	nic_free_all_interrupts(nic);
	pci_free_irq_vectors(nic->pdev);
	nic->num_vec = 0;
}

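/* Work out how many secondary Qsets to reserve so that VFs can grow
 * beyond MAX_QUEUES_PER_QSET queues on large CPU counts.
 */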
static int nic_num_sqs_en(struct nicpf *nic, int vf_en)
{
	int pos, sqs_per_vf = MAX_SQS_PER_VF_SINGLE_NODE;
	u16 total_vf;

	/* Secondary Qsets are needed only if the CPU count is
	 * more than MAX_QUEUES_PER_QSET.
	 */
	if (num_online_cpus() <= MAX_QUEUES_PER_QSET)
		return 0;

	/* Check if it's a multi-node environment */
	if (nr_node_ids > 1)
		sqs_per_vf = MAX_SQS_PER_VF;

	pos = pci_find_ext_capability(nic->pdev, PCI_EXT_CAP_ID_SRIOV);
	pci_read_config_word(nic->pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf);
	return min(total_vf - vf_en, vf_en * sqs_per_vf);
}

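/* Enable SR-IOV: expose one VF per LMAC plus any secondary Qset VFs */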
static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
{
	int pos = 0;
	int vf_en;
	int err;
	u16 total_vf_cnt;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n");
		return -ENODEV;
	}

	pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt);
	if (total_vf_cnt < nic->num_vf_en)
		nic->num_vf_en = total_vf_cnt;

	if (!total_vf_cnt)
		return 0;

	vf_en = nic->num_vf_en;
	nic->num_sqs_en = nic_num_sqs_en(nic, nic->num_vf_en);
	vf_en += nic->num_sqs_en;

	err = pci_enable_sriov(pdev, vf_en);
	if (err) {
		dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
			vf_en);
		nic->num_vf_en = 0;
		return err;
	}

	dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n",
		 vf_en);

	nic->flags |= NIC_SRIOV_ENABLED;
	return 0;
}

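/* PCI probe: map the PF register BAR, discover HW parameters, initialize
 * the NIC block, set up mailbox interrupts and enable SR-IOV.
 */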
static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct nicpf *nic;
	u8     max_lmac;
	int    err;

	BUILD_BUG_ON(sizeof(union nic_mbx) > 16);

	nic = devm_kzalloc(dev, sizeof(*nic), GFP_KERNEL);
	if (!nic)
		return -ENOMEM;

	nic->hw = devm_kzalloc(dev, sizeof(struct hw_info), GFP_KERNEL);
	if (!nic->hw)
		return -ENOMEM;

	pci_set_drvdata(pdev, nic);

	nic->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
		goto err_release_regions;
	}

	/* MAP PF's configuration registers */
	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!nic->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	nic->node = nic_get_node_id(pdev);

	/* Get HW capability info */
	nic_get_hw_info(nic);

	/* Allocate memory for LMAC tracking elements */
	err = -ENOMEM;
	max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX;

	nic->vf_lmac_map = devm_kmalloc_array(dev, max_lmac, sizeof(u8),
					      GFP_KERNEL);
	if (!nic->vf_lmac_map)
		goto err_release_regions;

	/* Initialize hardware */
	nic_init_hw(nic);

	nic_set_lmac_vf_mapping(nic);

	/* Register interrupts */
	err = nic_register_interrupts(nic);
	if (err)
		goto err_release_regions;

	/* Configure SRIOV */
	err = nic_sriov_init(pdev, nic);
	if (err)
		goto err_unregister_interrupts;

	return 0;

err_unregister_interrupts:
	nic_unregister_interrupts(nic);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void nic_remove(struct pci_dev *pdev)
{
	struct nicpf *nic = pci_get_drvdata(pdev);

	if (!nic)
		return;

	if (nic->flags & NIC_SRIOV_ENABLED)
		pci_disable_sriov(pdev);

	nic_unregister_interrupts(nic);
	pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver nic_driver = {
	.name = DRV_NAME,
	.id_table = nic_id_table,
	.probe = nic_probe,
	.remove = nic_remove,
};

static int __init nic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&nic_driver);
}

static void __exit nic_cleanup_module(void)
{
	pci_unregister_driver(&nic_driver);
}

module_init(nic_init_module);
module_exit(nic_cleanup_module);