/* Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/prefetch.h>
#include <linux/irq.h>
#include <linux/iommu.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
#include <linux/net_tstamp.h>
#include <linux/workqueue.h>

#include "nicvf_queues.h"
#include "thunder_bgx.h"
#include "../common/cavium_ptp.h"
#define DRV_NAME	"nicvf"
#define DRV_VERSION	"1.0"
/* Supported devices */
static const struct pci_device_id nicvf_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_88XX_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_81XX_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_83XX_NIC_VF) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, nicvf_id_table);
static int debug = 0x00;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug message level bitmap");

static int cpi_alg = CPI_ALG_NONE;
module_param(cpi_alg, int, 0444);
MODULE_PARM_DESC(cpi_alg,
		 "CPI algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
	return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
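/* Note: in the full driver this offset is applied only when the VF runs as
 * a secondary Qset (sqs_mode); the primary VF maps qidx 1:1. Example,
 * assuming MAX_CMP_QUEUES_PER_QS is 8: queue 2 of secondary Qset 0
 * (sqs_id == 0) maps to netdev queue 2 + (0 + 1) * 8 == 10.
 */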
/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation. All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver. The readq()/writeq() functions add
 * an explicit ordering operation which in this case is redundant, and only
 * adds overhead.
 */
/* Register read/write APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
	writeq_relaxed(val, nic->reg_base + offset);

u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
	return readq_relaxed(nic->reg_base + offset);

void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val)
	void __iomem *addr = nic->reg_base + offset;

	writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));

u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
	void __iomem *addr = nic->reg_base + offset;

	return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
/* VF -> PF mailbox communication */
static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
	u64 *msg = (u64 *)mbx;

	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
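/* The VF/PF mailbox is just two 64-bit registers (16 bytes), so every
 * union nic_mbx message must fit in 128 bits; the two writes above copy
 * the whole message into the hardware mailbox.
 */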
int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
	int timeout = NIC_MBOX_MSG_TIMEOUT;

	mutex_lock(&nic->rx_mode_mtx);

	nic->pf_acked = false;
	nic->pf_nacked = false;

	nicvf_write_to_mbx(nic, mbx);

	/* Wait for previous message to be acked, timeout 2sec */
	while (!nic->pf_acked) {
		if (nic->pf_nacked) {
			netdev_err(nic->netdev,
				   "PF NACK to mbox msg 0x%02x from VF%d\n",
				   (mbx->msg.msg & 0xFF), nic->vf_id);

		netdev_err(nic->netdev,
			   "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
			   (mbx->msg.msg & 0xFF), nic->vf_id);

	mutex_unlock(&nic->rx_mode_mtx);
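/* Typical usage (sketch, based on the callers later in this file): fill one
 * union nic_mbx and send it, e.g.
 *
 *	union nic_mbx mbx = {};
 *
 *	mbx.msg.msg = NIC_MBOX_MSG_READY;
 *	ret = nicvf_send_msg_to_pf(nic, &mbx);
 *
 * The call sleeps (mutex plus polling wait) until the PF ACKs/NACKs or the
 * timeout expires, so it must not be used from atomic context.
 */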
/* Checks if the VF is able to communicate with the PF
 * and also gets the VNIC number this VF is associated with.
 */
static int nicvf_check_pf_ready(struct nicvf *nic)
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_READY;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		netdev_err(nic->netdev,
			   "PF didn't respond to READY msg\n");
static void nicvf_send_cfg_done(struct nicvf *nic)
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		netdev_err(nic->netdev,
			   "PF didn't respond to CFG DONE msg\n");
static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
	if (bgx->rx)
		nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
	else
		nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
static void nicvf_handle_mbx_intr(struct nicvf *nic)
	union nic_mbx mbx = {};

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_addr += sizeof(u64);
	}

	netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic->pf_acked = true;
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		if (!nic->set_mac_pending)
			ether_addr_copy(nic->netdev->dev_addr,
					mbx.nic_cfg.mac_addr);
		nic->sqs_mode = mbx.nic_cfg.sqs_mode;
		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
		nic->link_up = false;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = true;
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_STATS:
		nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		nic->pf_acked = true;
		if (nic->link_up != mbx.link_status.link_up) {
			nic->link_up = mbx.link_status.link_up;
			nic->duplex = mbx.link_status.duplex;
			nic->speed = mbx.link_status.speed;
			nic->mac_type = mbx.link_status.mac_type;
			if (nic->link_up) {
				netdev_info(nic->netdev,
					    "Link is Up %d Mbps %s duplex\n",
					    nic->speed,
					    nic->duplex == DUPLEX_FULL ?
					    "Full" : "Half");
				netif_carrier_on(nic->netdev);
				netif_tx_start_all_queues(nic->netdev);
			} else {
				netdev_info(nic->netdev, "Link is Down\n");
				netif_carrier_off(nic->netdev);
				netif_tx_stop_all_queues(nic->netdev);
			}
		}
		break;
	case NIC_MBOX_MSG_ALLOC_SQS:
		nic->sqs_count = mbx.sqs_alloc.qs_count;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_SNICVF_PTR:
		/* Primary VF: make note of secondary VF's pointer
		 * to be used during packet transmission.
		 */
		nic->snicvf[mbx.nicvf.sqs_id] =
			(struct nicvf *)mbx.nicvf.nicvf;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_PNICVF_PTR:
		/* Secondary VF/Qset: make note of primary VF's pointer
		 * to be used during packet reception, to hand packets
		 * over to primary VF's netdev.
		 */
		nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_PFC:
		nic->pfc.autoneg = mbx.pfc.autoneg;
		nic->pfc.fc_rx = mbx.pfc.fc_rx;
		nic->pfc.fc_tx = mbx.pfc.fc_tx;
		nic->pf_acked = true;
		break;
	default:
		netdev_err(nic->netdev,
			   "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
		break;
	}
	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
	union nic_mbx mbx = {};

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);

	return nicvf_send_msg_to_pf(nic, &mbx);
static void nicvf_config_cpi(struct nicvf *nic)
	union nic_mbx mbx = {};

	mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
	mbx.cpi_cfg.vf_id = nic->vf_id;
	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;

	nicvf_send_msg_to_pf(nic, &mbx);
static void nicvf_get_rss_size(struct nicvf *nic)
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.vf_id = nic->vf_id;
	nicvf_send_msg_to_pf(nic, &mbx);
void nicvf_config_rss(struct nicvf *nic)
	union nic_mbx mbx = {};
	struct nicvf_rss_info *rss = &nic->rss_info;
	int ind_tbl_len = rss->rss_size;

	mbx.rss_cfg.vf_id = nic->vf_id;
	mbx.rss_cfg.hash_bits = rss->hash_bits;
	while (ind_tbl_len) {
		mbx.rss_cfg.tbl_offset = nextq;
		mbx.rss_cfg.tbl_len = min(ind_tbl_len,
					  RSS_IND_TBL_LEN_PER_MBX_MSG);
		mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
			NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;

		for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];

		nicvf_send_msg_to_pf(nic, &mbx);

		ind_tbl_len -= mbx.rss_cfg.tbl_len;
	}
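/* Worked example: with rss_size == 64 and RSS_IND_TBL_LEN_PER_MBX_MSG == 8
 * (the value in this driver's headers), the indirection table is pushed to
 * the PF in 64 / 8 = 8 mailbox messages; only the first message uses
 * NIC_MBOX_MSG_RSS_CFG, the rest use NIC_MBOX_MSG_RSS_CFG_CONT.
 */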
void nicvf_set_rss_key(struct nicvf *nic)
	struct nicvf_rss_info *rss = &nic->rss_info;
	u64 key_addr = NIC_VNIC_RSS_KEY_0_4;

	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		nicvf_reg_write(nic, key_addr, rss->key[idx]);
		key_addr += sizeof(u64);
	}
static int nicvf_rss_init(struct nicvf *nic)
	struct nicvf_rss_info *rss = &nic->rss_info;

	nicvf_get_rss_size(nic);

	if (cpi_alg != CPI_ALG_NONE) {

	netdev_rss_key_fill(rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
	nicvf_set_rss_key(nic);

	rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);

	rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));

	for (idx = 0; idx < rss->rss_size; idx++)
		rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
							       nic->rx_queues);
	nicvf_config_rss(nic);
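/* ethtool_rxfh_indir_default(idx, n) is simply idx % n, so the default
 * indirection table spreads flows round-robin across the Rx queues,
 * e.g. 0, 1, 2, 3, 0, 1, ... for nic->rx_queues == 4.
 */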
/* Request PF to allocate additional Qsets */
static void nicvf_request_sqs(struct nicvf *nic)
	union nic_mbx mbx = {};
	int sqs_count = nic->sqs_count;
	int rx_queues = 0, tx_queues = 0;

	/* Only primary VF should request */
	if (nic->sqs_mode || !nic->sqs_count)

	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
	mbx.sqs_alloc.vf_id = nic->vf_id;
	mbx.sqs_alloc.qs_count = nic->sqs_count;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		/* No response from PF */

	/* Return if no Secondary Qsets available */

	if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS)
		rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS;

	tx_queues = nic->tx_queues + nic->xdp_tx_queues;
	if (tx_queues > MAX_SND_QUEUES_PER_QS)
		tx_queues = tx_queues - MAX_SND_QUEUES_PER_QS;
	/* Set the number of Rx/Tx queues in each of the SQsets */
	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
		mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
		mbx.nicvf.vf_id = nic->vf_id;
		mbx.nicvf.sqs_id = sqs;
		nicvf_send_msg_to_pf(nic, &mbx);

		nic->snicvf[sqs]->sqs_id = sqs;
		if (rx_queues > MAX_RCV_QUEUES_PER_QS) {
			nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;
			rx_queues -= MAX_RCV_QUEUES_PER_QS;
		} else {
			nic->snicvf[sqs]->qs->rq_cnt = rx_queues;
			rx_queues = 0;
		}

		if (tx_queues > MAX_SND_QUEUES_PER_QS) {
			nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;
			tx_queues -= MAX_SND_QUEUES_PER_QS;
		} else {
			nic->snicvf[sqs]->qs->sq_cnt = tx_queues;
			tx_queues = 0;
		}

		nic->snicvf[sqs]->qs->cq_cnt =
			max(nic->snicvf[sqs]->qs->rq_cnt,
			    nic->snicvf[sqs]->qs->sq_cnt);

		/* Initialize secondary Qset's queues and its interrupts */
		nicvf_open(nic->snicvf[sqs]->netdev);
	}

	/* Update stack with actual Rx/Tx queue count allocated */
	if (sqs_count != nic->sqs_count)
		nicvf_set_real_num_queues(nic->netdev,
					  nic->tx_queues, nic->rx_queues);
/* Send this Qset's nicvf pointer to PF.
 * PF in turn sends primary VF's nicvf struct to secondary Qsets/VFs
 * so that packets received by these Qsets can use primary VF's netdev.
 */
static void nicvf_send_vf_struct(struct nicvf *nic)
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_NICVF_PTR;
	mbx.nicvf.sqs_mode = nic->sqs_mode;
	mbx.nicvf.nicvf = (u64)nic;
	nicvf_send_msg_to_pf(nic, &mbx);

static void nicvf_get_primary_vf_struct(struct nicvf *nic)
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
	nicvf_send_msg_to_pf(nic, &mbx);
int nicvf_set_real_num_queues(struct net_device *netdev,
			      int tx_queues, int rx_queues)
	err = netif_set_real_num_tx_queues(netdev, tx_queues);
	if (err) {
		netdev_err(netdev,
			   "Failed to set no of Tx queues: %d\n", tx_queues);
		return err;
	}

	err = netif_set_real_num_rx_queues(netdev, rx_queues);
	if (err) {
		netdev_err(netdev,
			   "Failed to set no of Rx queues: %d\n", rx_queues);
		return err;
	}
static int nicvf_init_resources(struct nicvf *nic)
	nicvf_qset_config(nic, true);

	/* Initialize queues and HW for data transfer */
	err = nicvf_config_data_transfer(nic, true);
	if (err) {
		netdev_err(nic->netdev,
			   "Failed to alloc/config VF's QSet resources\n");
		return err;
	}
static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
				struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
				struct rcv_queue *rq, struct sk_buff **skb)
	u64 dma_addr, cpu_addr;

	/* Retrieve packet buffer's DMA address and length */
	len = *((u16 *)((void *)cqe_rx + (3 * sizeof(u64))));
	dma_addr = *((u64 *)((void *)cqe_rx + (7 * sizeof(u64))));

	cpu_addr = nicvf_iova_to_phys(nic, dma_addr);

	cpu_addr = (u64)phys_to_virt(cpu_addr);
	page = virt_to_page((void *)cpu_addr);

	xdp.data_hard_start = page_address(page);
	xdp.data = (void *)cpu_addr;
	xdp_set_data_meta_invalid(&xdp);
	xdp.data_end = xdp.data + len;
	xdp.rxq = &rq->xdp_rxq;
	orig_data = xdp.data;

	action = bpf_prog_run_xdp(prog, &xdp);
	len = xdp.data_end - xdp.data;
	/* Check if XDP program has changed headers */
	if (orig_data != xdp.data) {
		offset = orig_data - xdp.data;
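		/* offset > 0 means the program grew the header into the
		 * headroom (xdp.data moved down); the buffer start and the
		 * length handed to the stack below are adjusted by it.
		 */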
	switch (action) {
	case XDP_PASS:
		/* Check if it's a recycled page; if not,
		 * unmap the DMA mapping.
		 *
		 * Recycled page holds an extra reference.
		 */
		if (page_ref_count(page) == 1) {
			dma_addr &= PAGE_MASK;
			dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
					     RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
		}

		/* Build SKB and pass on packet to network stack */
		*skb = build_skb(xdp.data,
				 RCV_FRAG_LEN - cqe_rx->align_pad + offset);
	case XDP_TX:
		nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
		return true;
	default:
		bpf_warn_invalid_xdp_action(action);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(nic->netdev, prog, action);
		/* fall through */
	case XDP_DROP:
		/* Check if it's a recycled page; if not,
		 * unmap the DMA mapping.
		 *
		 * Recycled page holds an extra reference.
		 */
		if (page_ref_count(page) == 1) {
			dma_addr &= PAGE_MASK;
			dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
					     RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
		}
static void nicvf_snd_ptp_handler(struct net_device *netdev,
				  struct cqe_send_t *cqe_tx)
	struct nicvf *nic = netdev_priv(netdev);
	struct skb_shared_hwtstamps ts;

	/* Sync for 'ptp_skb' */
	smp_rmb();

	/* New timestamp request can be queued now */
	atomic_set(&nic->tx_ptp_skbs, 0);

	/* Check for timestamp requested skb */
	if (!nic->ptp_skb)
		return;

	/* Check if timestamping timed out; the timeout is set to 10us */
	if (cqe_tx->send_status == CQ_TX_ERROP_TSTMP_TIMEOUT ||
	    cqe_tx->send_status == CQ_TX_ERROP_TSTMP_CONFLICT)

	/* Get the timestamp */
	memset(&ts, 0, sizeof(ts));
	ns = cavium_ptp_tstamp2time(nic->ptp_clock, cqe_tx->ptp_timestamp);
	ts.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(nic->ptp_skb, &ts);

	/* Free the original skb */
	dev_kfree_skb_any(nic->ptp_skb);
static void nicvf_snd_pkt_handler(struct net_device *netdev,
				  struct cqe_send_t *cqe_tx,
				  int budget, int *subdesc_cnt,
				  unsigned int *tx_pkts, unsigned int *tx_bytes)
	struct sk_buff *skb = NULL;
	struct nicvf *nic = netdev_priv(netdev);
	struct snd_queue *sq;
	struct sq_hdr_subdesc *hdr;
	struct sq_hdr_subdesc *tso_sqe;

	sq = &nic->qs->sq[cqe_tx->sq_idx];

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)

	/* Check for errors */
	if (cqe_tx->send_status)
		nicvf_check_cqe_tx_errs(nic->pnicvf, cqe_tx);

	/* Is this an XDP designated Tx queue? */
	page = (struct page *)sq->xdp_page[cqe_tx->sqe_ptr];
	/* Check if it's a recycled page, or else unmap the DMA mapping */
	if (page && (page_ref_count(page) == 1))
		nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,

	/* Release page reference for recycling */
	sq->xdp_page[cqe_tx->sqe_ptr] = (u64)NULL;
	*subdesc_cnt += hdr->subdesc_cnt + 1;
	skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];

	/* Check for dummy descriptor used for HW TSO offload on 88xx */
	if (hdr->dont_send) {
		/* Get actual TSO descriptors and free them */
		tso_sqe =
			(struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
		nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
					 tso_sqe->subdesc_cnt);
		*subdesc_cnt += tso_sqe->subdesc_cnt + 1;
	} else {
		nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
					 hdr->subdesc_cnt);
	}
	*subdesc_cnt += hdr->subdesc_cnt + 1;

	*tx_bytes += skb->len;
	/* If timestamp is requested for this skb, don't free it */
	if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
	    !nic->pnicvf->ptp_skb)
		nic->pnicvf->ptp_skb = skb;
	else
		napi_consume_skb(skb, budget);
	sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;

	/* In case of SW TSO on 88xx, only the last segment will have
	 * an SKB attached, so just free SQEs here.
	 */
	*subdesc_cnt += hdr->subdesc_cnt + 1;
static inline void nicvf_set_rxhash(struct net_device *netdev,
				    struct cqe_rx_t *cqe_rx,
				    struct sk_buff *skb)
	if (!(netdev->features & NETIF_F_RXHASH))
		return;

	switch (cqe_rx->rss_alg) {
		hash_type = PKT_HASH_TYPE_L4;
		hash = cqe_rx->rss_tag;

		hash_type = PKT_HASH_TYPE_L3;
		hash = cqe_rx->rss_tag;

		hash_type = PKT_HASH_TYPE_NONE;

	skb_set_hash(skb, hash, hash_type);
static inline void nicvf_set_rxtstamp(struct nicvf *nic, struct sk_buff *skb)
	if (!nic->ptp_clock || !nic->hw_rx_tstamp)
		return;

	/* The first 8 bytes are the timestamp */
	ns = cavium_ptp_tstamp2time(nic->ptp_clock,
				    be64_to_cpu(*(__be64 *)skb->data));
	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
static void nicvf_rcv_pkt_handler(struct net_device *netdev,
				  struct napi_struct *napi,
				  struct cqe_rx_t *cqe_rx,
				  struct snd_queue *sq, struct rcv_queue *rq)
	struct sk_buff *skb = NULL;
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf *snic = nic;

	rq_idx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);

	if (nic->sqs_mode) {
		/* Use primary VF's 'nicvf' struct */
		nic = nic->pnicvf;
		netdev = nic->netdev;
	}

	/* Check for errors */
	if (cqe_rx->err_level || cqe_rx->err_opcode) {
		err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
		if (err && !cqe_rx->rb_cnt)
			return;
	}

	/* For XDP, ignore pkts spanning multiple pages */
	if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) {
		/* Packet consumed by XDP */
		if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, rq, &skb))
			return;
	} else {
		skb = nicvf_get_rcv_skb(snic, cqe_rx,
					nic->xdp_prog ? true : false);
	}
	if (netif_msg_pktdata(nic)) {
		netdev_info(nic->netdev, "skb 0x%p, len=%d\n", skb, skb->len);
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data, skb->len, true);
	}

	/* If error packet, drop it here */
	if (err) {
		dev_kfree_skb_any(skb);
		return;
	}

	nicvf_set_rxtstamp(nic, skb);
	nicvf_set_rxhash(netdev, cqe_rx, skb);

	skb_record_rx_queue(skb, rq_idx);
	if (netdev->hw_features & NETIF_F_RXCSUM) {
		/* HW by default verifies TCP/UDP/SCTP checksums */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	skb->protocol = eth_type_trans(skb, netdev);

	/* Check for stripped VLAN */
	if (cqe_rx->vlan_found && cqe_rx->vlan_stripped)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       ntohs((__force __be16)cqe_rx->vlan_tci));

	if (napi && (netdev->features & NETIF_F_GRO))
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
				 struct napi_struct *napi, int budget)
	int processed_cqe, work_done = 0, tx_done = 0;
	int cqe_count, cqe_head;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_idx];
	struct cqe_rx_t *cq_desc;
	struct netdev_queue *txq;
	struct snd_queue *sq = &qs->sq[cq_idx];
	struct rcv_queue *rq = &qs->rq[cq_idx];
	unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx;

	spin_lock_bh(&cq->lock);

	/* Get the number of valid CQ entries to process */
	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
	cqe_count &= CQ_CQE_COUNT;

	/* Get head of the valid CQ entries */
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
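	/* The '>> 9' converts the byte offset in the HEAD register into a
	 * descriptor index, assuming 512-byte CQEs (2^9 == 512).
	 */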
	while (processed_cqe < cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
		cqe_head++;
		cqe_head &= (cq->dmem.q_len - 1);
		/* Initiate prefetch for next descriptor */
		prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

		if ((work_done >= budget) && napi &&
		    (cq_desc->cqe_type != CQE_TYPE_SEND)) {
			break;
		}

		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq, rq);
			work_done++;
			break;
		case CQE_TYPE_SEND:
			nicvf_snd_pkt_handler(netdev, (void *)cq_desc,
					      budget, &subdesc_cnt,
					      &tx_pkts, &tx_bytes);
			tx_done++;
			break;
		case CQE_TYPE_SEND_PTP:
			nicvf_snd_ptp_handler(netdev, (void *)cq_desc);
			break;
		case CQE_TYPE_INVALID:
		case CQE_TYPE_RX_SPLIT:
		case CQE_TYPE_RX_TCP:
			break;
		}
		processed_cqe++;
	}

	/* Ring doorbell to inform H/W to reuse processed CQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
			      cq_idx, processed_cqe);
	if ((work_done < budget) && napi)

	/* Update SQ's descriptor free count */
	if (subdesc_cnt)
		nicvf_put_sq_desc(sq, subdesc_cnt);

	txq_idx = nicvf_netdev_qidx(nic, cq_idx);
	/* Handle XDP TX queues */
	if (nic->pnicvf->xdp_prog) {
		if (txq_idx < nic->pnicvf->xdp_tx_queues) {
			nicvf_xdp_sq_doorbell(nic, sq, cq_idx);
		}
		txq_idx -= nic->pnicvf->xdp_tx_queues;
	}

	/* Wake up the TXQ if it was stopped earlier due to the SQ being full */
	if (tx_done ||
	    (atomic_read(&sq->free_cnt) >= MIN_SQ_DESC_PER_PKT_XMIT)) {
		netdev = nic->pnicvf->netdev;
		txq = netdev_get_tx_queue(netdev, txq_idx);
		if (tx_pkts)
			netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

		/* To read updated queue and carrier status */
		smp_mb();
		if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
			netif_tx_wake_queue(txq);
			this_cpu_inc(nic->drv_stats->txq_wake);
			netif_warn(nic, tx_err, netdev,
				   "Transmit queue wakeup SQ%d\n", txq_idx);
		}
	}

	spin_unlock_bh(&cq->lock);
	return work_done;
static int nicvf_poll(struct napi_struct *napi, int budget)
	struct net_device *netdev = napi->dev;
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_cq_poll *cq;

	cq = container_of(napi, struct nicvf_cq_poll, napi);
	work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);

	if (work_done < budget) {
		/* Slow packet rate, exit polling */
		napi_complete_done(napi, work_done);
		/* Re-enable interrupts */
		cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
					       cq->cq_idx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
		nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
				      cq->cq_idx, cq_head);
		nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
	}
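	/* Ordering note: the pending interrupt is cleared and the SW head
	 * written back *before* re-enabling the CQ interrupt, so a CQE that
	 * arrives in between raises a fresh interrupt instead of being lost.
	 */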
/* Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
static void nicvf_handle_qs_err(unsigned long data)
	struct nicvf *nic = (struct nicvf *)data;
	struct queue_set *qs = nic->qs;

	netif_tx_disable(nic->netdev);

	/* Check if it is CQ err */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
					      qidx);
		if (!(status & CQ_ERR_MASK))
			continue;

		/* Process already queued CQEs and reconfig CQ */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_sq_disable(nic, qidx);
		nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
		nicvf_cmp_queue_config(nic, qs, qidx, true);
		nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);

		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
	}

	netif_tx_start_all_queues(nic->netdev);
	/* Re-enable Qset error interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
static void nicvf_dump_intr_status(struct nicvf *nic)
	netif_info(nic, intr, nic->netdev, "interrupt status 0x%llx\n",
		   nicvf_reg_read(nic, NIC_VF_INT));
static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
	struct nicvf *nic = (struct nicvf *)nicvf_irq;

	nicvf_dump_intr_status(nic);

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	/* Check for spurious interrupt */
	if (!(intr & NICVF_INTR_MBOX_MASK))
		return IRQ_NONE;

	nicvf_handle_mbx_intr(nic);

	return IRQ_HANDLED;
static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq)
	struct nicvf_cq_poll *cq_poll = (struct nicvf_cq_poll *)cq_irq;
	struct nicvf *nic = cq_poll->nicvf;
	int qidx = cq_poll->cq_idx;

	nicvf_dump_intr_status(nic);

	/* Disable interrupts */
	nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);

	napi_schedule_irqoff(&cq_poll->napi);

	/* Clear interrupt */
	nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);

	return IRQ_HANDLED;
static irqreturn_t nicvf_rbdr_intr_handler(int irq, void *nicvf_irq)
	struct nicvf *nic = (struct nicvf *)nicvf_irq;

	nicvf_dump_intr_status(nic);

	/* Disable RBDR interrupt and schedule softirq */
	for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
			continue;
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		tasklet_hi_schedule(&nic->rbdr_task);
		/* Clear interrupt */
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}

	return IRQ_HANDLED;
static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
	struct nicvf *nic = (struct nicvf *)nicvf_irq;

	nicvf_dump_intr_status(nic);

	/* Disable Qset err interrupt and schedule softirq */
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	tasklet_hi_schedule(&nic->qs_err_task);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	return IRQ_HANDLED;
static void nicvf_set_irq_affinity(struct nicvf *nic)
	for (vec = 0; vec < nic->num_vec; vec++) {
		if (!nic->irq_allocated[vec])
			continue;

		if (!zalloc_cpumask_var(&nic->affinity_mask[vec], GFP_KERNEL))
			return;

		if (vec < NICVF_INTR_ID_SQ)
			/* Leave CPU0 for RBDR and other interrupts */
			cpu = nicvf_netdev_qidx(nic, vec) + 1;
		else
			cpu = 0;

		cpumask_set_cpu(cpumask_local_spread(cpu, nic->node),
				nic->affinity_mask[vec]);
		irq_set_affinity_hint(pci_irq_vector(nic->pdev, vec),
				      nic->affinity_mask[vec]);
	}
static int nicvf_register_interrupts(struct nicvf *nic)
	for_each_cq_irq(irq)
		sprintf(nic->irq_name[irq], "%s-rxtx-%d",
			nic->pnicvf->netdev->name,
			nicvf_netdev_qidx(nic, irq));

	for_each_sq_irq(irq)
		sprintf(nic->irq_name[irq], "%s-sq-%d",
			nic->pnicvf->netdev->name,
			nicvf_netdev_qidx(nic, irq - NICVF_INTR_ID_SQ));

	for_each_rbdr_irq(irq)
		sprintf(nic->irq_name[irq], "%s-rbdr-%d",
			nic->pnicvf->netdev->name,
			nic->sqs_mode ? (nic->sqs_id + 1) : 0);

	/* Register CQ interrupts */
	for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
		ret = request_irq(pci_irq_vector(nic->pdev, irq),
				  nicvf_intr_handler,
				  0, nic->irq_name[irq], nic->napi[irq]);
		if (ret)
			goto err;
		nic->irq_allocated[irq] = true;
	}

	/* Register RBDR interrupt */
	for (irq = NICVF_INTR_ID_RBDR;
	     irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
		ret = request_irq(pci_irq_vector(nic->pdev, irq),
				  nicvf_rbdr_intr_handler,
				  0, nic->irq_name[irq], nic);
		if (ret)
			goto err;
		nic->irq_allocated[irq] = true;
	}

	/* Register QS error interrupt */
	sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], "%s-qset-err-%d",
		nic->pnicvf->netdev->name,
		nic->sqs_mode ? (nic->sqs_id + 1) : 0);
	irq = NICVF_INTR_ID_QS_ERR;
	ret = request_irq(pci_irq_vector(nic->pdev, irq),
			  nicvf_qs_err_intr_handler,
			  0, nic->irq_name[irq], nic);
	if (ret)
		goto err;

	nic->irq_allocated[irq] = true;

	/* Set IRQ affinities */
	nicvf_set_irq_affinity(nic);

	return 0;

err:
	netdev_err(nic->netdev, "request_irq failed, vector %d\n", irq);

	return ret;
static void nicvf_unregister_interrupts(struct nicvf *nic)
	struct pci_dev *pdev = nic->pdev;

	/* Free registered interrupts */
	for (irq = 0; irq < nic->num_vec; irq++) {
		if (!nic->irq_allocated[irq])
			continue;

		irq_set_affinity_hint(pci_irq_vector(pdev, irq), NULL);
		free_cpumask_var(nic->affinity_mask[irq]);

		if (irq < NICVF_INTR_ID_SQ)
			free_irq(pci_irq_vector(pdev, irq), nic->napi[irq]);
		else
			free_irq(pci_irq_vector(pdev, irq), nic);

		nic->irq_allocated[irq] = false;
	}

	pci_free_irq_vectors(pdev);
/* Initialize MSIX vectors and register MISC interrupt.
 * Send READY message to PF to check if it's alive.
 */
static int nicvf_register_misc_interrupt(struct nicvf *nic)
	int irq = NICVF_INTR_ID_MISC;

	/* Return if mailbox interrupt is already registered */
	if (nic->pdev->msix_enabled)
		return 0;

	nic->num_vec = pci_msix_vec_count(nic->pdev);
	ret = pci_alloc_irq_vectors(nic->pdev, nic->num_vec, nic->num_vec,
				    PCI_IRQ_MSIX);
	if (ret < 0) {
		netdev_err(nic->netdev,
			   "Req for #%d msix vectors failed\n", nic->num_vec);
		return ret;
	}

	sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
	/* Register Misc interrupt */
	ret = request_irq(pci_irq_vector(nic->pdev, irq),
			  nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);

	nic->irq_allocated[irq] = true;

	/* Enable mailbox interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);

	/* Check if VF is able to communicate with PF */
	if (!nicvf_check_pf_ready(nic)) {
		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
		nicvf_unregister_interrupts(nic);
		return -EIO;
	}

	return 0;
static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
	struct nicvf *nic = netdev_priv(netdev);
	int qid = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);
	struct snd_queue *sq;

	/* Check for minimum packet length */
	if (skb->len <= ETH_HLEN) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* In XDP case, initial HW tx queues are used for XDP,
	 * but stack's queue mapping starts at '0', so skip the
	 * Tx queues attached to Rx queues for XDP.
	 */
	if (nic->xdp_prog)
		qid += nic->xdp_tx_queues;

	/* Get secondary Qset's SQ structure */
	if (qid >= MAX_SND_QUEUES_PER_QS) {
		tmp = qid / MAX_SND_QUEUES_PER_QS;
		snic = (struct nicvf *)nic->snicvf[tmp - 1];
		if (!snic) {
			netdev_warn(nic->netdev,
				    "Secondary Qset#%d's ptr not initialized\n",
				    tmp - 1);
			return NETDEV_TX_OK;
		}
		qid = qid % MAX_SND_QUEUES_PER_QS;
	}

	sq = &snic->qs->sq[qid];
	if (!netif_tx_queue_stopped(txq) &&
	    !nicvf_sq_append_skb(snic, sq, skb, qid)) {
		netif_tx_stop_queue(txq);

		/* Barrier, so that stop_queue is visible to other CPUs */
		smp_mb();

		/* Check again, in case another CPU freed descriptors */
		if (atomic_read(&sq->free_cnt) > MIN_SQ_DESC_PER_PKT_XMIT) {
			netif_tx_wake_queue(txq);
		} else {
			this_cpu_inc(nic->drv_stats->txq_stop);
			netif_warn(nic, tx_err, netdev,
				   "Transmit ring full, stopping SQ%d\n", qid);
		}
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
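/* The stop -> barrier -> recheck -> wake sequence above closes the race
 * where another CPU frees SQ descriptors between the failed append and
 * netif_tx_stop_queue(); returning NETDEV_TX_BUSY makes the stack requeue
 * the skb and retry later.
 */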
static inline void nicvf_free_cq_poll(struct nicvf *nic)
	struct nicvf_cq_poll *cq_poll;

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		nic->napi[qidx] = NULL;
		kfree(cq_poll);
	}
int nicvf_stop(struct net_device *netdev)
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;
	union nic_mbx mbx = {};

	cancel_delayed_work_sync(&nic->link_change_work);

	/* wait till all queued set_rx_mode tasks complete */
	drain_workqueue(nic->nicvf_rx_mode_wq);

	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
	nicvf_send_msg_to_pf(nic, &mbx);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(nic->netdev);
	nic->link_up = false;

	/* Tear down secondary qsets first */
	if (!nic->sqs_mode) {
		for (qidx = 0; qidx < nic->sqs_count; qidx++) {
			if (!nic->snicvf[qidx])
				continue;
			nicvf_stop(nic->snicvf[qidx]->netdev);
			nic->snicvf[qidx] = NULL;
		}
	}

	/* Disable RBDR & QS error interrupts */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Wait for pending IRQ handlers to finish */
	for (irq = 0; irq < nic->num_vec; irq++)
		synchronize_irq(pci_irq_vector(nic->pdev, irq));

	tasklet_kill(&nic->rbdr_task);
	tasklet_kill(&nic->qs_err_task);
	if (nic->rb_work_scheduled)
		cancel_delayed_work_sync(&nic->rbdr_work);

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_synchronize(&cq_poll->napi);
		/* CQ intr is enabled while napi_complete,
		 * so disable it now
		 */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}

	netif_tx_disable(netdev);

	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));

	/* Free resources */
	nicvf_config_data_transfer(nic, false);

	/* Disable HW Qset */
	nicvf_qset_config(nic, false);

	/* disable mailbox interrupt */
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);

	nicvf_unregister_interrupts(nic);

	nicvf_free_cq_poll(nic);

	/* Free any pending SKB saved to receive timestamp */
	if (nic->ptp_skb) {
		dev_kfree_skb_any(nic->ptp_skb);
		nic->ptp_skb = NULL;
	}
	/* Clear multiqset info */
	nic->pnicvf = nic;

	return 0;

static int nicvf_config_hw_rx_tstamp(struct nicvf *nic, bool enable)
	union nic_mbx mbx = {};

	mbx.ptp.msg = NIC_MBOX_MSG_PTP_CFG;
	mbx.ptp.enable = enable;

	return nicvf_send_msg_to_pf(nic, &mbx);
static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
	union nic_mbx mbx = {};

	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
	mbx.frs.max_frs = mtu;
	mbx.frs.vf_id = nic->vf_id;

	return nicvf_send_msg_to_pf(nic, &mbx);
static void nicvf_link_status_check_task(struct work_struct *work_arg)
	struct nicvf *nic = container_of(work_arg,
					 struct nicvf,
					 link_change_work.work);
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
	nicvf_send_msg_to_pf(nic, &mbx);
	queue_delayed_work(nic->nicvf_rx_mode_wq,
			   &nic->link_change_work, 2 * HZ);
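/* The work item above re-queues itself, so link status is effectively
 * polled from the PF every 2 seconds (2 * HZ) while the interface is up.
 */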
int nicvf_open(struct net_device *netdev)
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;

	/* wait till all queued set_rx_mode tasks complete, if any */
	drain_workqueue(nic->nicvf_rx_mode_wq);

	netif_carrier_off(netdev);

	err = nicvf_register_misc_interrupt(nic);

	/* Register NAPI handler for processing CQEs */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);

		cq_poll->cq_idx = qidx;
		cq_poll->nicvf = nic;
		netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&cq_poll->napi);
		nic->napi[qidx] = cq_poll;
	}

	/* Check if we got a MAC address from the PF, else generate a random MAC */
	if (!nic->sqs_mode && is_zero_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	if (nic->set_mac_pending) {
		nic->set_mac_pending = false;
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	/* Init tasklet for handling Qset err interrupt */
	tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
		     (unsigned long)nic);

	/* Init RBDR tasklet which will refill RBDR */
	tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
		     (unsigned long)nic);
	INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);

	/* Configure CPI algorithm */
	nic->cpi_alg = cpi_alg;
	nicvf_config_cpi(nic);

	nicvf_request_sqs(nic);
	nicvf_get_primary_vf_struct(nic);

	/* Configure PTP timestamp */
	nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
	atomic_set(&nic->tx_ptp_skbs, 0);
	nic->ptp_skb = NULL;

	/* Configure receive side scaling and MTU */
	if (!nic->sqs_mode) {
		nicvf_rss_init(nic);
		err = nicvf_update_hw_max_frs(nic, netdev->mtu);

		/* Clear percpu stats */
		for_each_possible_cpu(cpu)
			memset(per_cpu_ptr(nic->drv_stats, cpu), 0,
			       sizeof(struct nicvf_drv_stats));
	}

	err = nicvf_register_interrupts(nic);

	/* Initialize the queues */
	err = nicvf_init_resources(nic);

	/* Make sure queue initialization is written */
	wmb();

	nicvf_reg_write(nic, NIC_VF_INT, -1);
	/* Enable Qset err interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Enable completion queue interrupt */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Enable RBDR threshold interrupt */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);

	/* Send VF config done msg to PF */
	nicvf_send_cfg_done(nic);

	INIT_DELAYED_WORK(&nic->link_change_work,
			  nicvf_link_status_check_task);
	queue_delayed_work(nic->nicvf_rx_mode_wq,
			   &nic->link_change_work, 0);

	return 0;

cleanup:
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
	nicvf_unregister_interrupts(nic);
	tasklet_kill(&nic->qs_err_task);
	tasklet_kill(&nic->rbdr_task);
napi_del:
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}
	nicvf_free_cq_poll(nic);
static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
	struct nicvf *nic = netdev_priv(netdev);
	int orig_mtu = netdev->mtu;

	netdev->mtu = new_mtu;

	if (!netif_running(netdev))
		return 0;

	if (nicvf_update_hw_max_frs(nic, new_mtu)) {
		netdev->mtu = orig_mtu;
		return -EINVAL;
	}

	return 0;
static int nicvf_set_mac_address(struct net_device *netdev, void *p)
	struct sockaddr *addr = p;
	struct nicvf *nic = netdev_priv(netdev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	if (nic->pdev->msix_enabled) {
		if (nicvf_hw_set_mac_addr(nic, netdev))
			return -EBUSY;
	} else {
		nic->set_mac_pending = true;
	}
void nicvf_update_lmac_stats(struct nicvf *nic)
	int stat = 0;
	union nic_mbx mbx = {};

	if (!netif_running(nic->netdev))
		return;

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = nic->vf_id;
	mbx.bgx_stats.rx = 1;
	while (stat < BGX_RX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}

	stat = 0;

	mbx.bgx_stats.rx = 0;
	while (stat < BGX_TX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}
void nicvf_update_stats(struct nicvf *nic)
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats;
	struct queue_set *qs = nic->qs;

#define GET_RX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
#define GET_TX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))

	stats->rx_bytes = GET_RX_STATS(RX_OCTS);
	stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
	stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
	stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
	stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
	stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
	stats->rx_drop_red = GET_RX_STATS(RX_RED);
	stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
	stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
	stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
	stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
	stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
	stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);

	stats->tx_bytes = GET_TX_STATS(TX_OCTS);
	stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST);
	stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST);
	stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST);
	stats->tx_drops = GET_TX_STATS(TX_DROP);

	/* On T88 pass 2.0, the dummy SQE added for TSO notification
	 * via CQE has 'dont_send' set. Hence HW drops the pkt pointed to
	 * by the dummy SQE, which results in the tx_drops counter being
	 * incremented. Subtracting it from the tx_tso counter gives the
	 * exact tx_drops count.
	 */
	if (nic->t88 && nic->hw_tso) {
		for_each_possible_cpu(cpu) {
			drv_stats = per_cpu_ptr(nic->drv_stats, cpu);
			tmp_stats += drv_stats->tx_tso;
		}
		stats->tx_drops = tmp_stats - stats->tx_drops;
	}
	stats->tx_frames = stats->tx_ucast_frames +
			   stats->tx_bcast_frames +
			   stats->tx_mcast_frames;
	stats->rx_frames = stats->rx_ucast_frames +
			   stats->rx_bcast_frames +
			   stats->rx_mcast_frames;
	stats->rx_drops = stats->rx_drop_red +
			  stats->rx_drop_overrun;

	/* Update RQ and SQ stats */
	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
		nicvf_update_rq_stats(nic, qidx);
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_update_sq_stats(nic, qidx);
static void nicvf_get_stats64(struct net_device *netdev,
			      struct rtnl_link_stats64 *stats)
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_hw_stats *hw_stats = &nic->hw_stats;

	nicvf_update_stats(nic);

	stats->rx_bytes = hw_stats->rx_bytes;
	stats->rx_packets = hw_stats->rx_frames;
	stats->rx_dropped = hw_stats->rx_drops;
	stats->multicast = hw_stats->rx_mcast_frames;

	stats->tx_bytes = hw_stats->tx_bytes;
	stats->tx_packets = hw_stats->tx_frames;
	stats->tx_dropped = hw_stats->tx_drops;
static void nicvf_tx_timeout(struct net_device *dev)
	struct nicvf *nic = netdev_priv(dev);

	netif_warn(nic, tx_err, dev, "Transmit timed out, resetting\n");

	this_cpu_inc(nic->drv_stats->tx_timeout);
	schedule_work(&nic->reset_task);

static void nicvf_reset_task(struct work_struct *work)
	struct nicvf *nic;

	nic = container_of(work, struct nicvf, reset_task);

	if (!netif_running(nic->netdev))
		return;

	nicvf_stop(nic->netdev);
	nicvf_open(nic->netdev);
	netif_trans_update(nic->netdev);
static int nicvf_config_loopback(struct nicvf *nic,
				 netdev_features_t features)
	union nic_mbx mbx = {};

	mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
	mbx.lbk.vf_id = nic->vf_id;
	mbx.lbk.enable = (features & NETIF_F_LOOPBACK) != 0;

	return nicvf_send_msg_to_pf(nic, &mbx);

static netdev_features_t nicvf_fix_features(struct net_device *netdev,
					    netdev_features_t features)
	struct nicvf *nic = netdev_priv(netdev);

	if ((features & NETIF_F_LOOPBACK) &&
	    netif_running(netdev) && !nic->loopback_supported)
		features &= ~NETIF_F_LOOPBACK;

	return features;

static int nicvf_set_features(struct net_device *netdev,
			      netdev_features_t features)
	struct nicvf *nic = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		nicvf_config_vlan_stripping(nic, features);

	if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
		return nicvf_config_loopback(nic, features);

	return 0;
static void nicvf_set_xdp_queues(struct nicvf *nic, bool bpf_attached)
	u8 cq_count, txq_count;

	/* Set XDP Tx queue count same as Rx queue count */
	if (!bpf_attached)
		nic->xdp_tx_queues = 0;
	else
		nic->xdp_tx_queues = nic->rx_queues;

	/* If queue count > MAX_CMP_QUEUES_PER_QS, then additional qsets
	 * need to be allocated; check how many.
	 */
	txq_count = nic->xdp_tx_queues + nic->tx_queues;
	cq_count = max(nic->rx_queues, txq_count);
	if (cq_count > MAX_CMP_QUEUES_PER_QS) {
		nic->sqs_count = roundup(cq_count, MAX_CMP_QUEUES_PER_QS);
		nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
	} else {
		nic->sqs_count = 0;
	}

	/* Set primary Qset's resources */
	nic->qs->rq_cnt = min_t(u8, nic->rx_queues, MAX_RCV_QUEUES_PER_QS);
	nic->qs->sq_cnt = min_t(u8, txq_count, MAX_SND_QUEUES_PER_QS);
	nic->qs->cq_cnt = max_t(u8, nic->qs->rq_cnt, nic->qs->sq_cnt);

	nicvf_set_real_num_queues(nic->netdev, nic->tx_queues, nic->rx_queues);
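/* Worked example for the Qset math above (assuming MAX_CMP_QUEUES_PER_QS
 * is 8): with 12 Rx queues, 12 stack Tx queues and 12 XDP Tx queues,
 * cq_count = max(12, 24) = 24, so sqs_count = roundup(24, 8) / 8 - 1 = 2
 * additional (secondary) Qsets are requested from the PF.
 */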
static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
	struct net_device *dev = nic->netdev;
	bool if_up = netif_running(nic->netdev);
	struct bpf_prog *old_prog;
	bool bpf_attached = false;

	/* For now, support only the usual MTU-sized frames */
	if (prog && (dev->mtu > 1500)) {
		netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
			    dev->mtu);
		return -EOPNOTSUPP;
	}

	/* ALL SQs attached to CQs, i.e. the same as RQs, are treated as
	 * XDP Tx queues, and more Tx queues are allocated for the
	 * network stack to send pkts out.
	 *
	 * The number of Tx queues is either the same as the number of Rx
	 * queues or whatever is left within the max number of queues possible.
	 */
	if ((nic->rx_queues + nic->tx_queues) > nic->max_queues) {
		netdev_warn(dev,
			    "Failed to attach BPF prog, RXQs + TXQs > Max %d\n",
			    nic->max_queues);
		return -ENOMEM;
	}

	if (if_up)
		nicvf_stop(nic->netdev);

	old_prog = xchg(&nic->xdp_prog, prog);
	/* Detach old prog, if any */
	if (old_prog)
		bpf_prog_put(old_prog);

	if (nic->xdp_prog) {
		/* Attach BPF program */
		nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
		if (!IS_ERR(nic->xdp_prog)) {
			bpf_attached = true;
		} else {
			ret = PTR_ERR(nic->xdp_prog);
			nic->xdp_prog = NULL;
		}
	}

	/* Calculate Tx queues needed for XDP and network stack */
	nicvf_set_xdp_queues(nic, bpf_attached);

	if (if_up) {
		/* Reinitialize interface, clean slate */
		nicvf_open(nic->netdev);
		netif_trans_update(nic->netdev);
	}
static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
	struct nicvf *nic = netdev_priv(netdev);

	/* To avoid checks while retrieving buffer address from CQE_RX,
	 * do not support XDP for T88 pass1.x silicons, which are anyway
	 * not widely in use.
	 */
	if (pass1_silicon(nic->pdev))
		return -EOPNOTSUPP;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return nicvf_xdp_setup(nic, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_id = nic->xdp_prog ? nic->xdp_prog->aux->id : 0;
		return 0;
	default:
		return -EINVAL;
	}
static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
	struct hwtstamp_config config;
	struct nicvf *nic = netdev_priv(netdev);

	if (!nic->ptp_clock)
		return -ENODEV;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		nic->hw_rx_tstamp = false;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		nic->hw_rx_tstamp = true;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (netif_running(netdev))
		nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return nicvf_config_hwtstamp(netdev, req);
	default:
		return -EOPNOTSUPP;
	}
static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
				     struct nicvf *nic)
	union nic_mbx mbx = {};

	/* From inside the VM we have only 128 bits of mailbox memory
	 * available to send a message to the host's PF, so send all mc
	 * addrs one by one, starting with a flush command in case the
	 * kernel requests specific MAC filtering.
	 */

	/* flush DMAC filters and reset RX mode */
	mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
	if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
		goto free_mc;

	if (mode & BGX_XCAST_MCAST_FILTER) {
		/* once filtering is enabled, we need to signal the PF to add
		 * its own LMAC to the filter so it accepts packets for it.
		 */
		mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
		if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
			goto free_mc;
	}

	/* check if we have any specific MACs to be added to PF DMAC filter */
	if (mc_addrs) {
		/* now go through kernel list of MACs and add them one by one */
		for (idx = 0; idx < mc_addrs->count; idx++) {
			mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
			mbx.xcast.mac = mc_addrs->mc[idx];
			if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
				goto free_mc;
		}
	}

	/* and finally set rx mode for PF accordingly */
	mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
	mbx.xcast.mode = mode;

	nicvf_send_msg_to_pf(nic, &mbx);
free_mc:
	kfree(mc_addrs);
static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
	struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
						  work);
	struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
	struct xcast_addr_list *mc;

	/* Save message data locally to prevent it from
	 * being overwritten by the next ndo_set_rx_mode() call.
	 */
	spin_lock(&nic->rx_mode_wq_lock);
	mode = vf_work->mode;
	mc = vf_work->mc;
	vf_work->mc = NULL;
	spin_unlock(&nic->rx_mode_wq_lock);

	__nicvf_set_rx_mode_task(mode, mc, nic);
static void nicvf_set_rx_mode(struct net_device *netdev)
	struct nicvf *nic = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	struct xcast_addr_list *mc_list = NULL;

	if (netdev->flags & IFF_PROMISC) {
		mode = BGX_XCAST_BCAST_ACCEPT | BGX_XCAST_MCAST_ACCEPT;
	} else {
		if (netdev->flags & IFF_BROADCAST)
			mode |= BGX_XCAST_BCAST_ACCEPT;

		if (netdev->flags & IFF_ALLMULTI) {
			mode |= BGX_XCAST_MCAST_ACCEPT;
		} else if (netdev->flags & IFF_MULTICAST) {
			mode |= BGX_XCAST_MCAST_FILTER;
			/* here we need to copy mc addrs */
			if (netdev_mc_count(netdev)) {
				mc_list = kmalloc(offsetof(typeof(*mc_list),
							   mc[netdev_mc_count(netdev)]),
						  GFP_ATOMIC);
				if (unlikely(!mc_list))
					return;
				mc_list->count = 0;
				netdev_hw_addr_list_for_each(ha, &netdev->mc) {
					mc_list->mc[mc_list->count] =
						ether_addr_to_u64(ha->addr);
					mc_list->count++;
				}
			}
		}
	}
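	/* Each address was packed into a u64 (ether_addr_to_u64) because the
	 * whole list is later replayed to the PF through the 128-bit mailbox,
	 * one NIC_MBOX_MSG_ADD_MCAST message per address.
	 */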
	spin_lock(&nic->rx_mode_wq_lock);
	kfree(nic->rx_mode_work.mc);
	nic->rx_mode_work.mc = mc_list;
	nic->rx_mode_work.mode = mode;
	queue_work(nic->nicvf_rx_mode_wq, &nic->rx_mode_work.work);
	spin_unlock(&nic->rx_mode_wq_lock);
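/* ndo_set_rx_mode runs in atomic context (the netdev addr_list lock is
 * held), while nicvf_send_msg_to_pf() sleeps on a mutex; that is why the
 * actual PF programming is deferred to the ordered workqueue above.
 */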
static const struct net_device_ops nicvf_netdev_ops = {
	.ndo_open		= nicvf_open,
	.ndo_stop		= nicvf_stop,
	.ndo_start_xmit		= nicvf_xmit,
	.ndo_change_mtu		= nicvf_change_mtu,
	.ndo_set_mac_address	= nicvf_set_mac_address,
	.ndo_get_stats64	= nicvf_get_stats64,
	.ndo_tx_timeout		= nicvf_tx_timeout,
	.ndo_fix_features	= nicvf_fix_features,
	.ndo_set_features	= nicvf_set_features,
	.ndo_bpf		= nicvf_xdp,
	.ndo_do_ioctl		= nicvf_ioctl,
	.ndo_set_rx_mode	= nicvf_set_rx_mode,
};
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct cavium_ptp *ptp_clock;

	ptp_clock = cavium_ptp_get();
	if (IS_ERR(ptp_clock)) {
		if (PTR_ERR(ptp_clock) == -ENODEV)
			/* In a virtualized environment, proceed without PTP */
			ptp_clock = NULL;
		else
			return PTR_ERR(ptp_clock);
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
		goto err_release_regions;
	}

	qcount = netif_get_num_default_rss_queues();

	/* Restrict multiqset support only to host-bound VFs */
	if (pdev->is_virtfn) {
		/* Set max number of queues per VF */
		qcount = min_t(int, num_online_cpus(),
			       (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
	}

	netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}
	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	nic = netdev_priv(netdev);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->pnicvf = nic;
	nic->max_queues = qcount;
	/* If the number of CPUs is too low, there won't be any queues left
	 * for XDP_TX, hence double it.
	 */
	if (!nic->t88)
		nic->max_queues *= 2;
	nic->ptp_clock = ptp_clock;

	/* Map VF's configuration registers */
	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!nic->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_free_netdev;
	}

	nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
	if (!nic->drv_stats) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = nicvf_set_qset_resources(nic);
	if (err)
		goto err_free_netdev;

	/* Check if PF is alive and get MAC address for this VF */
	err = nicvf_register_misc_interrupt(nic);
	if (err)
		goto err_free_netdev;

	nicvf_send_vf_struct(nic);

	if (!pass1_silicon(nic->pdev))
		nic->hw_tso = true;

	/* Get iommu domain for iova to physical addr conversion */
	nic->iommu_domain = iommu_get_domain_for_dev(dev);

	pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
	if (sdevid == 0xA134)
		nic->t88 = true;

	/* Check if this VF is in QS only mode */
	if (nic->sqs_mode)
		return 0;

	err = nicvf_set_real_num_queues(netdev, nic->tx_queues, nic->rx_queues);
	if (err)
		goto err_unregister_interrupts;

	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_SG |
			       NETIF_F_TSO | NETIF_F_GRO | NETIF_F_TSO6 |
			       NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			       NETIF_F_HW_VLAN_CTAG_RX);

	netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= NETIF_F_LOOPBACK;

	netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM |
				NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6;

	netdev->netdev_ops = &nicvf_netdev_ops;
	netdev->watchdog_timeo = NICVF_TX_TIMEOUT;

	/* MTU range: 64 - 9200 */
	netdev->min_mtu = NIC_HW_MIN_FRS;
	netdev->max_mtu = NIC_HW_MAX_FRS;

	INIT_WORK(&nic->reset_task, nicvf_reset_task);

	nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d",
							WQ_MEM_RECLAIM,
							nic->vf_id);
	INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
	spin_lock_init(&nic->rx_mode_wq_lock);
	mutex_init(&nic->rx_mode_mtx);

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_unregister_interrupts;
	}

	nic->msg_enable = debug;

	nicvf_set_ethtool_ops(netdev);

	return 0;
err_unregister_interrupts:
	nicvf_unregister_interrupts(nic);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	if (nic->drv_stats)
		free_percpu(nic->drv_stats);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return err;
static void nicvf_remove(struct pci_dev *pdev)
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct net_device *pnetdev;

	if (!netdev)
		return;

	nic = netdev_priv(netdev);
	pnetdev = nic->pnicvf->netdev;

	/* Check if this Qset is assigned to a different VF.
	 * If yes, clean primary and all secondary Qsets.
	 */
	if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
		unregister_netdev(pnetdev);
	if (nic->nicvf_rx_mode_wq) {
		destroy_workqueue(nic->nicvf_rx_mode_wq);
		nic->nicvf_rx_mode_wq = NULL;
	}
	nicvf_unregister_interrupts(nic);
	pci_set_drvdata(pdev, NULL);
	if (nic->drv_stats)
		free_percpu(nic->drv_stats);
	cavium_ptp_put(nic->ptp_clock);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
static void nicvf_shutdown(struct pci_dev *pdev)
	nicvf_remove(pdev);

static struct pci_driver nicvf_driver = {
	.name = DRV_NAME,
	.id_table = nicvf_id_table,
	.probe = nicvf_probe,
	.remove = nicvf_remove,
	.shutdown = nicvf_shutdown,
};
static int __init nicvf_init_module(void)
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&nicvf_driver);

static void __exit nicvf_cleanup_module(void)
	pci_unregister_driver(&nicvf_driver);

module_init(nicvf_init_module);
module_exit(nicvf_cleanup_module);