2 * Copyright (C) 2015 Netronome Systems, Inc.
4 * This software is dual licensed under the GNU General Public License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree or the BSD 2-Clause License provided below. You have the
7 * option to license this software under the complete terms of either license.
9 * The BSD 2-Clause License:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
15 * 1. Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
19 * 2. Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
36 * Netronome network device driver: Common functions between PF and VF
37 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
38 * Jason McMullan <jason.mcmullan@netronome.com>
39 * Rolf Neugebauer <rolf.neugebauer@netronome.com>
40 * Brad Petrus <brad.petrus@netronome.com>
41 * Chris Telfer <chris.telfer@netronome.com>
44 #include <linux/module.h>
45 #include <linux/kernel.h>
46 #include <linux/init.h>
48 #include <linux/netdevice.h>
49 #include <linux/etherdevice.h>
50 #include <linux/interrupt.h>
52 #include <linux/ipv6.h>
53 #include <linux/pci.h>
54 #include <linux/pci_regs.h>
55 #include <linux/msi.h>
56 #include <linux/ethtool.h>
57 #include <linux/log2.h>
58 #include <linux/if_vlan.h>
59 #include <linux/random.h>
61 #include <linux/ktime.h>
63 #include <net/pkt_cls.h>
64 #include <net/vxlan.h>
66 #include "nfp_net_ctrl.h"
70 * nfp_net_get_fw_version() - Read and parse the FW version
71 * @fw_ver: Output fw_version structure to fill in
72 * @ctrl_bar: Mapped address of the control BAR
74 void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
75 void __iomem *ctrl_bar)
79 reg = readl(ctrl_bar + NFP_NET_CFG_VERSION);
80 put_unaligned_le32(reg, fw_ver);
85 * Firmware reconfig may take a while so we have two versions of it -
86 * synchronous and asynchronous (posted). All synchronous callers are holding
87 * RTNL so we don't have to worry about serializing them.
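/* Illustrative sketch (not part of the driver): a synchronous caller holds
 * RTNL and blocks until the firmware acks, while a posted caller fires and
 * forgets.  Both helpers are defined below; the update flag here is just an
 * example.
 *
 *	ASSERT_RTNL();
 *	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
 *	if (err)
 *		nn_err(nn, "reconfig failed: %d\n", err);
 *
 *	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
 */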
89 static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
91 nn_writel(nn, NFP_NET_CFG_UPDATE, update);
92 /* ensure update is written before pinging HW */
93 nn_pci_flush(nn);
94 nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
97 /* Pass 0 as update to run posted reconfigs. */
98 static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
100 update |= nn->reconfig_posted;
101 nn->reconfig_posted = 0;
103 nfp_net_reconfig_start(nn, update);
105 nn->reconfig_timer_active = true;
106 mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
109 static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
113 reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
116 if (reg & NFP_NET_CFG_UPDATE_ERR) {
117 nn_err(nn, "Reconfig error: 0x%08x\n", reg);
118 return true;
119 } else if (last_check) {
120 nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);
121 return true;
122 }
124 return false;
127 static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
129 bool timed_out = false;
131 /* Poll update field, waiting for NFP to ack the config */
132 while (!nfp_net_reconfig_check_done(nn, timed_out)) {
134 timed_out = time_is_before_eq_jiffies(deadline);
137 if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
138 return -EIO;
140 return timed_out ? -EIO : 0;
143 static void nfp_net_reconfig_timer(unsigned long data)
145 struct nfp_net *nn = (void *)data;
147 spin_lock_bh(&nn->reconfig_lock);
149 nn->reconfig_timer_active = false;
151 /* If sync caller is present it will take over from us */
152 if (nn->reconfig_sync_present)
153 goto done;
155 /* Read reconfig status and report errors */
156 nfp_net_reconfig_check_done(nn, true);
158 if (nn->reconfig_posted)
159 nfp_net_reconfig_start_async(nn, 0);
160 done:
161 spin_unlock_bh(&nn->reconfig_lock);
165 * nfp_net_reconfig_post() - Post async reconfig request
166 * @nn: NFP Net device to reconfigure
167 * @update: The value for the update field in the BAR config
169 * Record FW reconfiguration request. Reconfiguration will be kicked off
170 * whenever the reconfiguration machinery is idle. Multiple requests can be
171 * merged together.
173 static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
175 spin_lock_bh(&nn->reconfig_lock);
177 /* Sync caller will kick off async reconf when it's done, just post */
178 if (nn->reconfig_sync_present) {
179 nn->reconfig_posted |= update;
180 goto done;
183 /* Opportunistically check if the previous command is done */
184 if (!nn->reconfig_timer_active ||
185 nfp_net_reconfig_check_done(nn, false))
186 nfp_net_reconfig_start_async(nn, update);
187 else
188 nn->reconfig_posted |= update;
189 done:
190 spin_unlock_bh(&nn->reconfig_lock);
194 * nfp_net_reconfig() - Reconfigure the firmware
195 * @nn: NFP Net device to reconfigure
196 * @update: The value for the update field in the BAR config
198 * Write the update word to the BAR and ping the reconfig queue. Then
199 * poll until the firmware has acknowledged the update by zeroing the
200 * update word.
202 * Return: Negative errno on error, 0 on success
204 int nfp_net_reconfig(struct nfp_net *nn, u32 update)
206 bool cancelled_timer = false;
207 u32 pre_posted_requests;
208 int ret;
210 spin_lock_bh(&nn->reconfig_lock);
212 nn->reconfig_sync_present = true;
214 if (nn->reconfig_timer_active) {
215 del_timer(&nn->reconfig_timer);
216 nn->reconfig_timer_active = false;
217 cancelled_timer = true;
219 pre_posted_requests = nn->reconfig_posted;
220 nn->reconfig_posted = 0;
222 spin_unlock_bh(&nn->reconfig_lock);
224 if (cancelled_timer)
225 nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
227 /* Run the posted reconfigs which were issued before we started */
228 if (pre_posted_requests) {
229 nfp_net_reconfig_start(nn, pre_posted_requests);
230 nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
233 nfp_net_reconfig_start(nn, update);
234 ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
236 spin_lock_bh(&nn->reconfig_lock);
238 if (nn->reconfig_posted)
239 nfp_net_reconfig_start_async(nn, 0);
241 nn->reconfig_sync_present = false;
243 spin_unlock_bh(&nn->reconfig_lock);
245 return ret;
248 /* Interrupt configuration and handling
252 * nfp_net_irq_unmask() - Unmask automasked interrupt
253 * @nn: NFP Network structure
254 * @entry_nr: MSI-X table entry
256 * Clear the ICR for the IRQ entry.
258 static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
260 nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
265 * nfp_net_msix_alloc() - Try to allocate MSI-X irqs
266 * @nn: NFP Network structure
267 * @nr_vecs: Number of MSI-X vectors to allocate
269 * For MSI-X we want at least NFP_NET_NON_Q_VECTORS + 1 vectors.
271 * Return: Number of MSI-X vectors obtained or 0 on error.
273 static int nfp_net_msix_alloc(struct nfp_net *nn, int nr_vecs)
275 struct pci_dev *pdev = nn->pdev;
279 for (i = 0; i < nr_vecs; i++)
280 nn->irq_entries[i].entry = i;
282 nvecs = pci_enable_msix_range(pdev, nn->irq_entries,
283 NFP_NET_NON_Q_VECTORS + 1, nr_vecs);
284 if (nvecs <= 0) {
285 nn_warn(nn, "Failed to enable MSI-X. Wanted %d-%d (err=%d)\n",
286 NFP_NET_NON_Q_VECTORS + 1, nr_vecs, nvecs);
287 return 0;
288 }
290 return nvecs;
294 * nfp_net_irqs_wanted() - Work out how many interrupt vectors we want
295 * @nn: NFP Network structure
297 * We want a vector per CPU (or ring), whichever is smaller, plus
298 * NFP_NET_NON_Q_VECTORS for LSC etc.
300 * Return: Number of interrupts wanted
302 static int nfp_net_irqs_wanted(struct nfp_net *nn)
307 ncpus = num_online_cpus();
309 vecs = max_t(int, nn->num_tx_rings, nn->num_rx_rings);
310 vecs = min_t(int, vecs, ncpus);
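	/* Worked example (illustrative): 8 TX/RX rings on a 4-CPU system
	 * give vecs = min(8, 4) = 4, so 4 + NFP_NET_NON_Q_VECTORS vectors
	 * are requested in total.
	 */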
312 return vecs + NFP_NET_NON_Q_VECTORS;
316 * nfp_net_irqs_alloc() - allocates MSI-X irqs
317 * @nn: NFP Network structure
319 * Return: Number of irqs obtained or 0 on error.
321 int nfp_net_irqs_alloc(struct nfp_net *nn)
325 wanted_irqs = nfp_net_irqs_wanted(nn);
327 nn->num_irqs = nfp_net_msix_alloc(nn, wanted_irqs);
328 if (nn->num_irqs == 0) {
329 nn_err(nn, "Failed to allocate MSI-X IRQs\n");
330 return 0;
333 nn->num_r_vecs = nn->num_irqs - NFP_NET_NON_Q_VECTORS;
335 if (nn->num_irqs < wanted_irqs)
336 nn_warn(nn, "Unable to allocate %d vectors. Got %d instead\n",
337 wanted_irqs, nn->num_irqs);
343 * nfp_net_irqs_disable() - Disable interrupts
344 * @nn: NFP Network structure
346 * Undoes what @nfp_net_irqs_alloc() does.
348 void nfp_net_irqs_disable(struct nfp_net *nn)
350 pci_disable_msix(nn->pdev);
354 * nfp_net_irq_rxtx() - Interrupt service routine for RX/TX rings.
356 * @data: Opaque data structure
358 * Return: Indicate if the interrupt has been handled.
360 static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
362 struct nfp_net_r_vector *r_vec = data;
364 napi_schedule_irqoff(&r_vec->napi);
366 /* The FW auto-masks any interrupt, either via the MASK bit in
367 * the MSI-X table or via the per entry ICR field. So there
368 * is no need to disable interrupts here.
374 * nfp_net_read_link_status() - Reread link status from control BAR
375 * @nn: NFP Network structure
377 static void nfp_net_read_link_status(struct nfp_net *nn)
383 spin_lock_irqsave(&nn->link_status_lock, flags);
385 sts = nn_readl(nn, NFP_NET_CFG_STS);
386 link_up = !!(sts & NFP_NET_CFG_STS_LINK);
388 if (nn->link_up == link_up)
389 goto out;
391 nn->link_up = link_up;
393 if (nn->link_up) {
394 netif_carrier_on(nn->netdev);
395 netdev_info(nn->netdev, "NIC Link is Up\n");
396 } else {
397 netif_carrier_off(nn->netdev);
398 netdev_info(nn->netdev, "NIC Link is Down\n");
399 }
400 out:
401 spin_unlock_irqrestore(&nn->link_status_lock, flags);
405 * nfp_net_irq_lsc() - Interrupt service routine for link state changes
407 * @data: Opaque data structure
409 * Return: Indicate if the interrupt has been handled.
411 static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
413 struct nfp_net *nn = data;
415 nfp_net_read_link_status(nn);
417 nfp_net_irq_unmask(nn, NFP_NET_IRQ_LSC_IDX);
423 * nfp_net_irq_exn() - Interrupt service routine for exceptions
425 * @data: Opaque data structure
427 * Return: Indicate if the interrupt has been handled.
429 static irqreturn_t nfp_net_irq_exn(int irq, void *data)
431 struct nfp_net *nn = data;
433 nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__);
434 /* XXX TO BE IMPLEMENTED */
439 * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
440 * @tx_ring: TX ring structure
441 * @r_vec: IRQ vector servicing this ring
445 nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
446 struct nfp_net_r_vector *r_vec, unsigned int idx)
448 struct nfp_net *nn = r_vec->nfp_net;
451 tx_ring->r_vec = r_vec;
453 tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
454 tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
458 * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
459 * @rx_ring: RX ring structure
460 * @r_vec: IRQ vector servicing this ring
464 nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
465 struct nfp_net_r_vector *r_vec, unsigned int idx)
467 struct nfp_net *nn = r_vec->nfp_net;
470 rx_ring->r_vec = r_vec;
472 rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
473 rx_ring->rx_qcidx = rx_ring->fl_qcidx + (nn->stride_rx - 1);
475 rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
476 rx_ring->qcp_rx = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->rx_qcidx);
480 * nfp_net_irqs_assign() - Assign IRQs and setup rvecs.
481 * @netdev: netdev structure
483 static void nfp_net_irqs_assign(struct net_device *netdev)
485 struct nfp_net *nn = netdev_priv(netdev);
486 struct nfp_net_r_vector *r_vec;
489 /* Assumes nn->num_tx_rings == nn->num_rx_rings */
490 if (nn->num_tx_rings > nn->num_r_vecs) {
491 nn_warn(nn, "More rings (%d) than vectors (%d).\n",
492 nn->num_tx_rings, nn->num_r_vecs);
493 nn->num_tx_rings = nn->num_r_vecs;
494 nn->num_rx_rings = nn->num_r_vecs;
497 nn->lsc_handler = nfp_net_irq_lsc;
498 nn->exn_handler = nfp_net_irq_exn;
500 for (r = 0; r < nn->num_r_vecs; r++) {
501 r_vec = &nn->r_vecs[r];
503 r_vec->handler = nfp_net_irq_rxtx;
504 r_vec->irq_idx = NFP_NET_NON_Q_VECTORS + r;
506 cpumask_set_cpu(r, &r_vec->affinity_mask);
511 * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
512 * @nn: NFP Network structure
513 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
514 * @format: printf-style format to construct the interrupt name
515 * @name: Pointer to allocated space for interrupt name
516 * @name_sz: Size of space for interrupt name
517 * @vector_idx: Index of MSI-X vector used for this interrupt
518 * @handler: IRQ handler to register for this interrupt
521 nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
522 const char *format, char *name, size_t name_sz,
523 unsigned int vector_idx, irq_handler_t handler)
525 struct msix_entry *entry;
528 entry = &nn->irq_entries[vector_idx];
530 snprintf(name, name_sz, format, netdev_name(nn->netdev));
531 err = request_irq(entry->vector, handler, 0, name, nn);
533 nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
537 nn_writeb(nn, ctrl_offset, vector_idx);
543 * nfp_net_aux_irq_free() - Free an auxiliary interrupt (LSC or EXN)
544 * @nn: NFP Network structure
545 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
546 * @vector_idx: Index of MSI-X vector used for this interrupt
548 static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
549 unsigned int vector_idx)
551 nn_writeb(nn, ctrl_offset, 0xff);
552 free_irq(nn->irq_entries[vector_idx].vector, nn);
557 * One queue controller peripheral queue is used for transmit. The
558 * driver en-queues packets for transmit by advancing the write
559 * pointer. The device indicates that packets have transmitted by
560 * advancing the read pointer. The driver maintains a local copy of
561 * the read and write pointer in @struct nfp_net_tx_ring. The driver
562 * keeps @wr_p in sync with the queue controller write pointer and can
563 * determine how many packets have been transmitted by comparing its
564 * copy of the read pointer @rd_p with the read pointer maintained by
565 * the queue controller peripheral.
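 * For example (illustrative), with cnt = 4096, wr_p = 5000 and rd_p = 1000
 * there are 4000 descriptors in flight and the ring is nearly full.  The
 * pointers are free-running, so the difference wr_p - rd_p stays correct
 * even after the u32 counters wrap around.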
569 * nfp_net_tx_full() - Check if the TX ring is full
570 * @tx_ring: TX ring to check
571 * @dcnt: Number of descriptors that need to be enqueued (must be >= 1)
573 * This function checks, based on the *host copy* of the read/write
574 * pointers, whether a given TX ring is full. The real TX queue may have
575 * some newly freed slots the host copy has not observed yet.
577 * Return: True if the ring is full.
579 static int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
581 return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
584 /* Wrappers for deciding when to stop and restart TX queues */
585 static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
587 return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
590 static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
592 return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
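/* Note the hysteresis in the two helpers above: the queue is stopped once
 * fewer than MAX_SKB_FRAGS + 1 slots are free, but only woken again when
 * MAX_SKB_FRAGS * 4 slots are free, so it does not flap between the
 * stopped and running state on every completed packet.
 */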
596 * nfp_net_tx_ring_stop() - stop tx ring
597 * @nd_q: netdev queue
598 * @tx_ring: driver tx queue structure
600 * Safely stop TX ring. Remember that while we are running .start_xmit()
601 * someone else may be cleaning the TX ring completions so we need to be
602 * extra careful here.
604 static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
605 struct nfp_net_tx_ring *tx_ring)
607 netif_tx_stop_queue(nd_q);
609 /* We can race with the TX completion out of NAPI so recheck */
611 if (unlikely(nfp_net_tx_ring_should_wake(tx_ring)))
612 netif_tx_start_queue(nd_q);
616 * nfp_net_tx_tso() - Set up Tx descriptor for LSO
617 * @nn: NFP Net device
618 * @r_vec: per-ring structure
619 * @txbuf: Pointer to driver soft TX descriptor
620 * @txd: Pointer to HW TX descriptor
621 * @skb: Pointer to SKB
623 * Set up the Tx descriptor for LSO; do nothing for non-LSO skbs.
626 static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
627 struct nfp_net_tx_buf *txbuf,
628 struct nfp_net_tx_desc *txd, struct sk_buff *skb)
633 if (!skb_is_gso(skb))
634 return;
636 if (!skb->encapsulation)
637 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
639 hdrlen = skb_inner_transport_header(skb) - skb->data +
640 inner_tcp_hdrlen(skb);
642 txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
643 txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
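	/* Worked example (illustrative): a 54 byte header with 3000 bytes of
	 * payload and gso_size 1500 leaves as pkt_cnt = 2 segments, so the
	 * wire carries one extra copy of the header and real_len grows by
	 * hdrlen exactly once.
	 */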
645 mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
646 txd->l4_offset = hdrlen;
647 txd->mss = cpu_to_le16(mss);
648 txd->flags |= PCIE_DESC_TX_LSO;
650 u64_stats_update_begin(&r_vec->tx_sync);
651 r_vec->tx_lso++;
652 u64_stats_update_end(&r_vec->tx_sync);
656 * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
657 * @nn: NFP Net device
658 * @r_vec: per-ring structure
659 * @txbuf: Pointer to driver soft TX descriptor
660 * @txd: Pointer to TX descriptor
661 * @skb: Pointer to SKB
663 * This function sets the TX checksum flags in the TX descriptor based
664 * on the configuration and the protocol of the packet to be transmitted.
666 static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
667 struct nfp_net_tx_buf *txbuf,
668 struct nfp_net_tx_desc *txd, struct sk_buff *skb)
670 struct ipv6hdr *ipv6h;
674 if (!(nn->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
675 return;
677 if (skb->ip_summed != CHECKSUM_PARTIAL)
678 return;
680 txd->flags |= PCIE_DESC_TX_CSUM;
681 if (skb->encapsulation)
682 txd->flags |= PCIE_DESC_TX_ENCAP;
684 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
685 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
687 if (iph->version == 4) {
688 txd->flags |= PCIE_DESC_TX_IP4_CSUM;
689 l4_hdr = iph->protocol;
690 } else if (ipv6h->version == 6) {
691 l4_hdr = ipv6h->nexthdr;
692 } else {
693 nn_warn_ratelimit(nn, "partial checksum but ipv=%x!\n",
694 iph->version);
695 return;
696 }
698 switch (l4_hdr) {
699 case IPPROTO_TCP:
700 txd->flags |= PCIE_DESC_TX_TCP_CSUM;
701 break;
702 case IPPROTO_UDP:
703 txd->flags |= PCIE_DESC_TX_UDP_CSUM;
704 break;
705 default:
706 nn_warn_ratelimit(nn, "partial checksum but l4 proto=%x!\n",
707 l4_hdr);
708 return;
709 }
711 u64_stats_update_begin(&r_vec->tx_sync);
712 if (skb->encapsulation)
713 r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
714 else
715 r_vec->hw_csum_tx += txbuf->pkt_cnt;
716 u64_stats_update_end(&r_vec->tx_sync);
720 * nfp_net_tx() - Main transmit entry point
721 * @skb: SKB to transmit
722 * @netdev: netdev structure
724 * Return: NETDEV_TX_OK on success.
726 static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
728 struct nfp_net *nn = netdev_priv(netdev);
729 const struct skb_frag_struct *frag;
730 struct nfp_net_r_vector *r_vec;
731 struct nfp_net_tx_desc *txd, txdg;
732 struct nfp_net_tx_buf *txbuf;
733 struct nfp_net_tx_ring *tx_ring;
734 struct netdev_queue *nd_q;
741 qidx = skb_get_queue_mapping(skb);
742 tx_ring = &nn->tx_rings[qidx];
743 r_vec = tx_ring->r_vec;
744 nd_q = netdev_get_tx_queue(nn->netdev, qidx);
746 nr_frags = skb_shinfo(skb)->nr_frags;
748 if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
749 nn_warn_ratelimit(nn, "TX ring %d busy. wrp=%u rdp=%u\n",
750 qidx, tx_ring->wr_p, tx_ring->rd_p);
751 netif_tx_stop_queue(nd_q);
752 u64_stats_update_begin(&r_vec->tx_sync);
753 r_vec->tx_busy++;
754 u64_stats_update_end(&r_vec->tx_sync);
755 return NETDEV_TX_BUSY;
758 /* Start with the head skb */
759 dma_addr = dma_map_single(&nn->pdev->dev, skb->data, skb_headlen(skb),
761 if (dma_mapping_error(&nn->pdev->dev, dma_addr))
762 goto err_free;
764 wr_idx = tx_ring->wr_p % tx_ring->cnt;
766 /* Stash the soft descriptor of the head then initialize it */
767 txbuf = &tx_ring->txbufs[wr_idx];
768 txbuf->skb = skb;
769 txbuf->dma_addr = dma_addr;
770 txbuf->fidx = -1;
771 txbuf->pkt_cnt = 1;
772 txbuf->real_len = skb->len;
774 /* Build TX descriptor */
775 txd = &tx_ring->txds[wr_idx];
776 txd->offset_eop = (nr_frags == 0) ? PCIE_DESC_TX_EOP : 0;
777 txd->dma_len = cpu_to_le16(skb_headlen(skb));
778 nfp_desc_set_dma_addr(txd, dma_addr);
779 txd->data_len = cpu_to_le16(skb->len);
780 txd->flags = 0;
781 txd->mss = 0;
782 txd->l4_offset = 0;
785 nfp_net_tx_tso(nn, r_vec, txbuf, txd, skb);
787 nfp_net_tx_csum(nn, r_vec, txbuf, txd, skb);
789 if (skb_vlan_tag_present(skb) && nn->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
790 txd->flags |= PCIE_DESC_TX_VLAN;
791 txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
796 /* all descriptors must match except for address, length and EOP */
799 for (f = 0; f < nr_frags; f++) {
800 frag = &skb_shinfo(skb)->frags[f];
801 fsize = skb_frag_size(frag);
803 dma_addr = skb_frag_dma_map(&nn->pdev->dev, frag, 0,
804 fsize, DMA_TO_DEVICE);
805 if (dma_mapping_error(&nn->pdev->dev, dma_addr))
806 goto err_unmap;
808 wr_idx = (wr_idx + 1) % tx_ring->cnt;
809 tx_ring->txbufs[wr_idx].skb = skb;
810 tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
811 tx_ring->txbufs[wr_idx].fidx = f;
813 txd = &tx_ring->txds[wr_idx];
814 *txd = txdg;
815 txd->dma_len = cpu_to_le16(fsize);
816 nfp_desc_set_dma_addr(txd, dma_addr);
817 txd->offset_eop =
818 (f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;
821 u64_stats_update_begin(&r_vec->tx_sync);
822 r_vec->tx_gather++;
823 u64_stats_update_end(&r_vec->tx_sync);
826 netdev_tx_sent_queue(nd_q, txbuf->real_len);
828 tx_ring->wr_p += nr_frags + 1;
829 if (nfp_net_tx_ring_should_stop(tx_ring))
830 nfp_net_tx_ring_stop(nd_q, tx_ring);
832 tx_ring->wr_ptr_add += nr_frags + 1;
833 if (!skb->xmit_more || netif_xmit_stopped(nd_q)) {
834 /* force memory write before we let HW know */
835 wmb();
836 nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
837 tx_ring->wr_ptr_add = 0;
840 skb_tx_timestamp(skb);
842 return NETDEV_TX_OK;
844 err_unmap:
845 --f;
846 while (f >= 0) {
847 frag = &skb_shinfo(skb)->frags[f];
848 dma_unmap_page(&nn->pdev->dev,
849 tx_ring->txbufs[wr_idx].dma_addr,
850 skb_frag_size(frag), DMA_TO_DEVICE);
851 tx_ring->txbufs[wr_idx].skb = NULL;
852 tx_ring->txbufs[wr_idx].dma_addr = 0;
853 tx_ring->txbufs[wr_idx].fidx = -2;
854 wr_idx--;
855 if (wr_idx < 0)
856 wr_idx += tx_ring->cnt;
858 dma_unmap_single(&nn->pdev->dev, tx_ring->txbufs[wr_idx].dma_addr,
859 skb_headlen(skb), DMA_TO_DEVICE);
860 tx_ring->txbufs[wr_idx].skb = NULL;
861 tx_ring->txbufs[wr_idx].dma_addr = 0;
862 tx_ring->txbufs[wr_idx].fidx = -2;
863 err_free:
864 nn_warn_ratelimit(nn, "Failed to map DMA TX buffer\n");
865 u64_stats_update_begin(&r_vec->tx_sync);
866 r_vec->tx_errors++;
867 u64_stats_update_end(&r_vec->tx_sync);
868 dev_kfree_skb_any(skb);
873 * nfp_net_tx_complete() - Handle completed TX packets
874 * @tx_ring: TX ring structure
878 static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
880 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
881 struct nfp_net *nn = r_vec->nfp_net;
882 const struct skb_frag_struct *frag;
883 struct netdev_queue *nd_q;
884 u32 done_pkts = 0, done_bytes = 0;
891 /* Work out how many descriptors have been transmitted */
892 qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
894 if (qcp_rd_p == tx_ring->qcp_rd_p)
895 return;
897 if (qcp_rd_p > tx_ring->qcp_rd_p)
898 todo = qcp_rd_p - tx_ring->qcp_rd_p;
900 todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;
903 idx = tx_ring->rd_p % tx_ring->cnt;
904 tx_ring->rd_p++;
906 skb = tx_ring->txbufs[idx].skb;
907 if (!skb)
908 continue;
910 nr_frags = skb_shinfo(skb)->nr_frags;
911 fidx = tx_ring->txbufs[idx].fidx;
913 if (fidx == -1) {
914 /* unmap head & free skb */
915 dma_unmap_single(&nn->pdev->dev,
916 tx_ring->txbufs[idx].dma_addr,
917 skb_headlen(skb), DMA_TO_DEVICE);
919 done_pkts += tx_ring->txbufs[idx].pkt_cnt;
920 done_bytes += tx_ring->txbufs[idx].real_len;
921 } else {
922 /* unmap fragment */
923 frag = &skb_shinfo(skb)->frags[fidx];
924 dma_unmap_page(&nn->pdev->dev,
925 tx_ring->txbufs[idx].dma_addr,
926 skb_frag_size(frag), DMA_TO_DEVICE);
929 /* check for last gather fragment */
930 if (fidx == nr_frags - 1)
931 dev_kfree_skb_any(skb);
933 tx_ring->txbufs[idx].dma_addr = 0;
934 tx_ring->txbufs[idx].skb = NULL;
935 tx_ring->txbufs[idx].fidx = -2;
938 tx_ring->qcp_rd_p = qcp_rd_p;
940 u64_stats_update_begin(&r_vec->tx_sync);
941 r_vec->tx_bytes += done_bytes;
942 r_vec->tx_pkts += done_pkts;
943 u64_stats_update_end(&r_vec->tx_sync);
945 nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
946 netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
947 if (nfp_net_tx_ring_should_wake(tx_ring)) {
948 /* Make sure TX thread will see updated tx_ring->rd_p */
949 smp_mb();
951 if (unlikely(netif_tx_queue_stopped(nd_q)))
952 netif_tx_wake_queue(nd_q);
955 WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
956 "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
957 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
961 * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
962 * @nn: NFP Net device
963 * @tx_ring: TX ring structure
965 * Assumes that the device is stopped
968 nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
970 const struct skb_frag_struct *frag;
971 struct netdev_queue *nd_q;
972 struct pci_dev *pdev = nn->pdev;
974 while (tx_ring->rd_p != tx_ring->wr_p) {
975 int nr_frags, fidx, idx;
978 idx = tx_ring->rd_p % tx_ring->cnt;
979 skb = tx_ring->txbufs[idx].skb;
980 nr_frags = skb_shinfo(skb)->nr_frags;
981 fidx = tx_ring->txbufs[idx].fidx;
983 if (fidx == -1) {
984 /* unmap head & free skb */
985 dma_unmap_single(&pdev->dev,
986 tx_ring->txbufs[idx].dma_addr,
987 skb_headlen(skb), DMA_TO_DEVICE);
988 } else {
989 /* unmap fragment */
990 frag = &skb_shinfo(skb)->frags[fidx];
991 dma_unmap_page(&pdev->dev,
992 tx_ring->txbufs[idx].dma_addr,
993 skb_frag_size(frag), DMA_TO_DEVICE);
996 /* check for last gather fragment */
997 if (fidx == nr_frags - 1)
998 dev_kfree_skb_any(skb);
1000 tx_ring->txbufs[idx].dma_addr = 0;
1001 tx_ring->txbufs[idx].skb = NULL;
1002 tx_ring->txbufs[idx].fidx = -2;
1004 tx_ring->qcp_rd_p++;
1008 memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
1009 tx_ring->wr_p = 0;
1010 tx_ring->rd_p = 0;
1011 tx_ring->qcp_rd_p = 0;
1012 tx_ring->wr_ptr_add = 0;
1014 nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
1015 netdev_tx_reset_queue(nd_q);
1018 static void nfp_net_tx_timeout(struct net_device *netdev)
1020 struct nfp_net *nn = netdev_priv(netdev);
1023 for (i = 0; i < nn->num_tx_rings; i++) {
1024 if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
1025 continue;
1026 nn_warn(nn, "TX timeout on ring: %d\n", i);
1028 nn_warn(nn, "TX watchdog timeout\n");
1031 /* Receive processing
1034 nfp_net_calc_fl_bufsz(struct nfp_net *nn, unsigned int mtu)
1036 unsigned int fl_bufsz;
1038 if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
1039 fl_bufsz = NFP_NET_MAX_PREPEND;
1040 else
1041 fl_bufsz = nn->rx_offset;
1042 fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + mtu;
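	/* e.g. a 1500 byte MTU with a fixed rx_offset needs a freelist
	 * buffer of rx_offset + 14 (Ethernet header) + 8 (two VLAN tags)
	 * + 1500 bytes.
	 */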
1048 * nfp_net_rx_alloc_one() - Allocate and map skb for RX
1049 * @rx_ring: RX ring structure of the skb
1050 * @dma_addr: Pointer to storage for DMA address (output param)
1051 * @fl_bufsz: size of freelist buffers
1053 * This function allocates a new skb and maps it for DMA.
1055 * Return: allocated skb or NULL on failure.
1057 static struct sk_buff *
1058 nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr,
1059 unsigned int fl_bufsz)
1061 struct nfp_net *nn = rx_ring->r_vec->nfp_net;
1062 struct sk_buff *skb;
1064 skb = netdev_alloc_skb(nn->netdev, fl_bufsz);
1065 if (!skb) {
1066 nn_warn_ratelimit(nn, "Failed to alloc receive SKB\n");
1067 return NULL;
1068 }
1070 *dma_addr = dma_map_single(&nn->pdev->dev, skb->data,
1071 fl_bufsz, DMA_FROM_DEVICE);
1072 if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
1073 dev_kfree_skb_any(skb);
1074 nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
1075 return NULL;
1076 }
1078 return skb;
1082 * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings
1083 * @rx_ring: RX ring structure
1084 * @skb: Skb to put on rings
1085 * @dma_addr: DMA address of skb mapping
1087 static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring,
1088 struct sk_buff *skb, dma_addr_t dma_addr)
1090 unsigned int wr_idx;
1092 wr_idx = rx_ring->wr_p % rx_ring->cnt;
1094 /* Stash SKB and DMA address away */
1095 rx_ring->rxbufs[wr_idx].skb = skb;
1096 rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
1098 /* Fill freelist descriptor */
1099 rx_ring->rxds[wr_idx].fld.reserved = 0;
1100 rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
1101 nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld, dma_addr);
1103 rx_ring->wr_p++;
1104 rx_ring->wr_ptr_add++;
1105 if (rx_ring->wr_ptr_add >= NFP_NET_FL_BATCH) {
1106 /* Update write pointer of the freelist queue. Make
1107 * sure all writes are flushed before telling the hardware.
1109 wmb();
1110 nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, rx_ring->wr_ptr_add);
1111 rx_ring->wr_ptr_add = 0;
1116 * nfp_net_rx_ring_reset() - Reflect the disabled freelist in the SW state of the ring
1117 * @rx_ring: RX ring structure
1119 * Warning: Do *not* call if ring buffers were never put on the FW freelist
1120 * (i.e. device was not enabled)!
1122 static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
1124 unsigned int wr_idx, last_idx;
1126 /* Move the empty entry to the end of the list */
1127 wr_idx = rx_ring->wr_p % rx_ring->cnt;
1128 last_idx = rx_ring->cnt - 1;
1129 rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
1130 rx_ring->rxbufs[wr_idx].skb = rx_ring->rxbufs[last_idx].skb;
1131 rx_ring->rxbufs[last_idx].dma_addr = 0;
1132 rx_ring->rxbufs[last_idx].skb = NULL;
1134 memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);
1135 rx_ring->wr_p = 0;
1136 rx_ring->rd_p = 0;
1137 rx_ring->wr_ptr_add = 0;
1141 * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
1142 * @nn: NFP Net device
1143 * @rx_ring: RX ring to remove buffers from
1145 * Assumes that the device is stopped and the buffers occupy entries
1146 * [0, ring->cnt - 1). After the device is disabled nfp_net_rx_ring_reset() must be called
1147 * to restore required ring geometry.
1150 nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
1152 struct pci_dev *pdev = nn->pdev;
1155 for (i = 0; i < rx_ring->cnt - 1; i++) {
1156 /* NULL skb can only happen when initial filling of the ring
1157 * fails to allocate enough buffers and calls here to free
1158 * already allocated ones.
1160 if (!rx_ring->rxbufs[i].skb)
1161 continue;
1163 dma_unmap_single(&pdev->dev, rx_ring->rxbufs[i].dma_addr,
1164 rx_ring->bufsz, DMA_FROM_DEVICE);
1165 dev_kfree_skb_any(rx_ring->rxbufs[i].skb);
1166 rx_ring->rxbufs[i].dma_addr = 0;
1167 rx_ring->rxbufs[i].skb = NULL;
1172 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
1173 * @nn: NFP Net device
1174 * @rx_ring: RX ring to fill with buffers
1177 nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
1179 struct nfp_net_rx_buf *rxbufs;
1182 rxbufs = rx_ring->rxbufs;
1184 for (i = 0; i < rx_ring->cnt - 1; i++) {
1185 rxbufs[i].skb =
1186 nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr,
1187 nn->fl_bufsz);
1188 if (!rxbufs[i].skb) {
1189 nfp_net_rx_ring_bufs_free(nn, rx_ring);
1190 return -ENOMEM;
1198 * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
1199 * @rx_ring: RX ring to fill
1201 static void nfp_net_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
1205 for (i = 0; i < rx_ring->cnt - 1; i++)
1206 nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[i].skb,
1207 rx_ring->rxbufs[i].dma_addr);
1211 * nfp_net_rx_csum_has_errors() - grouped check for any checksum errors in the RX descriptor
1212 * @flags: RX descriptor flags field in CPU byte order
1214 static int nfp_net_rx_csum_has_errors(u16 flags)
1216 u16 csum_all_checked, csum_all_ok;
1218 csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
1219 csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
1221 return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
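/* The shift above aligns each CSUM_OK bit with its corresponding
 * "checksum checked" bit, so the comparison flags any protocol whose
 * checksum was checked but did not come back OK.
 */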
1225 * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags
1226 * @nn: NFP Net device
1227 * @r_vec: per-ring structure
1228 * @rxd: Pointer to RX descriptor
1229 * @skb: Pointer to SKB
1231 static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
1232 struct nfp_net_rx_desc *rxd, struct sk_buff *skb)
1234 skb_checksum_none_assert(skb);
1236 if (!(nn->netdev->features & NETIF_F_RXCSUM))
1237 return;
1239 if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
1240 u64_stats_update_begin(&r_vec->rx_sync);
1241 r_vec->hw_csum_rx_error++;
1242 u64_stats_update_end(&r_vec->rx_sync);
1243 return;
1244 }
1246 /* Assume that the firmware will never report inner CSUM_OK unless outer
1247 * L4 headers were successfully parsed. FW will always report zero UDP
1248 * checksum as CSUM_OK.
1250 if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
1251 rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
1252 __skb_incr_checksum_unnecessary(skb);
1253 u64_stats_update_begin(&r_vec->rx_sync);
1254 r_vec->hw_csum_rx_ok++;
1255 u64_stats_update_end(&r_vec->rx_sync);
1258 if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
1259 rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
1260 __skb_incr_checksum_unnecessary(skb);
1261 u64_stats_update_begin(&r_vec->rx_sync);
1262 r_vec->hw_csum_rx_inner_ok++;
1263 u64_stats_update_end(&r_vec->rx_sync);
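	/* The two increments above implement CHECKSUM_UNNECESSARY with
	 * csum_level 1: the second one raises skb->csum_level so the stack
	 * knows the inner L4 checksum was verified by hardware as well.
	 */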
1267 static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb,
1268 unsigned int type, __be32 *hash)
1270 if (!(netdev->features & NETIF_F_RXHASH))
1271 return;
1273 switch (type) {
1274 case NFP_NET_RSS_IPV4:
1275 case NFP_NET_RSS_IPV6:
1276 case NFP_NET_RSS_IPV6_EX:
1277 skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L3);
1278 break;
1279 default:
1280 skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L4);
1281 break;
1286 nfp_net_set_hash_desc(struct net_device *netdev, struct sk_buff *skb,
1287 struct nfp_net_rx_desc *rxd)
1289 struct nfp_net_rx_hash *rx_hash;
1291 if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
1292 return;
1294 rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash));
1296 nfp_net_set_hash(netdev, skb, get_unaligned_be32(&rx_hash->hash_type),
1301 nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
1304 u8 *data = skb->data - meta_len;
1307 meta_info = get_unaligned_be32(data);
1311 switch (meta_info & NFP_NET_META_FIELD_MASK) {
1312 case NFP_NET_META_HASH:
1313 meta_info >>= NFP_NET_META_FIELD_SIZE;
1314 nfp_net_set_hash(netdev, skb,
1315 meta_info & NFP_NET_META_FIELD_MASK,
1319 case NFP_NET_META_MARK:
1320 skb->mark = get_unaligned_be32(data);
1327 meta_info >>= NFP_NET_META_FIELD_SIZE;
1334 * nfp_net_rx() - receive up to @budget packets on @rx_ring
1335 * @rx_ring: RX ring to receive from
1336 * @budget: NAPI budget
1338 * Note, this function is separated out from the napi poll function to
1339 * more cleanly separate packet receive code from other bookkeeping
1340 * functions performed in the napi poll function.
1342 * Return: Number of packets received.
1344 static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
1346 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1347 struct nfp_net *nn = r_vec->nfp_net;
1348 unsigned int data_len, meta_len;
1349 struct sk_buff *skb, *new_skb;
1350 struct nfp_net_rx_desc *rxd;
1351 dma_addr_t new_dma_addr;
1352 int pkts_polled = 0;
1355 while (pkts_polled < budget) {
1356 idx = rx_ring->rd_p % rx_ring->cnt;
1358 rxd = &rx_ring->rxds[idx];
1359 if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
1360 break;
1362 /* Memory barrier to ensure that we won't do other reads
1363 * before the DD bit.
1364 */
1365 dma_rmb();
1367 rx_ring->rd_p++;
1368 pkts_polled++;
1370 skb = rx_ring->rxbufs[idx].skb;
1372 new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr,
1373 nn->fl_bufsz);
1374 if (unlikely(!new_skb)) {
1375 nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[idx].skb,
1376 rx_ring->rxbufs[idx].dma_addr);
1377 u64_stats_update_begin(&r_vec->rx_sync);
1378 r_vec->rx_drops++;
1379 u64_stats_update_end(&r_vec->rx_sync);
1380 continue;
1381 }
1383 dma_unmap_single(&nn->pdev->dev,
1384 rx_ring->rxbufs[idx].dma_addr,
1385 nn->fl_bufsz, DMA_FROM_DEVICE);
1387 nfp_net_rx_give_one(rx_ring, new_skb, new_dma_addr);
1390 * <-- [rx_offset] -->
1391 * ---------------------------------------------------------
1392 * | [XX] | metadata | packet | XXXX |
1393 * ---------------------------------------------------------
1394 * <---------------- data_len --------------->
1396 * The rx_offset is fixed for all packets, the meta_len can vary
1397 * on a packet by packet basis. If rx_offset is set to zero
1398 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
1399 * buffer and is immediately followed by the packet (no [XX]).
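 * Worked example (illustrative): with a fixed rx_offset, meta_len = 8 and
 * data_len = 1522 the code below reserves rx_offset bytes of headroom and
 * exposes 1522 - 8 = 1514 bytes of packet, leaving skb->data at the
 * Ethernet header.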
1401 meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
1402 data_len = le16_to_cpu(rxd->rxd.data_len);
1404 if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
1405 skb_reserve(skb, meta_len);
1406 else
1407 skb_reserve(skb, nn->rx_offset);
1408 skb_put(skb, data_len - meta_len);
1411 u64_stats_update_begin(&r_vec->rx_sync);
1412 r_vec->rx_pkts++;
1413 r_vec->rx_bytes += skb->len;
1414 u64_stats_update_end(&r_vec->rx_sync);
1416 if (nn->fw_ver.major <= 3) {
1417 nfp_net_set_hash_desc(nn->netdev, skb, rxd);
1418 } else if (meta_len) {
1421 end = nfp_net_parse_meta(nn->netdev, skb, meta_len);
1422 if (unlikely(end != skb->data)) {
1423 u64_stats_update_begin(&r_vec->rx_sync);
1424 r_vec->rx_drops++;
1425 u64_stats_update_end(&r_vec->rx_sync);
1427 dev_kfree_skb_any(skb);
1428 nn_warn_ratelimit(nn, "invalid RX packet metadata\n");
1429 continue;
1430 }
1431 }
1433 skb_record_rx_queue(skb, rx_ring->idx);
1434 skb->protocol = eth_type_trans(skb, nn->netdev);
1436 nfp_net_rx_csum(nn, r_vec, rxd, skb);
1438 if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
1439 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1440 le16_to_cpu(rxd->rxd.vlan));
1442 napi_gro_receive(&rx_ring->r_vec->napi, skb);
1443 }
1445 return pkts_polled;
1449 * nfp_net_poll() - napi poll function
1450 * @napi: NAPI structure
1451 * @budget: NAPI budget
1453 * Return: number of packets polled.
1455 static int nfp_net_poll(struct napi_struct *napi, int budget)
1457 struct nfp_net_r_vector *r_vec =
1458 container_of(napi, struct nfp_net_r_vector, napi);
1459 unsigned int pkts_polled;
1461 nfp_net_tx_complete(r_vec->tx_ring);
1463 pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
1465 if (pkts_polled < budget) {
1466 napi_complete_done(napi, pkts_polled);
1467 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_idx);
1468 }
1470 return pkts_polled;
1473 /* Setup and Configuration
1477 * nfp_net_tx_ring_free() - Free resources allocated to a TX ring
1478 * @tx_ring: TX ring to free
1480 static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
1482 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
1483 struct nfp_net *nn = r_vec->nfp_net;
1484 struct pci_dev *pdev = nn->pdev;
1486 kfree(tx_ring->txbufs);
1489 dma_free_coherent(&pdev->dev, tx_ring->size,
1490 tx_ring->txds, tx_ring->dma);
1493 tx_ring->txbufs = NULL;
1494 tx_ring->txds = NULL;
1500 * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
1501 * @tx_ring: TX Ring structure to allocate
1502 * @cnt: Ring buffer count
1504 * Return: 0 on success, negative errno otherwise.
1506 static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt)
1508 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
1509 struct nfp_net *nn = r_vec->nfp_net;
1510 struct pci_dev *pdev = nn->pdev;
1515 tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
1516 tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
1517 &tx_ring->dma, GFP_KERNEL);
1521 sz = sizeof(*tx_ring->txbufs) * tx_ring->cnt;
1522 tx_ring->txbufs = kzalloc(sz, GFP_KERNEL);
1523 if (!tx_ring->txbufs)
1526 netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask, tx_ring->idx);
1528 nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p\n",
1529 tx_ring->idx, tx_ring->qcidx,
1530 tx_ring->cnt, (unsigned long long)tx_ring->dma, tx_ring->txds);
1535 nfp_net_tx_ring_free(tx_ring);
1539 static struct nfp_net_tx_ring *
1540 nfp_net_shadow_tx_rings_prepare(struct nfp_net *nn, u32 buf_cnt)
1542 struct nfp_net_tx_ring *rings;
1545 rings = kcalloc(nn->num_tx_rings, sizeof(*rings), GFP_KERNEL);
1549 for (r = 0; r < nn->num_tx_rings; r++) {
1550 nfp_net_tx_ring_init(&rings[r], nn->tx_rings[r].r_vec, r);
1552 if (nfp_net_tx_ring_alloc(&rings[r], buf_cnt))
1560 nfp_net_tx_ring_free(&rings[r]);
1565 static struct nfp_net_tx_ring *
1566 nfp_net_shadow_tx_rings_swap(struct nfp_net *nn, struct nfp_net_tx_ring *rings)
1568 struct nfp_net_tx_ring *old = nn->tx_rings;
1571 for (r = 0; r < nn->num_tx_rings; r++)
1572 old[r].r_vec->tx_ring = &rings[r];
1574 nn->tx_rings = rings;
1579 nfp_net_shadow_tx_rings_free(struct nfp_net *nn, struct nfp_net_tx_ring *rings)
1586 for (r = 0; r < nn->num_tx_rings; r++)
1587 nfp_net_tx_ring_free(&rings[r]);
1593 * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
1594 * @rx_ring: RX ring to free
1596 static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
1598 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1599 struct nfp_net *nn = r_vec->nfp_net;
1600 struct pci_dev *pdev = nn->pdev;
1602 kfree(rx_ring->rxbufs);
1605 dma_free_coherent(&pdev->dev, rx_ring->size,
1606 rx_ring->rxds, rx_ring->dma);
1609 rx_ring->rxbufs = NULL;
1610 rx_ring->rxds = NULL;
1616 * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
1617 * @rx_ring: RX ring to allocate
1618 * @fl_bufsz: Size of buffers to allocate
1619 * @cnt: Ring buffer count
1621 * Return: 0 on success, negative errno otherwise.
1624 nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz,
1627 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1628 struct nfp_net *nn = r_vec->nfp_net;
1629 struct pci_dev *pdev = nn->pdev;
1633 rx_ring->bufsz = fl_bufsz;
1635 rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
1636 rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
1637 &rx_ring->dma, GFP_KERNEL);
1641 sz = sizeof(*rx_ring->rxbufs) * rx_ring->cnt;
1642 rx_ring->rxbufs = kzalloc(sz, GFP_KERNEL);
1643 if (!rx_ring->rxbufs)
1646 nn_dbg(nn, "RxQ%02d: FlQCidx=%02d RxQCidx=%02d cnt=%d dma=%#llx host=%p\n",
1647 rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx,
1648 rx_ring->cnt, (unsigned long long)rx_ring->dma, rx_ring->rxds);
1653 nfp_net_rx_ring_free(rx_ring);
1657 static struct nfp_net_rx_ring *
1658 nfp_net_shadow_rx_rings_prepare(struct nfp_net *nn, unsigned int fl_bufsz,
1661 struct nfp_net_rx_ring *rings;
1664 rings = kcalloc(nn->num_rx_rings, sizeof(*rings), GFP_KERNEL);
1668 for (r = 0; r < nn->num_rx_rings; r++) {
1669 nfp_net_rx_ring_init(&rings[r], nn->rx_rings[r].r_vec, r);
1671 if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, buf_cnt))
1674 if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r]))
1682 nfp_net_rx_ring_bufs_free(nn, &rings[r]);
1684 nfp_net_rx_ring_free(&rings[r]);
1690 static struct nfp_net_rx_ring *
1691 nfp_net_shadow_rx_rings_swap(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
1693 struct nfp_net_rx_ring *old = nn->rx_rings;
1696 for (r = 0; r < nn->num_rx_rings; r++)
1697 old[r].r_vec->rx_ring = &rings[r];
1699 nn->rx_rings = rings;
1704 nfp_net_shadow_rx_rings_free(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
1711 for (r = 0; r < nn->num_r_vecs; r++) {
1712 nfp_net_rx_ring_bufs_free(nn, &rings[r]);
1713 nfp_net_rx_ring_free(&rings[r]);
1720 nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
1723 struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
1726 r_vec->tx_ring = &nn->tx_rings[idx];
1727 nfp_net_tx_ring_init(r_vec->tx_ring, r_vec, idx);
1729 r_vec->rx_ring = &nn->rx_rings[idx];
1730 nfp_net_rx_ring_init(r_vec->rx_ring, r_vec, idx);
1732 snprintf(r_vec->name, sizeof(r_vec->name),
1733 "%s-rxtx-%d", nn->netdev->name, idx);
1734 err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec);
1736 nn_err(nn, "Error requesting IRQ %d\n", entry->vector);
1739 disable_irq(entry->vector);
1742 netif_napi_add(nn->netdev, &r_vec->napi,
1743 nfp_net_poll, NAPI_POLL_WEIGHT);
1745 irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask);
1747 nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry);
1753 nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
1755 struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
1757 irq_set_affinity_hint(entry->vector, NULL);
1758 netif_napi_del(&r_vec->napi);
1759 free_irq(entry->vector, r_vec);
1763 * nfp_net_rss_write_itbl() - Write RSS indirection table to device
1764 * @nn: NFP Net device to reconfigure
1766 void nfp_net_rss_write_itbl(struct nfp_net *nn)
1770 for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i += 4)
1771 nn_writel(nn, NFP_NET_CFG_RSS_ITBL + i,
1772 get_unaligned_le32(nn->rss_itbl + i));
1776 * nfp_net_rss_write_key() - Write RSS hash key to device
1777 * @nn: NFP Net device to reconfigure
1779 void nfp_net_rss_write_key(struct nfp_net *nn)
1783 for (i = 0; i < NFP_NET_CFG_RSS_KEY_SZ; i += 4)
1784 nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
1785 get_unaligned_le32(nn->rss_key + i));
1789 * nfp_net_coalesce_write_cfg() - Write irq coalescence configuration to HW
1790 * @nn: NFP Net device to reconfigure
1792 void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
1798 /* Compute factor used to convert coalesce '_usecs' parameters to
1799 * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
1800 * tick.
1802 factor = nn->me_freq_mhz / 16;
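	/* e.g. a 1200 MHz ME clock gives factor = 75, so one microsecond of
	 * coalescing delay corresponds to 75 timestamp ticks.
	 */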
1804 /* copy RX interrupt coalesce parameters */
1805 value = (nn->rx_coalesce_max_frames << 16) |
1806 (factor * nn->rx_coalesce_usecs);
1807 for (i = 0; i < nn->num_r_vecs; i++)
1808 nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);
1810 /* copy TX interrupt coalesce parameters */
1811 value = (nn->tx_coalesce_max_frames << 16) |
1812 (factor * nn->tx_coalesce_usecs);
1813 for (i = 0; i < nn->num_r_vecs; i++)
1814 nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
1818 * nfp_net_write_mac_addr() - Write MAC address to the device control BAR
1819 * @nn: NFP Net device to reconfigure
1821 * Writes the MAC address from the netdev to the device control BAR. Does not
1822 * perform the required reconfig. We do a bit of byte swapping dance because
1823 * firmware is LE.
1825 static void nfp_net_write_mac_addr(struct nfp_net *nn)
1827 nn_writel(nn, NFP_NET_CFG_MACADDR + 0,
1828 get_unaligned_be32(nn->netdev->dev_addr));
1829 nn_writew(nn, NFP_NET_CFG_MACADDR + 6,
1830 get_unaligned_be16(nn->netdev->dev_addr + 4));
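/* Illustrative example: for MAC address 00:15:4d:12:34:56 the two writes
 * above store 0x00154d12 at NFP_NET_CFG_MACADDR and 0x3456 at offset +6,
 * matching the byte-swapped layout described in the comment above.
 */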
1833 static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
1835 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
1836 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
1837 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
1839 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
1840 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
1841 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
1845 * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
1846 * @nn: NFP Net device to reconfigure
1848 static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
1850 u32 new_ctrl, update;
1854 new_ctrl = nn->ctrl;
1855 new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
1856 update = NFP_NET_CFG_UPDATE_GEN;
1857 update |= NFP_NET_CFG_UPDATE_MSIX;
1858 update |= NFP_NET_CFG_UPDATE_RING;
1860 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
1861 new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
1863 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
1864 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
1866 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
1867 err = nfp_net_reconfig(nn, update);
1869 nn_err(nn, "Could not disable device: %d\n", err);
1871 for (r = 0; r < nn->num_r_vecs; r++) {
1872 nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring);
1873 nfp_net_tx_ring_reset(nn, nn->r_vecs[r].tx_ring);
1874 nfp_net_vec_clear_ring_data(nn, r);
1877 nn->ctrl = new_ctrl;
1881 nfp_net_vec_write_ring_data(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
1884 /* Write the DMA address, size and MSI-X info to the device */
1885 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), r_vec->rx_ring->dma);
1886 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(r_vec->rx_ring->cnt));
1887 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), r_vec->irq_idx);
1889 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), r_vec->tx_ring->dma);
1890 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(r_vec->tx_ring->cnt));
1891 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), r_vec->irq_idx);
1894 static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
1896 u32 new_ctrl, update = 0;
1900 new_ctrl = nn->ctrl;
1902 if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
1903 nfp_net_rss_write_key(nn);
1904 nfp_net_rss_write_itbl(nn);
1905 nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg);
1906 update |= NFP_NET_CFG_UPDATE_RSS;
1909 if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
1910 nfp_net_coalesce_write_cfg(nn);
1912 new_ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
1913 update |= NFP_NET_CFG_UPDATE_IRQMOD;
1916 for (r = 0; r < nn->num_r_vecs; r++)
1917 nfp_net_vec_write_ring_data(nn, &nn->r_vecs[r], r);
1919 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ?
1920 0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1);
1922 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ?
1923 0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1);
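	/* e.g. four enabled rings produce the mask 0xf; the 64-ring case is
	 * special-cased above because shifting a u64 by 64 is undefined in C.
	 */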
1925 nfp_net_write_mac_addr(nn);
1927 nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu);
1928 nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz);
1931 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
1932 update |= NFP_NET_CFG_UPDATE_GEN;
1933 update |= NFP_NET_CFG_UPDATE_MSIX;
1934 update |= NFP_NET_CFG_UPDATE_RING;
1935 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
1936 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
1938 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
1939 err = nfp_net_reconfig(nn, update);
1941 nn->ctrl = new_ctrl;
1943 for (r = 0; r < nn->num_r_vecs; r++)
1944 nfp_net_rx_ring_fill_freelist(nn->r_vecs[r].rx_ring);
1946 /* Since reconfiguration requests while NFP is down are ignored we
1947 * have to wipe the entire VXLAN configuration and reinitialize it.
1949 if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) {
1950 memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
1951 memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
1952 udp_tunnel_get_rx_info(nn->netdev);
1959 * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
1960 * @nn: NFP Net device to reconfigure
1962 static int nfp_net_set_config_and_enable(struct nfp_net *nn)
1966 err = __nfp_net_set_config_and_enable(nn);
1968 nfp_net_clear_config_and_disable(nn);
1974 * nfp_net_open_stack() - Start the device from stack's perspective
1975 * @nn: NFP Net device to reconfigure
1977 static void nfp_net_open_stack(struct nfp_net *nn)
1981 for (r = 0; r < nn->num_r_vecs; r++) {
1982 napi_enable(&nn->r_vecs[r].napi);
1983 enable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
1986 netif_tx_wake_all_queues(nn->netdev);
1988 enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
1989 nfp_net_read_link_status(nn);
1992 static int nfp_net_netdev_open(struct net_device *netdev)
1994 struct nfp_net *nn = netdev_priv(netdev);
1997 if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
1998 nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl);
1999 return -EBUSY;
2002 /* Step 1: Allocate resources for rings and the like
2003 * - Request interrupts
2004 * - Allocate RX and TX ring resources
2005 * - Setup initial RSS table
2007 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
2008 nn->exn_name, sizeof(nn->exn_name),
2009 NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
2012 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
2013 nn->lsc_name, sizeof(nn->lsc_name),
2014 NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
2017 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2019 nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
2021 if (!nn->rx_rings) {
2025 nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings),
2027 if (!nn->tx_rings) {
2029 goto err_free_rx_rings;
2032 for (r = 0; r < nn->num_r_vecs; r++) {
2033 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
2035 goto err_free_prev_vecs;
2037 err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring, nn->txd_cnt);
2039 goto err_cleanup_vec_p;
2041 err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring,
2042 nn->fl_bufsz, nn->rxd_cnt);
2044 goto err_free_tx_ring_p;
2046 err = nfp_net_rx_ring_bufs_alloc(nn, nn->r_vecs[r].rx_ring);
2048 goto err_flush_rx_ring_p;
2051 err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
2053 goto err_free_rings;
2055 err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings);
2057 goto err_free_rings;
2059 /* Step 2: Configure the NFP
2060 * - Enable rings from 0 to tx_rings/rx_rings - 1.
2061 * - Write MAC address (in case it changed)
2063 * - Set the Freelist buffer size
2066 err = nfp_net_set_config_and_enable(nn);
2068 goto err_free_rings;
2070 /* Step 3: Enable for kernel
2071 * - put some freelist descriptors on each RX ring
2072 * - enable NAPI on each ring
2073 * - enable all TX queues
2076 nfp_net_open_stack(nn);
2084 nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
2085 err_flush_rx_ring_p:
2086 nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
2088 nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
2090 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2092 kfree(nn->tx_rings);
2094 kfree(nn->rx_rings);
2096 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2098 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
2103 * nfp_net_close_stack() - Quiesce the stack (part of close)
2104 * @nn: NFP Net device to reconfigure
2106 static void nfp_net_close_stack(struct nfp_net *nn)
2110 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2111 netif_carrier_off(nn->netdev);
2112 nn->link_up = false;
2114 for (r = 0; r < nn->num_r_vecs; r++) {
2115 disable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
2116 napi_disable(&nn->r_vecs[r].napi);
2119 netif_tx_disable(nn->netdev);
2123 * nfp_net_close_free_all() - Free all runtime resources
2124 * @nn: NFP Net device to reconfigure
2126 static void nfp_net_close_free_all(struct nfp_net *nn)
2130 for (r = 0; r < nn->num_r_vecs; r++) {
2131 nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
2132 nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
2133 nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
2134 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2137 kfree(nn->rx_rings);
2138 kfree(nn->tx_rings);
2140 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2141 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
2145 * nfp_net_netdev_close() - Called when the device is downed
2146 * @netdev: netdev structure
2148 static int nfp_net_netdev_close(struct net_device *netdev)
2150 struct nfp_net *nn = netdev_priv(netdev);
2152 if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) {
2153 nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl);
2154 return 0;
2155 }
2157 /* Step 1: Disable RX and TX rings from the Linux kernel perspective
2159 nfp_net_close_stack(nn);
2163 nfp_net_clear_config_and_disable(nn);
2165 /* Step 3: Free resources
2167 nfp_net_close_free_all(nn);
2169 nn_dbg(nn, "%s down\n", netdev->name);
2173 static void nfp_net_set_rx_mode(struct net_device *netdev)
2175 struct nfp_net *nn = netdev_priv(netdev);
2178 new_ctrl = nn->ctrl;
2180 if (netdev->flags & IFF_PROMISC) {
2181 if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
2182 new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
2184 nn_warn(nn, "FW does not support promiscuous mode\n");
2186 new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
2189 if (new_ctrl == nn->ctrl)
2190 return;
2192 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2193 nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
2195 nn->ctrl = new_ctrl;
2198 static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
2200 unsigned int old_mtu, old_fl_bufsz, new_fl_bufsz;
2201 struct nfp_net *nn = netdev_priv(netdev);
2202 struct nfp_net_rx_ring *tmp_rings;
2205 old_mtu = netdev->mtu;
2206 old_fl_bufsz = nn->fl_bufsz;
2207 new_fl_bufsz = nfp_net_calc_fl_bufsz(nn, new_mtu);
2209 if (!netif_running(netdev)) {
2210 netdev->mtu = new_mtu;
2211 nn->fl_bufsz = new_fl_bufsz;
2215 /* Prepare new rings */
2216 tmp_rings = nfp_net_shadow_rx_rings_prepare(nn, new_fl_bufsz,
2221 /* Stop device, swap in new rings, try to start the firmware */
2222 nfp_net_close_stack(nn);
2223 nfp_net_clear_config_and_disable(nn);
2225 tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
2227 netdev->mtu = new_mtu;
2228 nn->fl_bufsz = new_fl_bufsz;
2230 err = nfp_net_set_config_and_enable(nn);
2232 const int err_new = err;
2234 /* Try with old configuration and old rings */
2235 tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
2237 netdev->mtu = old_mtu;
2238 nn->fl_bufsz = old_fl_bufsz;
2240 err = __nfp_net_set_config_and_enable(nn);
2242 nn_err(nn, "Can't restore MTU - FW communication failed (%d,%d)\n",
2246 nfp_net_shadow_rx_rings_free(nn, tmp_rings);
2248 nfp_net_open_stack(nn);
int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
{
	struct nfp_net_tx_ring *tx_rings = NULL;
	struct nfp_net_rx_ring *rx_rings = NULL;
	u32 old_rxd_cnt, old_txd_cnt;
	int err;

	if (!netif_running(nn->netdev)) {
		nn->rxd_cnt = rxd_cnt;
		nn->txd_cnt = txd_cnt;
		return 0;
	}

	old_rxd_cnt = nn->rxd_cnt;
	old_txd_cnt = nn->txd_cnt;

	/* Prepare new rings */
	if (nn->rxd_cnt != rxd_cnt) {
		rx_rings = nfp_net_shadow_rx_rings_prepare(nn, nn->fl_bufsz,
							   rxd_cnt);
		if (!rx_rings)
			return -ENOMEM;
	}
	if (nn->txd_cnt != txd_cnt) {
		tx_rings = nfp_net_shadow_tx_rings_prepare(nn, txd_cnt);
		if (!tx_rings) {
			nfp_net_shadow_rx_rings_free(nn, rx_rings);
			return -ENOMEM;
		}
	}

	/* Stop device, swap in new rings, try to start the firmware */
	nfp_net_close_stack(nn);
	nfp_net_clear_config_and_disable(nn);

	if (rx_rings)
		rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings);
	if (tx_rings)
		tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);

	nn->rxd_cnt = rxd_cnt;
	nn->txd_cnt = txd_cnt;

	err = nfp_net_set_config_and_enable(nn);
	if (err) {
		const int err_new = err;

		/* Try with old configuration and old rings */
		if (rx_rings)
			rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings);
		if (tx_rings)
			tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);

		nn->rxd_cnt = old_rxd_cnt;
		nn->txd_cnt = old_txd_cnt;

		err = __nfp_net_set_config_and_enable(nn);
		if (err)
			nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
			       err, err_new);
	}

	nfp_net_shadow_rx_rings_free(nn, rx_rings);
	nfp_net_shadow_tx_rings_free(nn, tx_rings);

	nfp_net_open_stack(nn);

	return err;
}
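/* This is the worker behind the ethtool ring-resize path: the driver's
 * set_ringparam callback (in nfp_net_ethtool.c) is expected to validate
 * the requested descriptor counts against the device limits and then
 * call nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt); the exact call site
 * is an assumption here.
 */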
static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int r;

	for (r = 0; r < nn->num_r_vecs; r++) {
		struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
		u64 data[3];
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&r_vec->rx_sync);
			data[0] = r_vec->rx_pkts;
			data[1] = r_vec->rx_bytes;
			data[2] = r_vec->rx_drops;
		} while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
		stats->rx_packets += data[0];
		stats->rx_bytes += data[1];
		stats->rx_dropped += data[2];

		do {
			start = u64_stats_fetch_begin(&r_vec->tx_sync);
			data[0] = r_vec->tx_pkts;
			data[1] = r_vec->tx_bytes;
			data[2] = r_vec->tx_errors;
		} while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
		stats->tx_packets += data[0];
		stats->tx_bytes += data[1];
		stats->tx_errors += data[2];
	}

	return stats;
}
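/* The u64_stats_fetch_begin()/u64_stats_fetch_retry() loops snapshot the
 * per-ring 64-bit counters without locking the datapath: if the ring's
 * writer updates the counters mid-read, the sequence count changes and
 * the read is retried, so each triple of values is internally consistent.
 */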
static bool nfp_net_ebpf_capable(struct nfp_net *nn)
{
	if (nn->cap & NFP_NET_CFG_CTRL_BPF &&
	    nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI)
		return true;
	return false;
}
static int
nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
		 struct tc_to_netdev *tc)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
		return -ENOTSUPP;
	if (proto != htons(ETH_P_ALL))
		return -ENOTSUPP;

	if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn))
		return nfp_net_bpf_offload(nn, handle, proto, tc->cls_bpf);

	return -EINVAL;
}
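/* Example: with a BPF-capable firmware, a cls_bpf classifier attached to
 * the ingress qdisc is offered to the hardware, e.g. (illustrative tc
 * invocation, not taken from this driver's documentation):
 *
 *	tc qdisc add dev eth0 handle ffff: ingress
 *	tc filter add dev eth0 parent ffff: bpf obj prog.o
 *
 * Anything other than ingress cls_bpf with protocol "all" is refused so
 * the stack falls back to software classification.
 */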
static int nfp_net_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct nfp_net *nn = netdev_priv(netdev);
	u32 new_ctrl;
	int err;

	/* Assume this is not called with features we have not advertised */

	new_ctrl = nn->ctrl;

	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM)
			new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM;
	}

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
			new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_TXCSUM;
	}

	if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
			new_ctrl |= NFP_NET_CFG_CTRL_LSO;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_LSO;
	}

	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN;
	}

	if (changed & NETIF_F_HW_VLAN_CTAG_TX) {
		if (features & NETIF_F_HW_VLAN_CTAG_TX)
			new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN;
	}

	if (changed & NETIF_F_SG) {
		if (features & NETIF_F_SG)
			new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
	}

	if (changed & NETIF_F_HW_TC && nn->ctrl & NFP_NET_CFG_CTRL_BPF) {
		nn_err(nn, "Cannot disable HW TC offload while in use\n");
		return -EBUSY;
	}

	nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
	       netdev->features, features, changed);

	if (new_ctrl == nn->ctrl)
		return 0;

	nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->ctrl, new_ctrl);
	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		return err;

	nn->ctrl = new_ctrl;

	return 0;
}
static netdev_features_t
nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
		       netdev_features_t features)
{
	u8 l4_hdr;

	/* We can't do TSO over double tagged packets (802.1AD) */
	features &= vlan_features_check(skb, features);

	if (!skb->encapsulation)
		return features;

	/* Ensure that inner L4 header offset fits into TX descriptor field */
	if (skb_is_gso(skb)) {
		u32 hdrlen;

		hdrlen = skb_inner_transport_header(skb) - skb->data +
			inner_tcp_hdrlen(skb);

		if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ))
			features &= ~NETIF_F_GSO_MASK;
	}

	/* VXLAN/GRE check */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    (l4_hdr != IPPROTO_UDP && l4_hdr != IPPROTO_GRE) ||
	    (l4_hdr == IPPROTO_UDP &&
	     (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	      sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}
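/* Worked example of the GSO header check above: for a VXLAN-encapsulated
 * TCP segment the header chain is outer Ethernet (14) + outer IPv4 (20) +
 * UDP (8) + VXLAN (8) + inner Ethernet (14) + inner IPv4 (20) + TCP (20),
 * so the inner L4 payload starts 104 bytes into the packet.  Only if that
 * offset fits in NFP_NET_LSO_MAX_HDR_SZ can the TX descriptor describe
 * it; otherwise GSO is disabled for this skb and the stack segments it.
 */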
/**
 * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
 * @nn:   NFP Net device to reconfigure
 * @idx:  Index into the port table where new port should be written
 * @port: UDP port to configure (pass zero to remove VXLAN port)
 */
static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
{
	int i;

	nn->vxlan_ports[idx] = port;

	if (!(nn->ctrl & NFP_NET_CFG_CTRL_VXLAN))
		return;

	BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2)
		nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
			  be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 |
			  be16_to_cpu(nn->vxlan_ports[i]));

	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN);
}
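/* The write loop above packs two 16-bit ports into each 32-bit register,
 * which is why the BUILD_BUG_ON() insists the table length is even.  For
 * example, with vxlan_ports[0] = 4789 (0x12b5) and vxlan_ports[1] = 4790
 * (0x12b6), the first register receives (0x12b6 << 16) | 0x12b5 =
 * 0x12b612b5.
 */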
/**
 * nfp_net_find_vxlan_idx() - find table entry of the port or a free one
 * @nn:   NFP Network structure
 * @port: UDP port to look for
 *
 * Return: if the port is already in the table -- its position;
 *	   if the port is not in the table -- a free position to use;
 *	   if the table is full -- -ENOSPC.
 */
static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port)
{
	int i, free_idx = -ENOSPC;

	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
		if (nn->vxlan_ports[i] == port)
			return i;
		if (!nn->vxlan_usecnt[i])
			free_idx = i;
	}

	return free_idx;
}
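/* The add/del callbacks below pair this lookup with the vxlan_usecnt[]
 * reference counts: a port is written to the hardware table only on the
 * 0 -> 1 transition and cleared only on 1 -> 0, so repeated adds of the
 * same UDP port (e.g. from several VXLAN devices) program the table once.
 */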
static void nfp_net_add_vxlan_port(struct net_device *netdev,
				   struct udp_tunnel_info *ti)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int idx;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	idx = nfp_net_find_vxlan_idx(nn, ti->port);
	if (idx == -ENOSPC)
		return;

	if (!nn->vxlan_usecnt[idx]++)
		nfp_net_set_vxlan_port(nn, idx, ti->port);
}
static void nfp_net_del_vxlan_port(struct net_device *netdev,
				   struct udp_tunnel_info *ti)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int idx;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	idx = nfp_net_find_vxlan_idx(nn, ti->port);
	if (idx == -ENOSPC || !nn->vxlan_usecnt[idx])
		return;

	if (!--nn->vxlan_usecnt[idx])
		nfp_net_set_vxlan_port(nn, idx, 0);
}
static const struct net_device_ops nfp_net_netdev_ops = {
	.ndo_open		= nfp_net_netdev_open,
	.ndo_stop		= nfp_net_netdev_close,
	.ndo_start_xmit		= nfp_net_tx,
	.ndo_get_stats64	= nfp_net_stat64,
	.ndo_setup_tc		= nfp_net_setup_tc,
	.ndo_tx_timeout		= nfp_net_tx_timeout,
	.ndo_set_rx_mode	= nfp_net_set_rx_mode,
	.ndo_change_mtu		= nfp_net_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_features	= nfp_net_set_features,
	.ndo_features_check	= nfp_net_features_check,
	.ndo_udp_tunnel_add	= nfp_net_add_vxlan_port,
	.ndo_udp_tunnel_del	= nfp_net_del_vxlan_port,
};
/**
 * nfp_net_info() - Print general info about the NIC
 * @nn:      NFP Net device to print info about
 */
void nfp_net_info(struct nfp_net *nn)
{
	nn_info(nn, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
		nn->is_vf ? "VF " : "",
		nn->num_tx_rings, nn->max_tx_rings,
		nn->num_rx_rings, nn->max_rx_rings);
	nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
		nn->fw_ver.resv, nn->fw_ver.class,
		nn->fw_ver.major, nn->fw_ver.minor,
		nn->max_mtu);
	nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		nn->cap,
		nn->cap & NFP_NET_CFG_CTRL_PROMISC  ? "PROMISC "  : "",
		nn->cap & NFP_NET_CFG_CTRL_L2BC     ? "L2BCFILT " : "",
		nn->cap & NFP_NET_CFG_CTRL_L2MC     ? "L2MCFILT " : "",
		nn->cap & NFP_NET_CFG_CTRL_RXCSUM   ? "RXCSUM "   : "",
		nn->cap & NFP_NET_CFG_CTRL_TXCSUM   ? "TXCSUM "   : "",
		nn->cap & NFP_NET_CFG_CTRL_RXVLAN   ? "RXVLAN "   : "",
		nn->cap & NFP_NET_CFG_CTRL_TXVLAN   ? "TXVLAN "   : "",
		nn->cap & NFP_NET_CFG_CTRL_SCATTER  ? "SCATTER "  : "",
		nn->cap & NFP_NET_CFG_CTRL_GATHER   ? "GATHER "   : "",
		nn->cap & NFP_NET_CFG_CTRL_LSO      ? "TSO "      : "",
		nn->cap & NFP_NET_CFG_CTRL_RSS      ? "RSS "      : "",
		nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "",
		nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
		nn->cap & NFP_NET_CFG_CTRL_IRQMOD   ? "IRQMOD "   : "",
		nn->cap & NFP_NET_CFG_CTRL_VXLAN    ? "VXLAN "    : "",
		nn->cap & NFP_NET_CFG_CTRL_NVGRE    ? "NVGRE "    : "",
		nfp_net_ebpf_capable(nn)            ? "BPF "      : "");
}
/**
 * nfp_net_netdev_alloc() - Allocate netdev and related structure
 * @pdev:         PCI device
 * @max_tx_rings: Maximum number of TX rings supported by device
 * @max_rx_rings: Maximum number of RX rings supported by device
 *
 * This function allocates a netdev device and fills in the initial
 * part of the @struct nfp_net structure.
 *
 * Return: NFP Net device structure, or ERR_PTR on error.
 */
struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
				     int max_tx_rings, int max_rx_rings)
{
	struct net_device *netdev;
	struct nfp_net *nn;
	int nqs;

	netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
				    max_tx_rings, max_rx_rings);
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	SET_NETDEV_DEV(netdev, &pdev->dev);
	nn = netdev_priv(netdev);

	nn->netdev = netdev;
	nn->pdev = pdev;

	nn->max_tx_rings = max_tx_rings;
	nn->max_rx_rings = max_rx_rings;

	nqs = netif_get_num_default_rss_queues();
	nn->num_tx_rings = min_t(int, nqs, max_tx_rings);
	nn->num_rx_rings = min_t(int, nqs, max_rx_rings);

	nn->txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
	nn->rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;

	spin_lock_init(&nn->reconfig_lock);
	spin_lock_init(&nn->rx_filter_lock);
	spin_lock_init(&nn->link_status_lock);

	setup_timer(&nn->reconfig_timer,
		    nfp_net_reconfig_timer, (unsigned long)nn);
	setup_timer(&nn->rx_filter_stats_timer,
		    nfp_net_filter_stats_timer, (unsigned long)nn);

	return nn;
}
/**
 * nfp_net_netdev_free() - Undo what @nfp_net_netdev_alloc() did
 * @nn:      NFP Net device to free
 */
void nfp_net_netdev_free(struct nfp_net *nn)
{
	free_netdev(nn->netdev);
}
/**
 * nfp_net_rss_init() - Set the initial RSS parameters
 * @nn:      NFP Net device to configure
 */
static void nfp_net_rss_init(struct nfp_net *nn)
{
	int i;

	netdev_rss_key_fill(nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ);

	for (i = 0; i < sizeof(nn->rss_itbl); i++)
		nn->rss_itbl[i] =
			ethtool_rxfh_indir_default(i, nn->num_rx_rings);

	/* Enable IPv4/IPv6 TCP by default */
	nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
		      NFP_NET_CFG_RSS_IPV6_TCP |
		      NFP_NET_CFG_RSS_TOEPLITZ |
		      NFP_NET_CFG_RSS_MASK;
}
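/* ethtool_rxfh_indir_default() spreads table entries round-robin across
 * the rings (index % num_rx_rings), so with e.g. 4 RX rings the
 * indirection table is filled with 0, 1, 2, 3, 0, 1, ...  The Toeplitz
 * hash of each flow then selects a table slot and, with it, the
 * destination ring.
 */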
/**
 * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters
 * @nn:      NFP Net device to configure
 */
static void nfp_net_irqmod_init(struct nfp_net *nn)
{
	nn->rx_coalesce_usecs      = 50;
	nn->rx_coalesce_max_frames = 64;
	nn->tx_coalesce_usecs      = 50;
	nn->tx_coalesce_max_frames = 64;
}
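/* With these defaults an interrupt fires once 50 usecs have elapsed or 64
 * frames have accumulated, whichever comes first.  When the firmware
 * advertises NFP_NET_CFG_CTRL_IRQMOD, both values can be adjusted at
 * runtime through the ethtool coalesce interface.
 */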
/**
 * nfp_net_netdev_init() - Initialise/finalise the netdev structure
 * @netdev:      netdev structure
 *
 * Return: 0 on success or negative errno on error.
 */
int nfp_net_netdev_init(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int err;

	/* Get some of the read-only fields from the BAR */
	nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
	nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);

	nfp_net_write_mac_addr(nn);

	/* Determine RX packet/metadata boundary offset */
	if (nn->fw_ver.major >= 2)
		nn->rx_offset = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
	else
		nn->rx_offset = NFP_NET_RX_OFFSET;

	/* Set default MTU and Freelist buffer size */
	if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
		netdev->mtu = nn->max_mtu;
	else
		netdev->mtu = NFP_NET_DEFAULT_MTU;
	nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, netdev->mtu);

	/* Advertise/enable offloads based on capabilities
	 *
	 * Note: netdev->features show the currently enabled features
	 * and netdev->hw_features advertises which features are
	 * supported. By default we enable most features.
	 */
	netdev->hw_features = NETIF_F_HIGHDMA;
	if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM) {
		netdev->hw_features |= NETIF_F_RXCSUM;
		nn->ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
		netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		nn->ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
		netdev->hw_features |= NETIF_F_SG;
		nn->ctrl |= NFP_NET_CFG_CTRL_GATHER;
	}
	if ((nn->cap & NFP_NET_CFG_CTRL_LSO) && nn->fw_ver.major > 2) {
		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		nn->ctrl |= NFP_NET_CFG_CTRL_LSO;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
		netdev->hw_features |= NETIF_F_RXHASH;
		nfp_net_rss_init(nn);
		nn->ctrl |= NFP_NET_CFG_CTRL_RSS;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
	    nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
		if (nn->cap & NFP_NET_CFG_CTRL_LSO)
			netdev->hw_features |= NETIF_F_GSO_GRE |
					       NETIF_F_GSO_UDP_TUNNEL;
		nn->ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;

		netdev->hw_enc_features = netdev->hw_features;
	}

	netdev->vlan_features = netdev->hw_features;

	if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
		nn->ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
		nn->ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
	}

	netdev->features = netdev->hw_features;

	if (nfp_net_ebpf_capable(nn))
		netdev->hw_features |= NETIF_F_HW_TC;

	/* Advertise but disable TSO by default. */
	netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);

	/* Allow L2 Broadcast and Multicast through by default, if supported */
	if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
		nn->ctrl |= NFP_NET_CFG_CTRL_L2BC;
	if (nn->cap & NFP_NET_CFG_CTRL_L2MC)
		nn->ctrl |= NFP_NET_CFG_CTRL_L2MC;

	/* Allow IRQ moderation, if supported */
	if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
		nfp_net_irqmod_init(nn);
		nn->ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
	}

	/* Stash the re-configuration queue away.  First odd queue in TX Bar */
	nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;

	/* Make sure the FW knows the netdev is supposed to be disabled here */
	nn_writel(nn, NFP_NET_CFG_CTRL, 0);
	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING |
				   NFP_NET_CFG_UPDATE_GEN);
	if (err)
		return err;

	/* Finalise the netdev setup */
	netdev->netdev_ops = &nfp_net_netdev_ops;
	netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);

	/* MTU range: 68 - hw-specific max */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = nn->max_mtu;

	netif_carrier_off(netdev);

	nfp_net_set_ethtool_ops(netdev);
	nfp_net_irqs_assign(netdev);

	return register_netdev(netdev);
}
/**
 * nfp_net_netdev_clean() - Undo what nfp_net_netdev_init() did.
 * @netdev:      netdev structure
 */
void nfp_net_netdev_clean(struct net_device *netdev)
{
	unregister_netdev(netdev);
}