1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2015-2018 Netronome Systems, Inc. */
6 * Netronome network device driver: Common functions between PF and VF
7 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
8 * Jason McMullan <jason.mcmullan@netronome.com>
9 * Rolf Neugebauer <rolf.neugebauer@netronome.com>
10 * Brad Petrus <brad.petrus@netronome.com>
11 * Chris Telfer <chris.telfer@netronome.com>
14 #include <linux/bitfield.h>
15 #include <linux/bpf.h>
16 #include <linux/bpf_trace.h>
17 #include <linux/module.h>
18 #include <linux/kernel.h>
19 #include <linux/init.h>
21 #include <linux/netdevice.h>
22 #include <linux/etherdevice.h>
23 #include <linux/interrupt.h>
25 #include <linux/ipv6.h>
26 #include <linux/lockdep.h>
28 #include <linux/overflow.h>
29 #include <linux/page_ref.h>
30 #include <linux/pci.h>
31 #include <linux/pci_regs.h>
32 #include <linux/msi.h>
33 #include <linux/ethtool.h>
34 #include <linux/log2.h>
35 #include <linux/if_vlan.h>
36 #include <linux/random.h>
37 #include <linux/vmalloc.h>
38 #include <linux/ktime.h>
40 #include <net/vxlan.h>
42 #include "nfpcore/nfp_nsp.h"
44 #include "nfp_net_ctrl.h"
46 #include "nfp_net_sriov.h"
50 * nfp_net_get_fw_version() - Read and parse the FW version
51 * @fw_ver: Output fw_version structure to read into
52 * @ctrl_bar: Mapped address of the control BAR
54 void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
55 void __iomem *ctrl_bar)
59 reg = readl(ctrl_bar + NFP_NET_CFG_VERSION);
60 put_unaligned_le32(reg, fw_ver);
63 static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
65 return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
66 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
67 dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
71 nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
73 dma_sync_single_for_device(dp->dev, dma_addr,
74 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
78 static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr)
80 dma_unmap_single_attrs(dp->dev, dma_addr,
81 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
82 dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
85 static void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr,
88 dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
94 * Firmware reconfig may take a while so we have two versions of it -
95 * synchronous and asynchronous (posted). All synchronous callers are holding
96 * RTNL so we don't have to worry about serializing them.
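 *
 * Illustrative call pattern (based on the helpers below): a synchronous
 * caller takes the BAR lock via nfp_net_reconfig(nn, update) and waits for
 * completion, while contexts that cannot block post the request with
 * nfp_net_reconfig_post(nn, update) and let the reconfig timer or a later
 * synchronous caller pick it up.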
98 static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
100 nn_writel(nn, NFP_NET_CFG_UPDATE, update);
101 /* ensure update is written before pinging HW */
103 nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
104 nn->reconfig_in_progress_update = update;
107 /* Pass 0 as update to run posted reconfigs. */
108 static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
110 update |= nn->reconfig_posted;
111 nn->reconfig_posted = 0;
113 nfp_net_reconfig_start(nn, update);
115 nn->reconfig_timer_active = true;
116 mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
119 static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
123 reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
126 if (reg & NFP_NET_CFG_UPDATE_ERR) {
127 nn_err(nn, "Reconfig error (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
128 reg, nn->reconfig_in_progress_update,
129 nn_readl(nn, NFP_NET_CFG_CTRL));
131 } else if (last_check) {
132 nn_err(nn, "Reconfig timeout (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
133 reg, nn->reconfig_in_progress_update,
134 nn_readl(nn, NFP_NET_CFG_CTRL));
141 static bool __nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
143 bool timed_out = false;
146 /* Poll update field, waiting for NFP to ack the config.
147 * Do an opportunistic busy-wait loop first, then sleep between polls.
149 for (i = 0; i < 50; i++) {
150 if (nfp_net_reconfig_check_done(nn, false))
155 while (!nfp_net_reconfig_check_done(nn, timed_out)) {
156 usleep_range(250, 500);
157 timed_out = time_is_before_eq_jiffies(deadline);
163 static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
165 if (__nfp_net_reconfig_wait(nn, deadline))
168 if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
174 static void nfp_net_reconfig_timer(struct timer_list *t)
176 struct nfp_net *nn = from_timer(nn, t, reconfig_timer);
178 spin_lock_bh(&nn->reconfig_lock);
180 nn->reconfig_timer_active = false;
182 /* If sync caller is present it will take over from us */
183 if (nn->reconfig_sync_present)
186 /* Read reconfig status and report errors */
187 nfp_net_reconfig_check_done(nn, true);
189 if (nn->reconfig_posted)
190 nfp_net_reconfig_start_async(nn, 0);
192 spin_unlock_bh(&nn->reconfig_lock);
196 * nfp_net_reconfig_post() - Post async reconfig request
197 * @nn: NFP Net device to reconfigure
198 * @update: The value for the update field in the BAR config
200 * Record FW reconfiguration request. Reconfiguration will be kicked off
201 * whenever reconfiguration machinery is idle. Multiple requests can be
204 static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
206 spin_lock_bh(&nn->reconfig_lock);
208 /* Sync caller will kick off async reconf when it's done, just post */
209 if (nn->reconfig_sync_present) {
210 nn->reconfig_posted |= update;
214 /* Opportunistically check if the previous command is done */
215 if (!nn->reconfig_timer_active ||
216 nfp_net_reconfig_check_done(nn, false))
217 nfp_net_reconfig_start_async(nn, update);
219 nn->reconfig_posted |= update;
221 spin_unlock_bh(&nn->reconfig_lock);
224 static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
226 bool cancelled_timer = false;
227 u32 pre_posted_requests;
229 spin_lock_bh(&nn->reconfig_lock);
231 nn->reconfig_sync_present = true;
233 if (nn->reconfig_timer_active) {
234 nn->reconfig_timer_active = false;
235 cancelled_timer = true;
237 pre_posted_requests = nn->reconfig_posted;
238 nn->reconfig_posted = 0;
240 spin_unlock_bh(&nn->reconfig_lock);
242 if (cancelled_timer) {
243 del_timer_sync(&nn->reconfig_timer);
244 nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
247 /* Run the posted reconfigs which were issued before we started */
248 if (pre_posted_requests) {
249 nfp_net_reconfig_start(nn, pre_posted_requests);
250 nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
254 static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
256 nfp_net_reconfig_sync_enter(nn);
258 spin_lock_bh(&nn->reconfig_lock);
259 nn->reconfig_sync_present = false;
260 spin_unlock_bh(&nn->reconfig_lock);
264 * __nfp_net_reconfig() - Reconfigure the firmware
265 * @nn: NFP Net device to reconfigure
266 * @update: The value for the update field in the BAR config
268 * Write the update word to the BAR and ping the reconfig queue. Then
269 * poll until the firmware has acknowledged the update by zeroing the
272 * Return: Negative errno on error, 0 on success
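 *
 * Illustrative: nfp_net_reconfig() below is the locked wrapper most callers
 * use -
 *	nn_ctrl_bar_lock(nn);
 *	ret = __nfp_net_reconfig(nn, update);
 *	nn_ctrl_bar_unlock(nn);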
274 static int __nfp_net_reconfig(struct nfp_net *nn, u32 update)
278 lockdep_assert_held(&nn->bar_lock);
280 nfp_net_reconfig_sync_enter(nn);
282 nfp_net_reconfig_start(nn, update);
283 ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
285 spin_lock_bh(&nn->reconfig_lock);
287 if (nn->reconfig_posted)
288 nfp_net_reconfig_start_async(nn, 0);
290 nn->reconfig_sync_present = false;
292 spin_unlock_bh(&nn->reconfig_lock);
297 int nfp_net_reconfig(struct nfp_net *nn, u32 update)
301 nn_ctrl_bar_lock(nn);
302 ret = __nfp_net_reconfig(nn, update);
303 nn_ctrl_bar_unlock(nn);
308 int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size)
310 if (nn->tlv_caps.mbox_len < NFP_NET_CFG_MBOX_SIMPLE_VAL + data_size) {
311 nn_err(nn, "mailbox too small for %u of data (%u)\n",
312 data_size, nn->tlv_caps.mbox_len);
316 nn_ctrl_bar_lock(nn);
321 * nfp_net_mbox_reconfig() - Reconfigure the firmware via the mailbox
322 * @nn: NFP Net device to reconfigure
323 * @mbox_cmd: The value for the mailbox command
325 * Helper function for mailbox updates
327 * Return: Negative errno on error, 0 on success
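 *
 * Illustrative usage (a sketch, assuming the lock/unlock helpers in this
 * file): a caller reserves mailbox space, writes its arguments after
 * NFP_NET_CFG_MBOX_SIMPLE_VAL and then issues the command -
 *	err = nfp_net_mbox_lock(nn, sizeof(u32));
 *	if (err)
 *		return err;
 *	nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL, arg);
 *	return nfp_net_mbox_reconfig_and_unlock(nn, cmd);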
329 int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd)
331 u32 mbox = nn->tlv_caps.mbox_off;
334 lockdep_assert_held(&nn->bar_lock);
335 nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
337 ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
339 nn_err(nn, "Mailbox update error\n");
343 return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
346 int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd)
350 ret = nfp_net_mbox_reconfig(nn, mbox_cmd);
351 nn_ctrl_bar_unlock(nn);
355 /* Interrupt configuration and handling
359 * nfp_net_irq_unmask() - Unmask automasked interrupt
360 * @nn: NFP Network structure
361 * @entry_nr: MSI-X table entry
363 * Clear the ICR for the IRQ entry.
365 static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
367 nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
372 * nfp_net_irqs_alloc() - allocates MSI-X irqs
373 * @pdev: PCI device structure
374 * @irq_entries: Array to be initialized and used to hold the irq entries
375 * @min_irqs: Minimal acceptable number of interrupts
376 * @wanted_irqs: Target number of interrupts to allocate
378 * Return: Number of irqs obtained or 0 on error.
381 nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
382 unsigned int min_irqs, unsigned int wanted_irqs)
387 for (i = 0; i < wanted_irqs; i++)
388 irq_entries[i].entry = i;
390 got_irqs = pci_enable_msix_range(pdev, irq_entries,
391 min_irqs, wanted_irqs);
393 dev_err(&pdev->dev, "Failed to enable %d-%d MSI-X (err=%d)\n",
394 min_irqs, wanted_irqs, got_irqs);
398 if (got_irqs < wanted_irqs)
399 dev_warn(&pdev->dev, "Unable to allocate %d IRQs got only %d\n",
400 wanted_irqs, got_irqs);
406 * nfp_net_irqs_assign() - Assign interrupts allocated externally to netdev
407 * @nn: NFP Network structure
408 * @irq_entries: Table of allocated interrupts
409 * @n: Size of @irq_entries (number of entries to grab)
411 * After interrupts are allocated with nfp_net_irqs_alloc() this function
412 * should be called to assign them to a specific netdev (port).
415 nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
418 struct nfp_net_dp *dp = &nn->dp;
420 nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
421 dp->num_r_vecs = nn->max_r_vecs;
423 memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);
425 if (dp->num_rx_rings > dp->num_r_vecs ||
426 dp->num_tx_rings > dp->num_r_vecs)
427 dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n",
428 dp->num_rx_rings, dp->num_tx_rings,
431 dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings);
432 dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings);
433 dp->num_stack_tx_rings = dp->num_tx_rings;
437 * nfp_net_irqs_disable() - Disable interrupts
438 * @pdev: PCI device structure
440 * Undoes what @nfp_net_irqs_alloc() does.
442 void nfp_net_irqs_disable(struct pci_dev *pdev)
444 pci_disable_msix(pdev);
448 * nfp_net_irq_rxtx() - Interrupt service routine for RX/TX rings.
450 * @data: Opaque data structure
452 * Return: Indicate if the interrupt has been handled.
454 static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
456 struct nfp_net_r_vector *r_vec = data;
458 napi_schedule_irqoff(&r_vec->napi);
460 /* The FW auto-masks any interrupt, either via the MASK bit in
461 * the MSI-X table or via the per entry ICR field. So there
462 * is no need to disable interrupts here.
467 static irqreturn_t nfp_ctrl_irq_rxtx(int irq, void *data)
469 struct nfp_net_r_vector *r_vec = data;
471 tasklet_schedule(&r_vec->tasklet);
477 * nfp_net_read_link_status() - Reread link status from control BAR
478 * @nn: NFP Network structure
480 static void nfp_net_read_link_status(struct nfp_net *nn)
486 spin_lock_irqsave(&nn->link_status_lock, flags);
488 sts = nn_readl(nn, NFP_NET_CFG_STS);
489 link_up = !!(sts & NFP_NET_CFG_STS_LINK);
491 if (nn->link_up == link_up)
494 nn->link_up = link_up;
496 set_bit(NFP_PORT_CHANGED, &nn->port->flags);
499 netif_carrier_on(nn->dp.netdev);
500 netdev_info(nn->dp.netdev, "NIC Link is Up\n");
502 netif_carrier_off(nn->dp.netdev);
503 netdev_info(nn->dp.netdev, "NIC Link is Down\n");
506 spin_unlock_irqrestore(&nn->link_status_lock, flags);
510 * nfp_net_irq_lsc() - Interrupt service routine for link state changes
512 * @data: Opaque data structure
514 * Return: Indicate if the interrupt has been handled.
516 static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
518 struct nfp_net *nn = data;
519 struct msix_entry *entry;
521 entry = &nn->irq_entries[NFP_NET_IRQ_LSC_IDX];
523 nfp_net_read_link_status(nn);
525 nfp_net_irq_unmask(nn, entry->entry);
531 * nfp_net_irq_exn() - Interrupt service routine for exceptions
533 * @data: Opaque data structure
535 * Return: Indicate if the interrupt has been handled.
537 static irqreturn_t nfp_net_irq_exn(int irq, void *data)
539 struct nfp_net *nn = data;
541 nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__);
542 /* XXX TO BE IMPLEMENTED */
547 * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
548 * @tx_ring: TX ring structure
549 * @r_vec: IRQ vector servicing this ring
551 * @is_xdp: Is this an XDP TX ring?
554 nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
555 struct nfp_net_r_vector *r_vec, unsigned int idx,
558 struct nfp_net *nn = r_vec->nfp_net;
561 tx_ring->r_vec = r_vec;
562 tx_ring->is_xdp = is_xdp;
563 u64_stats_init(&tx_ring->r_vec->tx_sync);
565 tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
566 tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
570 * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
571 * @rx_ring: RX ring structure
572 * @r_vec: IRQ vector servicing this ring
576 nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
577 struct nfp_net_r_vector *r_vec, unsigned int idx)
579 struct nfp_net *nn = r_vec->nfp_net;
582 rx_ring->r_vec = r_vec;
583 u64_stats_init(&rx_ring->r_vec->rx_sync);
585 rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
586 rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
590 * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
591 * @nn: NFP Network structure
592 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
593 * @format: printf-style format to construct the interrupt name
594 * @name: Pointer to allocated space for interrupt name
595 * @name_sz: Size of space for interrupt name
596 * @vector_idx: Index of MSI-X vector used for this interrupt
597 * @handler: IRQ handler to register for this interrupt
600 nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
601 const char *format, char *name, size_t name_sz,
602 unsigned int vector_idx, irq_handler_t handler)
604 struct msix_entry *entry;
607 entry = &nn->irq_entries[vector_idx];
609 snprintf(name, name_sz, format, nfp_net_name(nn));
610 err = request_irq(entry->vector, handler, 0, name, nn);
612 nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
616 nn_writeb(nn, ctrl_offset, entry->entry);
617 nfp_net_irq_unmask(nn, entry->entry);
623 * nfp_net_aux_irq_free() - Free an auxiliary interrupt (LSC or EXN)
624 * @nn: NFP Network structure
625 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
626 * @vector_idx: Index of MSI-X vector used for this interrupt
628 static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
629 unsigned int vector_idx)
631 nn_writeb(nn, ctrl_offset, 0xff);
633 free_irq(nn->irq_entries[vector_idx].vector, nn);
638 * One queue controller peripheral queue is used for transmit. The
639 * driver enqueues packets for transmit by advancing the write
640 * pointer. The device indicates that packets have been transmitted by
641 * advancing the read pointer. The driver maintains a local copy of
642 * the read and write pointer in @struct nfp_net_tx_ring. The driver
643 * keeps @wr_p in sync with the queue controller write pointer and can
644 * determine how many packets have been transmitted by comparing its
645 * copy of the read pointer @rd_p with the read pointer maintained by
646 * the queue controller peripheral.
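 *
 * Example (illustrative): with @cnt = 8, @wr_p = 10 and @rd_p = 5 there are
 * wr_p - rd_p = 5 descriptors in flight and 3 free slots, so
 * nfp_net_tx_full(tx_ring, 3) below reports the ring as full while
 * nfp_net_tx_full(tx_ring, 2) does not.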
650 * nfp_net_tx_full() - Check if the TX ring is full
651 * @tx_ring: TX ring to check
652 * @dcnt: Number of descriptors that need to be enqueued (must be >= 1)
654 * This function checks, based on the *host copy* of the read/write
655 * pointers, whether a given TX ring is full. The real TX queue may have
656 * some newly made available slots.
658 * Return: True if the ring is full.
660 static int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
662 return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
665 /* Wrappers for deciding when to stop and restart TX queues */
666 static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
668 return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
671 static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
673 return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
677 * nfp_net_tx_ring_stop() - stop tx ring
678 * @nd_q: netdev queue
679 * @tx_ring: driver tx queue structure
681 * Safely stop TX ring. Remember that while we are running .start_xmit()
682 * someone else may be cleaning the TX ring completions so we need to be
683 * extra careful here.
685 static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
686 struct nfp_net_tx_ring *tx_ring)
688 netif_tx_stop_queue(nd_q);
690 /* We can race with the TX completion out of NAPI so recheck */
692 if (unlikely(nfp_net_tx_ring_should_wake(tx_ring)))
693 netif_tx_start_queue(nd_q);
697 * nfp_net_tx_tso() - Set up Tx descriptor for LSO
698 * @r_vec: per-ring structure
699 * @txbuf: Pointer to driver soft TX descriptor
700 * @txd: Pointer to HW TX descriptor
701 * @skb: Pointer to SKB
702 * @md_bytes: Prepend length
704 * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
707 static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
708 struct nfp_net_tx_buf *txbuf,
709 struct nfp_net_tx_desc *txd, struct sk_buff *skb,
712 u32 l3_offset, l4_offset, hdrlen;
715 if (!skb_is_gso(skb))
718 if (!skb->encapsulation) {
719 l3_offset = skb_network_offset(skb);
720 l4_offset = skb_transport_offset(skb);
721 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
723 l3_offset = skb_inner_network_offset(skb);
724 l4_offset = skb_inner_transport_offset(skb);
725 hdrlen = skb_inner_transport_header(skb) - skb->data +
726 inner_tcp_hdrlen(skb);
729 txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
730 txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
732 mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
733 txd->l3_offset = l3_offset - md_bytes;
734 txd->l4_offset = l4_offset - md_bytes;
735 txd->lso_hdrlen = hdrlen - md_bytes;
736 txd->mss = cpu_to_le16(mss);
737 txd->flags |= PCIE_DESC_TX_LSO;
739 u64_stats_update_begin(&r_vec->tx_sync);
741 u64_stats_update_end(&r_vec->tx_sync);
745 * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
746 * @dp: NFP Net data path struct
747 * @r_vec: per-ring structure
748 * @txbuf: Pointer to driver soft TX descriptor
749 * @txd: Pointer to TX descriptor
750 * @skb: Pointer to SKB
752 * This function sets the TX checksum flags in the TX descriptor based
753 * on the configuration and the protocol of the packet to be transmitted.
755 static void nfp_net_tx_csum(struct nfp_net_dp *dp,
756 struct nfp_net_r_vector *r_vec,
757 struct nfp_net_tx_buf *txbuf,
758 struct nfp_net_tx_desc *txd, struct sk_buff *skb)
760 struct ipv6hdr *ipv6h;
764 if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
767 if (skb->ip_summed != CHECKSUM_PARTIAL)
770 txd->flags |= PCIE_DESC_TX_CSUM;
771 if (skb->encapsulation)
772 txd->flags |= PCIE_DESC_TX_ENCAP;
774 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
775 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
777 if (iph->version == 4) {
778 txd->flags |= PCIE_DESC_TX_IP4_CSUM;
779 l4_hdr = iph->protocol;
780 } else if (ipv6h->version == 6) {
781 l4_hdr = ipv6h->nexthdr;
783 nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
789 txd->flags |= PCIE_DESC_TX_TCP_CSUM;
792 txd->flags |= PCIE_DESC_TX_UDP_CSUM;
795 nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
799 u64_stats_update_begin(&r_vec->tx_sync);
800 if (skb->encapsulation)
801 r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
803 r_vec->hw_csum_tx += txbuf->pkt_cnt;
804 u64_stats_update_end(&r_vec->tx_sync);
807 static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
810 nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
811 tx_ring->wr_ptr_add = 0;
814 static int nfp_net_prep_port_id(struct sk_buff *skb)
816 struct metadata_dst *md_dst = skb_metadata_dst(skb);
821 if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))
824 if (unlikely(skb_cow_head(skb, 8)))
827 data = skb_push(skb, 8);
828 put_unaligned_be32(NFP_NET_META_PORTID, data);
829 put_unaligned_be32(md_dst->u.port_info.port_id, data + 4);
835 * nfp_net_tx() - Main transmit entry point
836 * @skb: SKB to transmit
837 * @netdev: netdev structure
839 * Return: NETDEV_TX_OK on success.
841 static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
843 struct nfp_net *nn = netdev_priv(netdev);
844 const struct skb_frag_struct *frag;
845 int f, nr_frags, wr_idx, md_bytes;
846 struct nfp_net_tx_ring *tx_ring;
847 struct nfp_net_r_vector *r_vec;
848 struct nfp_net_tx_buf *txbuf;
849 struct nfp_net_tx_desc *txd;
850 struct netdev_queue *nd_q;
851 struct nfp_net_dp *dp;
857 qidx = skb_get_queue_mapping(skb);
858 tx_ring = &dp->tx_rings[qidx];
859 r_vec = tx_ring->r_vec;
861 nr_frags = skb_shinfo(skb)->nr_frags;
863 if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
864 nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
865 qidx, tx_ring->wr_p, tx_ring->rd_p);
866 nd_q = netdev_get_tx_queue(dp->netdev, qidx);
867 netif_tx_stop_queue(nd_q);
868 nfp_net_tx_xmit_more_flush(tx_ring);
869 u64_stats_update_begin(&r_vec->tx_sync);
871 u64_stats_update_end(&r_vec->tx_sync);
872 return NETDEV_TX_BUSY;
875 md_bytes = nfp_net_prep_port_id(skb);
876 if (unlikely(md_bytes < 0)) {
877 nfp_net_tx_xmit_more_flush(tx_ring);
878 dev_kfree_skb_any(skb);
882 /* Start with the head skbuf */
883 dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
885 if (dma_mapping_error(dp->dev, dma_addr))
888 wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
890 /* Stash the soft descriptor of the head then initialize it */
891 txbuf = &tx_ring->txbufs[wr_idx];
893 txbuf->dma_addr = dma_addr;
896 txbuf->real_len = skb->len;
898 /* Build TX descriptor */
899 txd = &tx_ring->txds[wr_idx];
900 txd->offset_eop = (nr_frags ? 0 : PCIE_DESC_TX_EOP) | md_bytes;
901 txd->dma_len = cpu_to_le16(skb_headlen(skb));
902 nfp_desc_set_dma_addr(txd, dma_addr);
903 txd->data_len = cpu_to_le16(skb->len);
909 /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
910 nfp_net_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
911 nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
912 if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
913 txd->flags |= PCIE_DESC_TX_VLAN;
914 txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
922 /* all descs must match except for address, length and EOP */
922 second_half = txd->vals8[1];
924 for (f = 0; f < nr_frags; f++) {
925 frag = &skb_shinfo(skb)->frags[f];
926 fsize = skb_frag_size(frag);
928 dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
929 fsize, DMA_TO_DEVICE);
930 if (dma_mapping_error(dp->dev, dma_addr))
933 wr_idx = D_IDX(tx_ring, wr_idx + 1);
934 tx_ring->txbufs[wr_idx].skb = skb;
935 tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
936 tx_ring->txbufs[wr_idx].fidx = f;
938 txd = &tx_ring->txds[wr_idx];
939 txd->dma_len = cpu_to_le16(fsize);
940 nfp_desc_set_dma_addr(txd, dma_addr);
941 txd->offset_eop = md_bytes |
942 ((f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0);
943 txd->vals8[1] = second_half;
946 u64_stats_update_begin(&r_vec->tx_sync);
948 u64_stats_update_end(&r_vec->tx_sync);
951 skb_tx_timestamp(skb);
953 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
955 tx_ring->wr_p += nr_frags + 1;
956 if (nfp_net_tx_ring_should_stop(tx_ring))
957 nfp_net_tx_ring_stop(nd_q, tx_ring);
959 tx_ring->wr_ptr_add += nr_frags + 1;
960 if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
961 nfp_net_tx_xmit_more_flush(tx_ring);
967 frag = &skb_shinfo(skb)->frags[f];
968 dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
969 skb_frag_size(frag), DMA_TO_DEVICE);
970 tx_ring->txbufs[wr_idx].skb = NULL;
971 tx_ring->txbufs[wr_idx].dma_addr = 0;
972 tx_ring->txbufs[wr_idx].fidx = -2;
975 wr_idx += tx_ring->cnt;
977 dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
978 skb_headlen(skb), DMA_TO_DEVICE);
979 tx_ring->txbufs[wr_idx].skb = NULL;
980 tx_ring->txbufs[wr_idx].dma_addr = 0;
981 tx_ring->txbufs[wr_idx].fidx = -2;
983 nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
984 nfp_net_tx_xmit_more_flush(tx_ring);
985 u64_stats_update_begin(&r_vec->tx_sync);
987 u64_stats_update_end(&r_vec->tx_sync);
988 dev_kfree_skb_any(skb);
993 * nfp_net_tx_complete() - Handle completed TX packets
994 * @tx_ring: TX ring structure
995 * @budget: NAPI budget (only used as bool to determine if in NAPI context)
997 static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
999 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
1000 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
1001 struct netdev_queue *nd_q;
1002 u32 done_pkts = 0, done_bytes = 0;
1006 if (tx_ring->wr_p == tx_ring->rd_p)
1009 /* Work out how many descriptors have been transmitted */
1010 qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
1012 if (qcp_rd_p == tx_ring->qcp_rd_p)
1015 todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
1018 const struct skb_frag_struct *frag;
1019 struct nfp_net_tx_buf *tx_buf;
1020 struct sk_buff *skb;
1024 idx = D_IDX(tx_ring, tx_ring->rd_p++);
1025 tx_buf = &tx_ring->txbufs[idx];
1031 nr_frags = skb_shinfo(skb)->nr_frags;
1032 fidx = tx_buf->fidx;
1036 dma_unmap_single(dp->dev, tx_buf->dma_addr,
1037 skb_headlen(skb), DMA_TO_DEVICE);
1039 done_pkts += tx_buf->pkt_cnt;
1040 done_bytes += tx_buf->real_len;
1042 /* unmap fragment */
1043 frag = &skb_shinfo(skb)->frags[fidx];
1044 dma_unmap_page(dp->dev, tx_buf->dma_addr,
1045 skb_frag_size(frag), DMA_TO_DEVICE);
1048 /* check for last gather fragment */
1049 if (fidx == nr_frags - 1)
1050 napi_consume_skb(skb, budget);
1052 tx_buf->dma_addr = 0;
1057 tx_ring->qcp_rd_p = qcp_rd_p;
1059 u64_stats_update_begin(&r_vec->tx_sync);
1060 r_vec->tx_bytes += done_bytes;
1061 r_vec->tx_pkts += done_pkts;
1062 u64_stats_update_end(&r_vec->tx_sync);
1067 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
1068 netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
1069 if (nfp_net_tx_ring_should_wake(tx_ring)) {
1070 /* Make sure TX thread will see updated tx_ring->rd_p */
1073 if (unlikely(netif_tx_queue_stopped(nd_q)))
1074 netif_tx_wake_queue(nd_q);
1077 WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
1078 "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
1079 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
1082 static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
1084 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
1085 u32 done_pkts = 0, done_bytes = 0;
1090 /* Work out how many descriptors have been transmitted */
1091 qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
1093 if (qcp_rd_p == tx_ring->qcp_rd_p)
1096 todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
1098 done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
1099 todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
1101 tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
1105 idx = D_IDX(tx_ring, tx_ring->rd_p);
1108 done_bytes += tx_ring->txbufs[idx].real_len;
1111 u64_stats_update_begin(&r_vec->tx_sync);
1112 r_vec->tx_bytes += done_bytes;
1113 r_vec->tx_pkts += done_pkts;
1114 u64_stats_update_end(&r_vec->tx_sync);
1116 WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
1117 "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
1118 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
1124 * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
1125 * @dp: NFP Net data path struct
1126 * @tx_ring: TX ring structure
1128 * Assumes that the device is stopped, must be idempotent.
1131 nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
1133 const struct skb_frag_struct *frag;
1134 struct netdev_queue *nd_q;
1136 while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
1137 struct nfp_net_tx_buf *tx_buf;
1138 struct sk_buff *skb;
1141 idx = D_IDX(tx_ring, tx_ring->rd_p);
1142 tx_buf = &tx_ring->txbufs[idx];
1144 skb = tx_ring->txbufs[idx].skb;
1145 nr_frags = skb_shinfo(skb)->nr_frags;
1147 if (tx_buf->fidx == -1) {
1149 dma_unmap_single(dp->dev, tx_buf->dma_addr,
1150 skb_headlen(skb), DMA_TO_DEVICE);
1152 /* unmap fragment */
1153 frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
1154 dma_unmap_page(dp->dev, tx_buf->dma_addr,
1155 skb_frag_size(frag), DMA_TO_DEVICE);
1158 /* check for last gather fragment */
1159 if (tx_buf->fidx == nr_frags - 1)
1160 dev_kfree_skb_any(skb);
1162 tx_buf->dma_addr = 0;
1166 tx_ring->qcp_rd_p++;
1170 memset(tx_ring->txds, 0, tx_ring->size);
1173 tx_ring->qcp_rd_p = 0;
1174 tx_ring->wr_ptr_add = 0;
1176 if (tx_ring->is_xdp || !dp->netdev)
1179 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
1180 netdev_tx_reset_queue(nd_q);
1183 static void nfp_net_tx_timeout(struct net_device *netdev)
1185 struct nfp_net *nn = netdev_priv(netdev);
1188 for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) {
1189 if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
1191 nn_warn(nn, "TX timeout on ring: %d\n", i);
1193 nn_warn(nn, "TX watchdog timeout\n");
1196 /* Receive processing
1199 nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
1201 unsigned int fl_bufsz;
1203 fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
1204 fl_bufsz += dp->rx_dma_off;
1205 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
1206 fl_bufsz += NFP_NET_MAX_PREPEND;
1208 fl_bufsz += dp->rx_offset;
1209 fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;
1211 fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
1212 fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
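/* Illustratively, the frag sized here covers NFP_NET_RX_BUF_HEADROOM +
 * rx_dma_off + the metadata prepend space (fixed rx_offset or the dynamic
 * NFP_NET_MAX_PREPEND) + Ethernet header + two VLAN tags + MTU, aligned and
 * extended with room for struct skb_shared_info so the buffer can later be
 * handed to build_skb().
 */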
1218 nfp_net_free_frag(void *frag, bool xdp)
1221 skb_free_frag(frag);
1223 __free_page(virt_to_page(frag));
1227 * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
1228 * @dp: NFP Net data path struct
1229 * @dma_addr: Pointer to storage for DMA address (output param)
1231 * This function will allocate a new page frag and map it for DMA.
1233 * Return: allocated page frag or NULL on failure.
1235 static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
1239 if (!dp->xdp_prog) {
1240 frag = netdev_alloc_frag(dp->fl_bufsz);
1244 page = alloc_page(GFP_KERNEL);
1245 frag = page ? page_address(page) : NULL;
1248 nn_dp_warn(dp, "Failed to alloc receive page frag\n");
1252 *dma_addr = nfp_net_dma_map_rx(dp, frag);
1253 if (dma_mapping_error(dp->dev, *dma_addr)) {
1254 nfp_net_free_frag(frag, dp->xdp_prog);
1255 nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
1262 static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
1266 if (!dp->xdp_prog) {
1267 frag = napi_alloc_frag(dp->fl_bufsz);
1268 if (unlikely(!frag))
1273 page = dev_alloc_page();
1274 if (unlikely(!page))
1276 frag = page_address(page);
1279 *dma_addr = nfp_net_dma_map_rx(dp, frag);
1280 if (dma_mapping_error(dp->dev, *dma_addr)) {
1281 nfp_net_free_frag(frag, dp->xdp_prog);
1282 nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
1290 * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings
1291 * @dp: NFP Net data path struct
1292 * @rx_ring: RX ring structure
1293 * @frag: page fragment buffer
1294 * @dma_addr: DMA address of skb mapping
1296 static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
1297 struct nfp_net_rx_ring *rx_ring,
1298 void *frag, dma_addr_t dma_addr)
1300 unsigned int wr_idx;
1302 wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
1304 nfp_net_dma_sync_dev_rx(dp, dma_addr);
1306 /* Stash SKB and DMA address away */
1307 rx_ring->rxbufs[wr_idx].frag = frag;
1308 rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
1310 /* Fill freelist descriptor */
1311 rx_ring->rxds[wr_idx].fld.reserved = 0;
1312 rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
1313 nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
1314 dma_addr + dp->rx_dma_off);
1317 if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
1318 /* Update write pointer of the freelist queue. Make
1319 * sure all writes are flushed before telling the hardware.
1322 nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
1327 * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable
1328 * @rx_ring: RX ring structure
1330 * Assumes that the device is stopped, must be idempotent.
1332 static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
1334 unsigned int wr_idx, last_idx;
1336 /* wr_p == rd_p means ring was never fed FL bufs. RX rings are always
1337 * kept at cnt - 1 FL bufs.
1339 if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
1342 /* Move the empty entry to the end of the list */
1343 wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
1344 last_idx = rx_ring->cnt - 1;
1345 rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
1346 rx_ring->rxbufs[wr_idx].frag = rx_ring->rxbufs[last_idx].frag;
1347 rx_ring->rxbufs[last_idx].dma_addr = 0;
1348 rx_ring->rxbufs[last_idx].frag = NULL;
1350 memset(rx_ring->rxds, 0, rx_ring->size);
1356 * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
1357 * @dp: NFP Net data path struct
1358 * @rx_ring: RX ring to remove buffers from
1360 * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
1361 * entries. After device is disabled nfp_net_rx_ring_reset() must be called
1362 * to restore required ring geometry.
1365 nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
1366 struct nfp_net_rx_ring *rx_ring)
1370 for (i = 0; i < rx_ring->cnt - 1; i++) {
1371 /* NULL frag can only happen when the initial filling of the ring
1372 * fails to allocate enough buffers and this function is called to
1373 * free the ones already allocated.
1375 if (!rx_ring->rxbufs[i].frag)
1378 nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
1379 nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
1380 rx_ring->rxbufs[i].dma_addr = 0;
1381 rx_ring->rxbufs[i].frag = NULL;
1386 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
1387 * @dp: NFP Net data path struct
1388 * @rx_ring: RX ring to fill with buffers
1391 nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
1392 struct nfp_net_rx_ring *rx_ring)
1394 struct nfp_net_rx_buf *rxbufs;
1397 rxbufs = rx_ring->rxbufs;
1399 for (i = 0; i < rx_ring->cnt - 1; i++) {
1400 rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
1401 if (!rxbufs[i].frag) {
1402 nfp_net_rx_ring_bufs_free(dp, rx_ring);
1411 * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
1412 * @dp: NFP Net data path struct
1413 * @rx_ring: RX ring to fill
1416 nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
1417 struct nfp_net_rx_ring *rx_ring)
1421 for (i = 0; i < rx_ring->cnt - 1; i++)
1422 nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
1423 rx_ring->rxbufs[i].dma_addr);
1427 * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors
1428 * @flags: RX descriptor flags field in CPU byte order
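 *
 * The descriptor carries a "checked" flag and a matching "OK" flag for each
 * checksum; after shifting the OK bits into the checked positions the two
 * sets must be identical, otherwise some checksum was checked but not
 * reported OK.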
1430 static int nfp_net_rx_csum_has_errors(u16 flags)
1432 u16 csum_all_checked, csum_all_ok;
1434 csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
1435 csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
1437 return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
1441 * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags
1442 * @dp: NFP Net data path struct
1443 * @r_vec: per-ring structure
1444 * @rxd: Pointer to RX descriptor
1445 * @meta: Parsed metadata prepend
1446 * @skb: Pointer to SKB
1448 static void nfp_net_rx_csum(struct nfp_net_dp *dp,
1449 struct nfp_net_r_vector *r_vec,
1450 struct nfp_net_rx_desc *rxd,
1451 struct nfp_meta_parsed *meta, struct sk_buff *skb)
1453 skb_checksum_none_assert(skb);
1455 if (!(dp->netdev->features & NETIF_F_RXCSUM))
1458 if (meta->csum_type) {
1459 skb->ip_summed = meta->csum_type;
1460 skb->csum = meta->csum;
1461 u64_stats_update_begin(&r_vec->rx_sync);
1462 r_vec->hw_csum_rx_complete++;
1463 u64_stats_update_end(&r_vec->rx_sync);
1467 if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
1468 u64_stats_update_begin(&r_vec->rx_sync);
1469 r_vec->hw_csum_rx_error++;
1470 u64_stats_update_end(&r_vec->rx_sync);
1474 /* Assume that the firmware will never report inner CSUM_OK unless outer
1475 * L4 headers were successfully parsed. FW will always report zero UDP
1476 * checksum as CSUM_OK.
1478 if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
1479 rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
1480 __skb_incr_checksum_unnecessary(skb);
1481 u64_stats_update_begin(&r_vec->rx_sync);
1482 r_vec->hw_csum_rx_ok++;
1483 u64_stats_update_end(&r_vec->rx_sync);
1486 if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
1487 rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
1488 __skb_incr_checksum_unnecessary(skb);
1489 u64_stats_update_begin(&r_vec->rx_sync);
1490 r_vec->hw_csum_rx_inner_ok++;
1491 u64_stats_update_end(&r_vec->rx_sync);
1496 nfp_net_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
1497 unsigned int type, __be32 *hash)
1499 if (!(netdev->features & NETIF_F_RXHASH))
1503 case NFP_NET_RSS_IPV4:
1504 case NFP_NET_RSS_IPV6:
1505 case NFP_NET_RSS_IPV6_EX:
1506 meta->hash_type = PKT_HASH_TYPE_L3;
1509 meta->hash_type = PKT_HASH_TYPE_L4;
1513 meta->hash = get_unaligned_be32(hash);
1517 nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
1518 void *data, struct nfp_net_rx_desc *rxd)
1520 struct nfp_net_rx_hash *rx_hash = data;
1522 if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
1525 nfp_net_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
1530 nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
1531 void *data, int meta_len)
1535 meta_info = get_unaligned_be32(data);
1539 switch (meta_info & NFP_NET_META_FIELD_MASK) {
1540 case NFP_NET_META_HASH:
1541 meta_info >>= NFP_NET_META_FIELD_SIZE;
1542 nfp_net_set_hash(netdev, meta,
1543 meta_info & NFP_NET_META_FIELD_MASK,
1547 case NFP_NET_META_MARK:
1548 meta->mark = get_unaligned_be32(data);
1551 case NFP_NET_META_PORTID:
1552 meta->portid = get_unaligned_be32(data);
1555 case NFP_NET_META_CSUM:
1556 meta->csum_type = CHECKSUM_COMPLETE;
1558 (__force __wsum)__get_unaligned_cpu32(data);
1565 meta_info >>= NFP_NET_META_FIELD_SIZE;
1572 nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
1573 struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
1574 struct sk_buff *skb)
1576 u64_stats_update_begin(&r_vec->rx_sync);
1578 /* If we have both skb and rxbuf the replacement buffer allocation
1579 * must have failed, count this as an alloc failure.
1582 r_vec->rx_replace_buf_alloc_fail++;
1583 u64_stats_update_end(&r_vec->rx_sync);
1585 /* skb is built based on the frag, free_skb() would free the frag
1586 * so to be able to reuse it we need an extra ref.
1588 if (skb && rxbuf && skb->head == rxbuf->frag)
1589 page_ref_inc(virt_to_head_page(rxbuf->frag));
1591 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
1593 dev_kfree_skb_any(skb);
1597 nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
1598 struct nfp_net_tx_ring *tx_ring,
1599 struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
1600 unsigned int pkt_len, bool *completed)
1602 struct nfp_net_tx_buf *txbuf;
1603 struct nfp_net_tx_desc *txd;
1606 if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
1608 nfp_net_xdp_complete(tx_ring);
1612 if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
1613 nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
1619 wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
1621 /* Stash the soft descriptor of the head then initialize it */
1622 txbuf = &tx_ring->txbufs[wr_idx];
1624 nfp_net_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);
1626 txbuf->frag = rxbuf->frag;
1627 txbuf->dma_addr = rxbuf->dma_addr;
1630 txbuf->real_len = pkt_len;
1632 dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
1633 pkt_len, DMA_BIDIRECTIONAL);
1635 /* Build TX descriptor */
1636 txd = &tx_ring->txds[wr_idx];
1637 txd->offset_eop = PCIE_DESC_TX_EOP;
1638 txd->dma_len = cpu_to_le16(pkt_len);
1639 nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
1640 txd->data_len = cpu_to_le16(pkt_len);
1644 txd->lso_hdrlen = 0;
1647 tx_ring->wr_ptr_add++;
1652 * nfp_net_rx() - receive up to @budget packets on @rx_ring
1653 * @rx_ring: RX ring to receive from
1654 * @budget: NAPI budget
1656 * Note, this function is separated out from the napi poll function to
1657 * more cleanly separate packet receive code from other bookkeeping
1658 * functions performed in the napi poll function.
1660 * Return: Number of packets received.
1662 static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
1664 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1665 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
1666 struct nfp_net_tx_ring *tx_ring;
1667 struct bpf_prog *xdp_prog;
1668 bool xdp_tx_cmpl = false;
1669 unsigned int true_bufsz;
1670 struct sk_buff *skb;
1671 int pkts_polled = 0;
1672 struct xdp_buff xdp;
1676 xdp_prog = READ_ONCE(dp->xdp_prog);
1677 true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
1678 xdp.rxq = &rx_ring->xdp_rxq;
1679 tx_ring = r_vec->xdp_ring;
1681 while (pkts_polled < budget) {
1682 unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
1683 struct nfp_net_rx_buf *rxbuf;
1684 struct nfp_net_rx_desc *rxd;
1685 struct nfp_meta_parsed meta;
1686 bool redir_egress = false;
1687 struct net_device *netdev;
1688 dma_addr_t new_dma_addr;
1689 u32 meta_len_xdp = 0;
1692 idx = D_IDX(rx_ring, rx_ring->rd_p);
1694 rxd = &rx_ring->rxds[idx];
1695 if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
1698 /* Memory barrier to ensure that we won't do other reads
1699 * before the DD bit.
1703 memset(&meta, 0, sizeof(meta));
1708 rxbuf = &rx_ring->rxbufs[idx];
1710 * <-- [rx_offset] -->
1711 * ---------------------------------------------------------
1712 * | [XX] | metadata | packet | XXXX |
1713 * ---------------------------------------------------------
1714 * <---------------- data_len --------------->
1716 * The rx_offset is fixed for all packets, the meta_len can vary
1717 * on a packet by packet basis. If rx_offset is set to zero
1718 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
1719 * buffer and is immediately followed by the packet (no [XX]).
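 *
 * As computed below: pkt_off = NFP_NET_RX_BUF_HEADROOM + rx_dma_off +
 * (meta_len for the dynamic case, otherwise the fixed rx_offset) and
 * meta_off = pkt_off - meta_len, i.e. the metadata always ends exactly
 * where the packet data begins.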
1721 meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
1722 data_len = le16_to_cpu(rxd->rxd.data_len);
1723 pkt_len = data_len - meta_len;
1725 pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
1726 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
1727 pkt_off += meta_len;
1729 pkt_off += dp->rx_offset;
1730 meta_off = pkt_off - meta_len;
1733 u64_stats_update_begin(&r_vec->rx_sync);
1735 r_vec->rx_bytes += pkt_len;
1736 u64_stats_update_end(&r_vec->rx_sync);
1738 if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
1739 (dp->rx_offset && meta_len > dp->rx_offset))) {
1740 nn_dp_warn(dp, "oversized RX packet metadata %u\n",
1742 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
1746 nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
1749 if (!dp->chained_metadata_format) {
1750 nfp_net_set_hash_desc(dp->netdev, &meta,
1751 rxbuf->frag + meta_off, rxd);
1752 } else if (meta_len) {
1755 end = nfp_net_parse_meta(dp->netdev, &meta,
1756 rxbuf->frag + meta_off,
1758 if (unlikely(end != rxbuf->frag + pkt_off)) {
1759 nn_dp_warn(dp, "invalid RX packet metadata\n");
1760 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
1766 if (xdp_prog && !meta.portid) {
1767 void *orig_data = rxbuf->frag + pkt_off;
1768 unsigned int dma_off;
1771 xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
1772 xdp.data = orig_data;
1773 xdp.data_meta = orig_data;
1774 xdp.data_end = orig_data + pkt_len;
1776 act = bpf_prog_run_xdp(xdp_prog, &xdp);
1778 pkt_len = xdp.data_end - xdp.data;
1779 pkt_off += xdp.data - orig_data;
1783 meta_len_xdp = xdp.data - xdp.data_meta;
1786 dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
1787 if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
1792 trace_xdp_exception(dp->netdev,
1796 bpf_warn_invalid_xdp_action(act);
1799 trace_xdp_exception(dp->netdev, xdp_prog, act);
1802 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
1808 if (likely(!meta.portid)) {
1809 netdev = dp->netdev;
1810 } else if (meta.portid == NFP_META_PORT_ID_CTRL) {
1811 struct nfp_net *nn = netdev_priv(dp->netdev);
1813 nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
1815 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
1821 nn = netdev_priv(dp->netdev);
1822 netdev = nfp_app_dev_get(nn->app, meta.portid,
1824 if (unlikely(!netdev)) {
1825 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
1830 if (nfp_netdev_is_nfp_repr(netdev))
1831 nfp_repr_inc_rx_stats(netdev, pkt_len);
1834 skb = build_skb(rxbuf->frag, true_bufsz);
1835 if (unlikely(!skb)) {
1836 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
1839 new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
1840 if (unlikely(!new_frag)) {
1841 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
1845 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
1847 nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
1849 skb_reserve(skb, pkt_off);
1850 skb_put(skb, pkt_len);
1852 skb->mark = meta.mark;
1853 skb_set_hash(skb, meta.hash, meta.hash_type);
1855 skb_record_rx_queue(skb, rx_ring->idx);
1856 skb->protocol = eth_type_trans(skb, netdev);
1858 nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);
1860 if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
1861 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1862 le16_to_cpu(rxd->rxd.vlan));
1864 skb_metadata_set(skb, meta_len_xdp);
1866 if (likely(!redir_egress)) {
1867 napi_gro_receive(&rx_ring->r_vec->napi, skb);
1870 __skb_push(skb, ETH_HLEN);
1871 dev_queue_xmit(skb);
1876 if (tx_ring->wr_ptr_add)
1877 nfp_net_tx_xmit_more_flush(tx_ring);
1878 else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
1880 if (!nfp_net_xdp_complete(tx_ring))
1881 pkts_polled = budget;
1889 * nfp_net_poll() - napi poll function
1890 * @napi: NAPI structure
1891 * @budget: NAPI budget
1893 * Return: number of packets polled.
1895 static int nfp_net_poll(struct napi_struct *napi, int budget)
1897 struct nfp_net_r_vector *r_vec =
1898 container_of(napi, struct nfp_net_r_vector, napi);
1899 unsigned int pkts_polled = 0;
1902 nfp_net_tx_complete(r_vec->tx_ring, budget);
1904 pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
1906 if (pkts_polled < budget)
1907 if (napi_complete_done(napi, pkts_polled))
1908 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
1913 /* Control device data path
1917 nfp_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
1918 struct sk_buff *skb, bool old)
1920 unsigned int real_len = skb->len, meta_len = 0;
1921 struct nfp_net_tx_ring *tx_ring;
1922 struct nfp_net_tx_buf *txbuf;
1923 struct nfp_net_tx_desc *txd;
1924 struct nfp_net_dp *dp;
1925 dma_addr_t dma_addr;
1928 dp = &r_vec->nfp_net->dp;
1929 tx_ring = r_vec->tx_ring;
1931 if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
1932 nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
1936 if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
1937 u64_stats_update_begin(&r_vec->tx_sync);
1939 u64_stats_update_end(&r_vec->tx_sync);
1941 __skb_queue_tail(&r_vec->queue, skb);
1943 __skb_queue_head(&r_vec->queue, skb);
1947 if (nfp_app_ctrl_has_meta(nn->app)) {
1948 if (unlikely(skb_headroom(skb) < 8)) {
1949 nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
1953 put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
1954 put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
1957 /* Start with the head skbuf */
1958 dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
1960 if (dma_mapping_error(dp->dev, dma_addr))
1963 wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
1965 /* Stash the soft descriptor of the head then initialize it */
1966 txbuf = &tx_ring->txbufs[wr_idx];
1968 txbuf->dma_addr = dma_addr;
1971 txbuf->real_len = real_len;
1973 /* Build TX descriptor */
1974 txd = &tx_ring->txds[wr_idx];
1975 txd->offset_eop = meta_len | PCIE_DESC_TX_EOP;
1976 txd->dma_len = cpu_to_le16(skb_headlen(skb));
1977 nfp_desc_set_dma_addr(txd, dma_addr);
1978 txd->data_len = cpu_to_le16(skb->len);
1982 txd->lso_hdrlen = 0;
1985 tx_ring->wr_ptr_add++;
1986 nfp_net_tx_xmit_more_flush(tx_ring);
1991 nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
1993 u64_stats_update_begin(&r_vec->tx_sync);
1995 u64_stats_update_end(&r_vec->tx_sync);
1996 dev_kfree_skb_any(skb);
2000 bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
2002 struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
2004 return nfp_ctrl_tx_one(nn, r_vec, skb, false);
2007 bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
2009 struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
2012 spin_lock_bh(&r_vec->lock);
2013 ret = nfp_ctrl_tx_one(nn, r_vec, skb, false);
2014 spin_unlock_bh(&r_vec->lock);
2019 static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
2021 struct sk_buff *skb;
2023 while ((skb = __skb_dequeue(&r_vec->queue)))
2024 if (nfp_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
2029 nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
2031 u32 meta_type, meta_tag;
2033 if (!nfp_app_ctrl_has_meta(nn->app))
2039 meta_type = get_unaligned_be32(data);
2040 meta_tag = get_unaligned_be32(data + 4);
2042 return (meta_type == NFP_NET_META_PORTID &&
2043 meta_tag == NFP_META_PORT_ID_CTRL);
2047 nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
2048 struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
2050 unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
2051 struct nfp_net_rx_buf *rxbuf;
2052 struct nfp_net_rx_desc *rxd;
2053 dma_addr_t new_dma_addr;
2054 struct sk_buff *skb;
2058 idx = D_IDX(rx_ring, rx_ring->rd_p);
2060 rxd = &rx_ring->rxds[idx];
2061 if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
2064 /* Memory barrier to ensure that we won't do other reads
2065 * before the DD bit.
2071 rxbuf = &rx_ring->rxbufs[idx];
2072 meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
2073 data_len = le16_to_cpu(rxd->rxd.data_len);
2074 pkt_len = data_len - meta_len;
2076 pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
2077 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
2078 pkt_off += meta_len;
2080 pkt_off += dp->rx_offset;
2081 meta_off = pkt_off - meta_len;
2084 u64_stats_update_begin(&r_vec->rx_sync);
2086 r_vec->rx_bytes += pkt_len;
2087 u64_stats_update_end(&r_vec->rx_sync);
2089 nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
2091 if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
2092 nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
2094 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
2098 skb = build_skb(rxbuf->frag, dp->fl_bufsz);
2099 if (unlikely(!skb)) {
2100 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
2103 new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
2104 if (unlikely(!new_frag)) {
2105 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
2109 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
2111 nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
2113 skb_reserve(skb, pkt_off);
2114 skb_put(skb, pkt_len);
2116 nfp_app_ctrl_rx(nn->app, skb);
2121 static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
2123 struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
2124 struct nfp_net *nn = r_vec->nfp_net;
2125 struct nfp_net_dp *dp = &nn->dp;
2126 unsigned int budget = 512;
2128 while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
2134 static void nfp_ctrl_poll(unsigned long arg)
2136 struct nfp_net_r_vector *r_vec = (void *)arg;
2138 spin_lock(&r_vec->lock);
2139 nfp_net_tx_complete(r_vec->tx_ring, 0);
2140 __nfp_ctrl_tx_queued(r_vec);
2141 spin_unlock(&r_vec->lock);
2143 if (nfp_ctrl_rx(r_vec)) {
2144 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
2146 tasklet_schedule(&r_vec->tasklet);
2147 nn_dp_warn(&r_vec->nfp_net->dp,
2148 "control message budget exceeded!\n");
2152 /* Setup and Configuration
2156 * nfp_net_vecs_init() - Assign IRQs and setup rvecs.
2157 * @nn: NFP Network structure
2159 static void nfp_net_vecs_init(struct nfp_net *nn)
2161 struct nfp_net_r_vector *r_vec;
2164 nn->lsc_handler = nfp_net_irq_lsc;
2165 nn->exn_handler = nfp_net_irq_exn;
2167 for (r = 0; r < nn->max_r_vecs; r++) {
2168 struct msix_entry *entry;
2170 entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];
2172 r_vec = &nn->r_vecs[r];
2173 r_vec->nfp_net = nn;
2174 r_vec->irq_entry = entry->entry;
2175 r_vec->irq_vector = entry->vector;
2177 if (nn->dp.netdev) {
2178 r_vec->handler = nfp_net_irq_rxtx;
2180 r_vec->handler = nfp_ctrl_irq_rxtx;
2182 __skb_queue_head_init(&r_vec->queue);
2183 spin_lock_init(&r_vec->lock);
2184 tasklet_init(&r_vec->tasklet, nfp_ctrl_poll,
2185 (unsigned long)r_vec);
2186 tasklet_disable(&r_vec->tasklet);
2189 cpumask_set_cpu(r, &r_vec->affinity_mask);
2194 * nfp_net_tx_ring_free() - Free resources allocated to a TX ring
2195 * @tx_ring: TX ring to free
2197 static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
2199 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
2200 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
2202 kvfree(tx_ring->txbufs);
2205 dma_free_coherent(dp->dev, tx_ring->size,
2206 tx_ring->txds, tx_ring->dma);
2209 tx_ring->txbufs = NULL;
2210 tx_ring->txds = NULL;
2216 * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
2217 * @dp: NFP Net data path struct
2218 * @tx_ring: TX Ring structure to allocate
2220 * Return: 0 on success, negative errno otherwise.
2223 nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
2225 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
2227 tx_ring->cnt = dp->txd_cnt;
2229 tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
2230 tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size,
2232 GFP_KERNEL | __GFP_NOWARN);
2233 if (!tx_ring->txds) {
2234 netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
2239 tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs),
2241 if (!tx_ring->txbufs)
2244 if (!tx_ring->is_xdp && dp->netdev)
2245 netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
2251 nfp_net_tx_ring_free(tx_ring);
2256 nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
2257 struct nfp_net_tx_ring *tx_ring)
2261 if (!tx_ring->is_xdp)
2264 for (i = 0; i < tx_ring->cnt; i++) {
2265 if (!tx_ring->txbufs[i].frag)
2268 nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr);
2269 __free_page(virt_to_page(tx_ring->txbufs[i].frag));
2274 nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
2275 struct nfp_net_tx_ring *tx_ring)
2277 struct nfp_net_tx_buf *txbufs = tx_ring->txbufs;
2280 if (!tx_ring->is_xdp)
2283 for (i = 0; i < tx_ring->cnt; i++) {
2284 txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr);
2285 if (!txbufs[i].frag) {
2286 nfp_net_tx_ring_bufs_free(dp, tx_ring);
2294 static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
2298 dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
2303 for (r = 0; r < dp->num_tx_rings; r++) {
2306 if (r >= dp->num_stack_tx_rings)
2307 bias = dp->num_stack_tx_rings;
2309 nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias],
2312 if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
2315 if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
2323 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
2325 nfp_net_tx_ring_free(&dp->tx_rings[r]);
2327 kfree(dp->tx_rings);
2331 static void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
2335 for (r = 0; r < dp->num_tx_rings; r++) {
2336 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
2337 nfp_net_tx_ring_free(&dp->tx_rings[r]);
2340 kfree(dp->tx_rings);
2344 * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
2345 * @rx_ring: RX ring to free
2347 static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
2349 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
2350 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
2353 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
2354 kvfree(rx_ring->rxbufs);
2357 dma_free_coherent(dp->dev, rx_ring->size,
2358 rx_ring->rxds, rx_ring->dma);
2361 rx_ring->rxbufs = NULL;
2362 rx_ring->rxds = NULL;
/**
 * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
 * @dp:      NFP Net data path struct
 * @rx_ring: RX ring to allocate
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int
nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
{
	int err;

	if (dp->netdev) {
		err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
				       rx_ring->idx);
		if (err < 0)
			return err;
	}

	rx_ring->cnt = dp->rxd_cnt;
	rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
	rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
					   &rx_ring->dma,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!rx_ring->rxds) {
		netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
			    rx_ring->cnt);
		goto err_alloc;
	}

	rx_ring->rxbufs = kvcalloc(rx_ring->cnt, sizeof(*rx_ring->rxbufs),
				   GFP_KERNEL);
	if (!rx_ring->rxbufs)
		goto err_alloc;

	return 0;

err_alloc:
	nfp_net_rx_ring_free(rx_ring);
	return -ENOMEM;
}

static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	unsigned int r;

	dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
			       GFP_KERNEL);
	if (!dp->rx_rings)
		return -ENOMEM;

	for (r = 0; r < dp->num_rx_rings; r++) {
		nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);

		if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
			goto err_free_prev;

		if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
			goto err_free_ring;
	}

	return 0;

err_free_prev:
	while (r--) {
		nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
err_free_ring:
		nfp_net_rx_ring_free(&dp->rx_rings[r]);
	}
	kfree(dp->rx_rings);
	return -ENOMEM;
}

static void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
{
	unsigned int r;

	for (r = 0; r < dp->num_rx_rings; r++) {
		nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
		nfp_net_rx_ring_free(&dp->rx_rings[r]);
	}

	kfree(dp->rx_rings);
}

static void
nfp_net_vector_assign_rings(struct nfp_net_dp *dp,
			    struct nfp_net_r_vector *r_vec, int idx)
{
	r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL;
	r_vec->tx_ring =
		idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL;

	r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ?
		&dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;
}

static int
nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
		       int idx)
{
	int err;

	/* Setup NAPI */
	if (nn->dp.netdev)
		netif_napi_add(nn->dp.netdev, &r_vec->napi,
			       nfp_net_poll, NAPI_POLL_WEIGHT);
	else
		tasklet_enable(&r_vec->tasklet);

	snprintf(r_vec->name, sizeof(r_vec->name),
		 "%s-rxtx-%d", nfp_net_name(nn), idx);
	err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
			  r_vec);
	if (err) {
		if (nn->dp.netdev)
			netif_napi_del(&r_vec->napi);
		else
			tasklet_disable(&r_vec->tasklet);

		nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
		return err;
	}
	disable_irq(r_vec->irq_vector);

	irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);

	nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector,
	       r_vec->irq_entry);

	return 0;
}

static void
nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
{
	irq_set_affinity_hint(r_vec->irq_vector, NULL);
	if (nn->dp.netdev)
		netif_napi_del(&r_vec->napi);
	else
		tasklet_disable(&r_vec->tasklet);

	free_irq(r_vec->irq_vector, r_vec);
}

/**
 * nfp_net_rss_write_itbl() - Write RSS indirection table to device
 * @nn: NFP Net device to reconfigure
 */
void nfp_net_rss_write_itbl(struct nfp_net *nn)
{
	int i;

	for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i += 4)
		nn_writel(nn, NFP_NET_CFG_RSS_ITBL + i,
			  get_unaligned_le32(nn->rss_itbl + i));
}

/**
 * nfp_net_rss_write_key() - Write RSS hash key to device
 * @nn: NFP Net device to reconfigure
 */
void nfp_net_rss_write_key(struct nfp_net *nn)
{
	int i;

	for (i = 0; i < nfp_net_rss_key_sz(nn); i += 4)
		nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
			  get_unaligned_le32(nn->rss_key + i));
}

/**
 * nfp_net_coalesce_write_cfg() - Write irq coalescence configuration to HW
 * @nn: NFP Net device to reconfigure
 */
void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
{
	u8 i;
	u32 factor;
	u32 value;

	/* Compute factor used to convert coalesce '_usecs' parameters to
	 * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
	 * count.
	 */
	factor = nn->tlv_caps.me_freq_mhz / 16;

	/* copy RX interrupt coalesce parameters */
	value = (nn->rx_coalesce_max_frames << 16) |
		(factor * nn->rx_coalesce_usecs);
	for (i = 0; i < nn->dp.num_rx_rings; i++)
		nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);

	/* copy TX interrupt coalesce parameters */
	value = (nn->tx_coalesce_max_frames << 16) |
		(factor * nn->tx_coalesce_usecs);
	for (i = 0; i < nn->dp.num_tx_rings; i++)
		nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
}

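/* Worked example of the packing above (illustrative numbers only): with an
 * assumed ME frequency of 1200 MHz, factor = 1200 / 16 = 75 ticks per usec,
 * so the defaults from nfp_net_irqmod_init() (50 usecs, 64 frames) would be
 * written as (64 << 16) | (75 * 50) = (64 << 16) | 3750 to each IRQ_MOD word.
 */
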
/**
 * nfp_net_write_mac_addr() - Write mac address to the device control BAR
 * @nn:   NFP Net device to reconfigure
 * @addr: MAC address to write
 *
 * Writes the MAC address from the netdev to the device control BAR. Does not
 * perform the required reconfig. We do a bit of byte swapping dance because
 * firmware is LE.
 */
static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr)
{
	nn_writel(nn, NFP_NET_CFG_MACADDR + 0, get_unaligned_be32(addr));
	nn_writew(nn, NFP_NET_CFG_MACADDR + 6, get_unaligned_be16(addr + 4));
}

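/* Illustrative example of the two writes above: for the (made up) address
 * 00:15:4d:12:34:56 the first call stores the 32-bit value 0x00154d12 at
 * NFP_NET_CFG_MACADDR + 0 and the second stores 0x3456 at offset + 6, so
 * the address bytes keep their wire order in the control BAR.
 */
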
static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
{
	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);

	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
}

/**
 * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
 * @nn: NFP Net device to reconfigure
 *
 * Warning: must be fully idempotent.
 */
static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
{
	u32 new_ctrl, update;
	unsigned int r;
	int err;

	new_ctrl = nn->dp.ctrl;
	new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
	update = NFP_NET_CFG_UPDATE_GEN;
	update |= NFP_NET_CFG_UPDATE_MSIX;
	update |= NFP_NET_CFG_UPDATE_RING;

	if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;

	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);

	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	err = nfp_net_reconfig(nn, update);
	if (err)
		nn_err(nn, "Could not disable device: %d\n", err);

	for (r = 0; r < nn->dp.num_rx_rings; r++)
		nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
	for (r = 0; r < nn->dp.num_tx_rings; r++)
		nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]);
	for (r = 0; r < nn->dp.num_r_vecs; r++)
		nfp_net_vec_clear_ring_data(nn, r);

	nn->dp.ctrl = new_ctrl;
}

static void
nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_rx_ring *rx_ring, unsigned int idx)
{
	/* Write the DMA address, size and MSI-X info to the device */
	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
}

static void
nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_tx_ring *tx_ring, unsigned int idx)
{
	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
}

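/* Note on the two helpers above: the ring size is programmed as a log2
 * value, so e.g. an illustrative ring of 4096 descriptors is written as
 * ilog2(4096) = 12; this assumes descriptor counts are powers of two.
 */
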
/**
 * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
 * @nn: NFP Net device to reconfigure
 */
static int nfp_net_set_config_and_enable(struct nfp_net *nn)
{
	u32 bufsz, new_ctrl, update = 0;
	unsigned int r;
	int err;

	new_ctrl = nn->dp.ctrl;

	if (nn->dp.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) {
		nfp_net_rss_write_key(nn);
		nfp_net_rss_write_itbl(nn);
		nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg);
		update |= NFP_NET_CFG_UPDATE_RSS;
	}

	if (nn->dp.ctrl & NFP_NET_CFG_CTRL_IRQMOD) {
		nfp_net_coalesce_write_cfg(nn);
		update |= NFP_NET_CFG_UPDATE_IRQMOD;
	}

	for (r = 0; r < nn->dp.num_tx_rings; r++)
		nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r);
	for (r = 0; r < nn->dp.num_rx_rings; r++)
		nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r);

	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 ?
		  0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1);

	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ?
		  0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1);

	if (nn->dp.netdev)
		nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);

	nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.mtu);

	bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA;
	nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz);

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
	update |= NFP_NET_CFG_UPDATE_GEN;
	update |= NFP_NET_CFG_UPDATE_MSIX;
	update |= NFP_NET_CFG_UPDATE_RING;
	if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	err = nfp_net_reconfig(nn, update);
	if (err) {
		nfp_net_clear_config_and_disable(nn);
		return err;
	}

	nn->dp.ctrl = new_ctrl;

	for (r = 0; r < nn->dp.num_rx_rings; r++)
		nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]);

	/* Since reconfiguration requests while NFP is down are ignored we
	 * have to wipe the entire VXLAN configuration and reinitialize it.
	 */
	if (nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN) {
		memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
		memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
		udp_tunnel_get_rx_info(nn->dp.netdev);
	}

	return 0;
}

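/* Example of the TXRS/RXRS enable masks written above (illustrative only):
 * with 8 RX rings the value is ((u64)1 << 8) - 1 = 0xff, enabling rings 0-7.
 * The 64-ring case needs the explicit all-ones constant because shifting a
 * 64-bit value by 64 would be undefined behaviour.
 */
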
/**
 * nfp_net_close_stack() - Quiesce the stack (part of close)
 * @nn: NFP Net device to reconfigure
 */
static void nfp_net_close_stack(struct nfp_net *nn)
{
	unsigned int r;

	disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
	netif_carrier_off(nn->dp.netdev);
	nn->link_up = false;

	for (r = 0; r < nn->dp.num_r_vecs; r++) {
		disable_irq(nn->r_vecs[r].irq_vector);
		napi_disable(&nn->r_vecs[r].napi);
	}

	netif_tx_disable(nn->dp.netdev);
}

/**
 * nfp_net_close_free_all() - Free all runtime resources
 * @nn: NFP Net device to reconfigure
 */
static void nfp_net_close_free_all(struct nfp_net *nn)
{
	unsigned int r;

	nfp_net_tx_rings_free(&nn->dp);
	nfp_net_rx_rings_free(&nn->dp);

	for (r = 0; r < nn->dp.num_r_vecs; r++)
		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);

	nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
	nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
}

/**
 * nfp_net_netdev_close() - Called when the device is downed
 * @netdev: netdev structure
 */
static int nfp_net_netdev_close(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	/* Step 1: Disable RX and TX rings from the Linux kernel perspective
	 */
	nfp_net_close_stack(nn);

	/* Step 2: Tell NFP
	 */
	nfp_net_clear_config_and_disable(nn);
	nfp_port_configure(netdev, false);

	/* Step 3: Free resources
	 */
	nfp_net_close_free_all(nn);

	nn_dbg(nn, "%s down", netdev->name);
	return 0;
}

void nfp_ctrl_close(struct nfp_net *nn)
{
	int r;

	rtnl_lock();

	for (r = 0; r < nn->dp.num_r_vecs; r++) {
		disable_irq(nn->r_vecs[r].irq_vector);
		tasklet_disable(&nn->r_vecs[r].tasklet);
	}

	nfp_net_clear_config_and_disable(nn);

	nfp_net_close_free_all(nn);

	rtnl_unlock();
}

/**
 * nfp_net_open_stack() - Start the device from stack's perspective
 * @nn: NFP Net device to reconfigure
 */
static void nfp_net_open_stack(struct nfp_net *nn)
{
	unsigned int r;

	for (r = 0; r < nn->dp.num_r_vecs; r++) {
		napi_enable(&nn->r_vecs[r].napi);
		enable_irq(nn->r_vecs[r].irq_vector);
	}

	netif_tx_wake_all_queues(nn->dp.netdev);

	enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
	nfp_net_read_link_status(nn);
}

static int nfp_net_open_alloc_all(struct nfp_net *nn)
{
	int err, r;

	err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
				      nn->exn_name, sizeof(nn->exn_name),
				      NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
	if (err)
		return err;
	err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
				      nn->lsc_name, sizeof(nn->lsc_name),
				      NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
	if (err)
		goto err_free_exn;
	disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);

	for (r = 0; r < nn->dp.num_r_vecs; r++) {
		err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
		if (err)
			goto err_cleanup_vec_p;
	}

	err = nfp_net_rx_rings_prepare(nn, &nn->dp);
	if (err)
		goto err_cleanup_vec;

	err = nfp_net_tx_rings_prepare(nn, &nn->dp);
	if (err)
		goto err_free_rx_rings;

	for (r = 0; r < nn->max_r_vecs; r++)
		nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);

	return 0;

err_free_rx_rings:
	nfp_net_rx_rings_free(&nn->dp);
err_cleanup_vec:
	r = nn->dp.num_r_vecs;
err_cleanup_vec_p:
	while (r--)
		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
	nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
err_free_exn:
	nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
	return err;
}

static int nfp_net_netdev_open(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int err;

	/* Step 1: Allocate resources for rings and the like
	 * - Request interrupts
	 * - Allocate RX and TX ring resources
	 * - Setup initial RSS table
	 */
	err = nfp_net_open_alloc_all(nn);
	if (err)
		return err;

	err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings);
	if (err)
		goto err_free_all;

	err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings);
	if (err)
		goto err_free_all;

	/* Step 2: Configure the NFP
	 * - Ifup the physical interface if it exists
	 * - Enable rings from 0 to tx_rings/rx_rings - 1.
	 * - Write MAC address (in case it changed)
	 * - Set the MTU
	 * - Set the Freelist buffer size
	 * - Enable the FW
	 */
	err = nfp_port_configure(netdev, true);
	if (err)
		goto err_free_all;

	err = nfp_net_set_config_and_enable(nn);
	if (err)
		goto err_port_disable;

	/* Step 3: Enable for kernel
	 * - put some freelist descriptors on each RX ring
	 * - enable NAPI on each ring
	 * - enable all TX queues
	 * - set link state
	 */
	nfp_net_open_stack(nn);

	return 0;

err_port_disable:
	nfp_port_configure(netdev, false);
err_free_all:
	nfp_net_close_free_all(nn);
	return err;
}

int nfp_ctrl_open(struct nfp_net *nn)
{
	int err, r;

	/* ring dumping depends on vNICs being opened/closed under rtnl */
	rtnl_lock();

	err = nfp_net_open_alloc_all(nn);
	if (err)
		goto err_unlock;

	err = nfp_net_set_config_and_enable(nn);
	if (err)
		goto err_free_all;

	for (r = 0; r < nn->dp.num_r_vecs; r++)
		enable_irq(nn->r_vecs[r].irq_vector);

	rtnl_unlock();

	return 0;

err_free_all:
	nfp_net_close_free_all(nn);
err_unlock:
	rtnl_unlock();
	return err;
}

static void nfp_net_set_rx_mode(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u32 new_ctrl;

	new_ctrl = nn->dp.ctrl;

	if (!netdev_mc_empty(netdev) || netdev->flags & IFF_ALLMULTI)
		new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_L2MC;
	else
		new_ctrl &= ~NFP_NET_CFG_CTRL_L2MC;

	if (netdev->flags & IFF_PROMISC) {
		if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
			new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
		else
			nn_warn(nn, "FW does not support promiscuous mode\n");
	} else {
		new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
	}

	if (new_ctrl == nn->dp.ctrl)
		return;

	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);

	nn->dp.ctrl = new_ctrl;
}

static void nfp_net_rss_init_itbl(struct nfp_net *nn)
{
	int i;

	for (i = 0; i < sizeof(nn->rss_itbl); i++)
		nn->rss_itbl[i] =
			ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings);
}

static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	struct nfp_net_dp new_dp = *dp;

	*dp = nn->dp;
	nn->dp = new_dp;

	nn->dp.netdev->mtu = new_dp.mtu;

	if (!netif_is_rxfh_configured(nn->dp.netdev))
		nfp_net_rss_init_itbl(nn);
}

static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	unsigned int r;
	int err;

	nfp_net_dp_swap(nn, dp);

	for (r = 0; r < nn->max_r_vecs; r++)
		nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);

	err = netif_set_real_num_rx_queues(nn->dp.netdev, nn->dp.num_rx_rings);
	if (err)
		return err;

	if (nn->dp.netdev->real_num_tx_queues != nn->dp.num_stack_tx_rings) {
		err = netif_set_real_num_tx_queues(nn->dp.netdev,
						   nn->dp.num_stack_tx_rings);
		if (err)
			return err;
	}

	return nfp_net_set_config_and_enable(nn);
}

struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
{
	struct nfp_net_dp *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	*new = nn->dp;

	/* Clear things which need to be recomputed */
	new->fl_bufsz = 0;
	new->tx_rings = NULL;
	new->rx_rings = NULL;
	new->num_r_vecs = 0;
	new->num_stack_tx_rings = 0;

	return new;
}

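/* Typical usage of the clone above, a minimal sketch mirroring what
 * nfp_net_change_mtu() and nfp_net_xdp_setup_drv() below do: clone the
 * data path, adjust the fields of interest and hand the clone to
 * nfp_net_ring_reconfig(), which recomputes the derived fields and frees it:
 *
 *	struct nfp_net_dp *dp = nfp_net_clone_dp(nn);
 *
 *	if (!dp)
 *		return -ENOMEM;
 *	dp->mtu = new_mtu;
 *	return nfp_net_ring_reconfig(nn, dp, extack);
 */
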
static int
nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
		     struct netlink_ext_ack *extack)
{
	/* XDP-enabled tests */
	if (!dp->xdp_prog)
		return 0;
	if (dp->fl_bufsz > PAGE_SIZE) {
		NL_SET_ERR_MSG_MOD(extack, "MTU too large w/ XDP enabled");
		return -EINVAL;
	}
	if (dp->num_tx_rings > nn->max_tx_rings) {
		NL_SET_ERR_MSG_MOD(extack, "Insufficient number of TX rings w/ XDP enabled");
		return -EINVAL;
	}

	return 0;
}

int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp,
			  struct netlink_ext_ack *extack)
{
	int r, err;

	dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp);

	dp->num_stack_tx_rings = dp->num_tx_rings;
	if (dp->xdp_prog)
		dp->num_stack_tx_rings -= dp->num_rx_rings;

	dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings);

	err = nfp_net_check_config(nn, dp, extack);
	if (err)
		goto exit_free_dp;

	if (!netif_running(dp->netdev)) {
		nfp_net_dp_swap(nn, dp);
		err = 0;
		goto exit_free_dp;
	}

	/* Prepare new rings */
	for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) {
		err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
		if (err) {
			dp->num_r_vecs = r;
			goto err_cleanup_vecs;
		}
	}

	err = nfp_net_rx_rings_prepare(nn, dp);
	if (err)
		goto err_cleanup_vecs;

	err = nfp_net_tx_rings_prepare(nn, dp);
	if (err)
		goto err_free_rx;

	/* Stop device, swap in new rings, try to start the firmware */
	nfp_net_close_stack(nn);
	nfp_net_clear_config_and_disable(nn);

	err = nfp_net_dp_swap_enable(nn, dp);
	if (err) {
		int err2;

		nfp_net_clear_config_and_disable(nn);

		/* Try with old configuration and old rings */
		err2 = nfp_net_dp_swap_enable(nn, dp);
		if (err2)
			nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
			       err, err2);
	}
	for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);

	nfp_net_rx_rings_free(dp);
	nfp_net_tx_rings_free(dp);

	nfp_net_open_stack(nn);
exit_free_dp:
	kfree(dp);

	return err;

err_free_rx:
	nfp_net_rx_rings_free(dp);
err_cleanup_vecs:
	for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
	kfree(dp);
	return err;
}

static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_net_dp *dp;
	int err;

	err = nfp_app_check_mtu(nn->app, netdev, new_mtu);
	if (err)
		return err;

	dp = nfp_net_clone_dp(nn);
	if (!dp)
		return -ENOMEM;

	dp->mtu = new_mtu;

	return nfp_net_ring_reconfig(nn, dp, NULL);
}

static int
nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD;
	struct nfp_net *nn = netdev_priv(netdev);
	int err;

	/* Priority tagged packets with vlan id 0 are processed by the
	 * NFP as untagged packets
	 */
	if (!vid)
		return 0;

	err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
	if (err)
		return err;

	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
		  ETH_P_8021Q);

	return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
}

static int
nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL;
	struct nfp_net *nn = netdev_priv(netdev);
	int err;

	/* Priority tagged packets with vlan id 0 are processed by the
	 * NFP as untagged packets
	 */
	if (!vid)
		return 0;

	err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
	if (err)
		return err;

	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
	nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
		  ETH_P_8021Q);

	return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
}

static void nfp_net_stat64(struct net_device *netdev,
			   struct rtnl_link_stats64 *stats)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int r;

	/* Collect software stats */
	for (r = 0; r < nn->max_r_vecs; r++) {
		struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
		u64 data[3];
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&r_vec->rx_sync);
			data[0] = r_vec->rx_pkts;
			data[1] = r_vec->rx_bytes;
			data[2] = r_vec->rx_drops;
		} while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
		stats->rx_packets += data[0];
		stats->rx_bytes += data[1];
		stats->rx_dropped += data[2];

		do {
			start = u64_stats_fetch_begin(&r_vec->tx_sync);
			data[0] = r_vec->tx_pkts;
			data[1] = r_vec->tx_bytes;
			data[2] = r_vec->tx_errors;
		} while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
		stats->tx_packets += data[0];
		stats->tx_bytes += data[1];
		stats->tx_errors += data[2];
	}

	/* Add in device stats */
	stats->multicast += nn_readq(nn, NFP_NET_CFG_STATS_RX_MC_FRAMES);
	stats->rx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_RX_DISCARDS);
	stats->rx_errors += nn_readq(nn, NFP_NET_CFG_STATS_RX_ERRORS);

	stats->tx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_TX_DISCARDS);
	stats->tx_errors += nn_readq(nn, NFP_NET_CFG_STATS_TX_ERRORS);
}

static int nfp_net_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct nfp_net *nn = netdev_priv(netdev);
	u32 new_ctrl;
	int err;

	/* Assume this is not called with features we have not advertised */

	new_ctrl = nn->dp.ctrl;

	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM)
			new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM_ANY;
	}

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
			new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_TXCSUM;
	}

	if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
			new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
					      NFP_NET_CFG_CTRL_LSO;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
	}

	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN;
	}

	if (changed & NETIF_F_HW_VLAN_CTAG_TX) {
		if (features & NETIF_F_HW_VLAN_CTAG_TX)
			new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN;
	}

	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
		if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
			new_ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_CTAG_FILTER;
	}

	if (changed & NETIF_F_SG) {
		if (features & NETIF_F_SG)
			new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
	}

	err = nfp_port_set_features(netdev, features);
	if (err)
		return err;

	nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
	       netdev->features, features, changed);

	if (new_ctrl == nn->dp.ctrl)
		return 0;

	nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl);
	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		return err;

	nn->dp.ctrl = new_ctrl;

	return 0;
}

static netdev_features_t
nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
		       netdev_features_t features)
{
	u8 l4_hdr;

	/* We can't do TSO over double tagged packets (802.1AD) */
	features &= vlan_features_check(skb, features);

	if (!skb->encapsulation)
		return features;

	/* Ensure that inner L4 header offset fits into TX descriptor field */
	if (skb_is_gso(skb)) {
		u32 hdrlen;

		hdrlen = skb_inner_transport_header(skb) - skb->data +
			inner_tcp_hdrlen(skb);

		/* Assume worst case scenario of having longest possible
		 * metadata prepend - 8B
		 */
		if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ - 8))
			features &= ~NETIF_F_GSO_MASK;
	}

	/* VXLAN/GRE check */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    (l4_hdr != IPPROTO_UDP && l4_hdr != IPPROTO_GRE) ||
	    (l4_hdr == IPPROTO_UDP &&
	     (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	      sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}

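/* Worked example for the GSO check above (illustrative numbers): a
 * VXLAN-encapsulated TCP packet typically has 14 (outer Ethernet) + 20
 * (outer IPv4) + 8 (UDP) + 8 (VXLAN) + 14 (inner Ethernet) + 20 (inner
 * IPv4) = 84 bytes before the inner TCP header, so with a 20 byte inner
 * TCP header hdrlen = 104, which must fit in NFP_NET_LSO_MAX_HDR_SZ - 8.
 */
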
static int
nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int n;

	/* If port is defined, devlink_port is registered and devlink core
	 * is taking care of name formatting.
	 */
	if (nn->port)
		return -EOPNOTSUPP;

	if (nn->dp.is_vf || nn->vnic_no_name)
		return -EOPNOTSUPP;

	n = snprintf(name, len, "n%d", nn->id);
	if (n >= len)
		return -EINVAL;

	return 0;
}

/**
 * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
 * @nn:   NFP Net device to reconfigure
 * @idx:  Index into the port table where new port should be written
 * @port: UDP port to configure (pass zero to remove VXLAN port)
 */
static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
{
	int i;

	nn->vxlan_ports[idx] = port;

	if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN))
		return;

	BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2)
		nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
			  be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 |
			  be16_to_cpu(nn->vxlan_ports[i]));

	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN);
}

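/* Example of the packing above (illustrative port numbers): entries are
 * written two per 32-bit word, so with vxlan_ports[0] = 4789 (0x12b5) and
 * vxlan_ports[1] = 8472 (0x2118) the first word becomes
 * (0x2118 << 16) | 0x12b5 = 0x211812b5.
 */
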
/**
 * nfp_net_find_vxlan_idx() - find table entry of the port or a free one
 * @nn:   NFP Network structure
 * @port: UDP port to look for
 *
 * Return: if the port is already in the table -- it's position;
 *	   if the port is not in the table -- free position to use;
 *	   if the table is full -- -ENOSPC.
 */
static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port)
{
	int i, free_idx = -ENOSPC;

	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
		if (nn->vxlan_ports[i] == port)
			return i;
		if (!nn->vxlan_usecnt[i])
			free_idx = i;
	}

	return free_idx;
}

static void nfp_net_add_vxlan_port(struct net_device *netdev,
				   struct udp_tunnel_info *ti)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int idx;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	idx = nfp_net_find_vxlan_idx(nn, ti->port);
	if (idx == -ENOSPC)
		return;

	if (!nn->vxlan_usecnt[idx]++)
		nfp_net_set_vxlan_port(nn, idx, ti->port);
}

static void nfp_net_del_vxlan_port(struct net_device *netdev,
				   struct udp_tunnel_info *ti)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int idx;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	idx = nfp_net_find_vxlan_idx(nn, ti->port);
	if (idx == -ENOSPC || !nn->vxlan_usecnt[idx])
		return;

	if (!--nn->vxlan_usecnt[idx])
		nfp_net_set_vxlan_port(nn, idx, 0);
}

static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->prog;
	struct nfp_net_dp *dp;
	int err;

	if (!xdp_attachment_flags_ok(&nn->xdp, bpf))
		return -EBUSY;

	if (!prog == !nn->dp.xdp_prog) {
		WRITE_ONCE(nn->dp.xdp_prog, prog);
		xdp_attachment_setup(&nn->xdp, bpf);
		return 0;
	}

	dp = nfp_net_clone_dp(nn);
	if (!dp)
		return -ENOMEM;

	dp->xdp_prog = prog;
	dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings;
	dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0;

	/* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
	err = nfp_net_ring_reconfig(nn, dp, bpf->extack);
	if (err)
		return err;

	xdp_attachment_setup(&nn->xdp, bpf);
	return 0;
}

static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf)
{
	int err;

	if (!xdp_attachment_flags_ok(&nn->xdp_hw, bpf))
		return -EBUSY;

	err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack);
	if (err)
		return err;

	xdp_attachment_setup(&nn->xdp_hw, bpf);
	return 0;
}

static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return nfp_net_xdp_setup_drv(nn, xdp);
	case XDP_SETUP_PROG_HW:
		return nfp_net_xdp_setup_hw(nn, xdp);
	case XDP_QUERY_PROG:
		return xdp_attachment_query(&nn->xdp, xdp);
	case XDP_QUERY_PROG_HW:
		return xdp_attachment_query(&nn->xdp_hw, xdp);
	default:
		return nfp_app_bpf(nn->app, nn, xdp);
	}
}

static int nfp_net_set_mac_address(struct net_device *netdev, void *addr)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct sockaddr *saddr = addr;
	int err;

	err = eth_prepare_mac_addr_change(netdev, addr);
	if (err)
		return err;

	nfp_net_write_mac_addr(nn, saddr->sa_data);

	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MACADDR);
	if (err)
		return err;

	eth_commit_mac_addr_change(netdev, addr);

	return 0;
}

const struct net_device_ops nfp_net_netdev_ops = {
	.ndo_init		= nfp_app_ndo_init,
	.ndo_uninit		= nfp_app_ndo_uninit,
	.ndo_open		= nfp_net_netdev_open,
	.ndo_stop		= nfp_net_netdev_close,
	.ndo_start_xmit		= nfp_net_tx,
	.ndo_get_stats64	= nfp_net_stat64,
	.ndo_vlan_rx_add_vid	= nfp_net_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= nfp_net_vlan_rx_kill_vid,
	.ndo_set_vf_mac         = nfp_app_set_vf_mac,
	.ndo_set_vf_vlan        = nfp_app_set_vf_vlan,
	.ndo_set_vf_spoofchk    = nfp_app_set_vf_spoofchk,
	.ndo_set_vf_trust	= nfp_app_set_vf_trust,
	.ndo_get_vf_config	= nfp_app_get_vf_config,
	.ndo_set_vf_link_state  = nfp_app_set_vf_link_state,
	.ndo_setup_tc		= nfp_port_setup_tc,
	.ndo_tx_timeout		= nfp_net_tx_timeout,
	.ndo_set_rx_mode	= nfp_net_set_rx_mode,
	.ndo_change_mtu		= nfp_net_change_mtu,
	.ndo_set_mac_address	= nfp_net_set_mac_address,
	.ndo_set_features	= nfp_net_set_features,
	.ndo_features_check	= nfp_net_features_check,
	.ndo_get_phys_port_name	= nfp_net_get_phys_port_name,
	.ndo_udp_tunnel_add	= nfp_net_add_vxlan_port,
	.ndo_udp_tunnel_del	= nfp_net_del_vxlan_port,
	.ndo_bpf		= nfp_net_xdp,
	.ndo_get_devlink_port	= nfp_devlink_get_devlink_port,
};

/**
 * nfp_net_info() - Print general info about the NIC
 * @nn: NFP Net device to reconfigure
 */
void nfp_net_info(struct nfp_net *nn)
{
	nn_info(nn, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
		nn->dp.is_vf ? "VF " : "",
		nn->dp.num_tx_rings, nn->max_tx_rings,
		nn->dp.num_rx_rings, nn->max_rx_rings);
	nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
		nn->fw_ver.resv, nn->fw_ver.class,
		nn->fw_ver.major, nn->fw_ver.minor,
		nn->max_mtu);
	nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		nn->cap,
		nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
		nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
		nn->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
		nn->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
		nn->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
		nn->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
		nn->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
		nn->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
		nn->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
		nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO1 " : "",
		nn->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSO2 " : "",
		nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS1 " : "",
		nn->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSS2 " : "",
		nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
		nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
		nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
		nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
		nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "",
		nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
		"RXCSUM_COMPLETE " : "",
		nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
		nfp_app_extra_cap(nn->app, nn));
}

/**
 * nfp_net_alloc() - Allocate netdev and related structure
 * @pdev:         PCI device
 * @ctrl_bar:     PCI IOMEM with vNIC config memory
 * @needs_netdev: Whether to allocate a netdev for this vNIC
 * @max_tx_rings: Maximum number of TX rings supported by device
 * @max_rx_rings: Maximum number of RX rings supported by device
 *
 * This function allocates a netdev device and fills in the initial
 * part of the @struct nfp_net structure. In case of control device
 * nfp_net structure is allocated without the netdev.
 *
 * Return: NFP Net device structure, or ERR_PTR on error.
 */
struct nfp_net *
nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
	      unsigned int max_tx_rings, unsigned int max_rx_rings)
{
	struct nfp_net *nn;
	int err;

	if (needs_netdev) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
					    max_tx_rings, max_rx_rings);
		if (!netdev)
			return ERR_PTR(-ENOMEM);

		SET_NETDEV_DEV(netdev, &pdev->dev);
		nn = netdev_priv(netdev);
		nn->dp.netdev = netdev;
	} else {
		nn = vzalloc(sizeof(*nn));
		if (!nn)
			return ERR_PTR(-ENOMEM);
	}

	nn->dp.dev = &pdev->dev;
	nn->dp.ctrl_bar = ctrl_bar;
	nn->pdev = pdev;

	nn->max_tx_rings = max_tx_rings;
	nn->max_rx_rings = max_rx_rings;

	nn->dp.num_tx_rings = min_t(unsigned int,
				    max_tx_rings, num_online_cpus());
	nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings,
				    netif_get_num_default_rss_queues());

	nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings);
	nn->dp.num_r_vecs = min_t(unsigned int,
				  nn->dp.num_r_vecs, num_online_cpus());

	nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
	nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;

	mutex_init(&nn->bar_lock);

	spin_lock_init(&nn->reconfig_lock);
	spin_lock_init(&nn->link_status_lock);

	timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);

	err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
				     &nn->tlv_caps);
	if (err)
		goto err_free_nn;

	return nn;

err_free_nn:
	if (nn->dp.netdev)
		free_netdev(nn->dp.netdev);
	else
		vfree(nn);
	return ERR_PTR(err);
}

/**
 * nfp_net_free() - Undo what @nfp_net_alloc() did
 * @nn: NFP Net device to reconfigure
 */
void nfp_net_free(struct nfp_net *nn)
{
	WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);

	mutex_destroy(&nn->bar_lock);

	if (nn->dp.netdev)
		free_netdev(nn->dp.netdev);
	else
		vfree(nn);
}

/**
 * nfp_net_rss_key_sz() - Get current size of the RSS key
 * @nn: NFP Net device instance
 *
 * Return: size of the RSS key for currently selected hash function.
 */
unsigned int nfp_net_rss_key_sz(struct nfp_net *nn)
{
	switch (nn->rss_hfunc) {
	case ETH_RSS_HASH_TOP:
		return NFP_NET_CFG_RSS_KEY_SZ;
	case ETH_RSS_HASH_XOR:
		return 0;
	case ETH_RSS_HASH_CRC32:
		return 4;
	}

	nn_warn(nn, "Unknown hash function: %u\n", nn->rss_hfunc);
	return 0;
}

/**
 * nfp_net_rss_init() - Set the initial RSS parameters
 * @nn: NFP Net device to reconfigure
 */
static void nfp_net_rss_init(struct nfp_net *nn)
{
	unsigned long func_bit, rss_cap_hfunc;
	u32 reg;

	/* Read the RSS function capability and select first supported func */
	reg = nn_readl(nn, NFP_NET_CFG_RSS_CAP);
	rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, reg);
	if (!rss_cap_hfunc)
		rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC,
					  NFP_NET_CFG_RSS_TOEPLITZ);

	func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS);
	if (func_bit == NFP_NET_CFG_RSS_HFUNCS) {
		dev_warn(nn->dp.dev,
			 "Bad RSS config, defaulting to Toeplitz hash\n");
		func_bit = ETH_RSS_HASH_TOP_BIT;
	}
	nn->rss_hfunc = 1 << func_bit;

	netdev_rss_key_fill(nn->rss_key, nfp_net_rss_key_sz(nn));

	nfp_net_rss_init_itbl(nn);

	/* Enable IPv4/IPv6 TCP by default */
	nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
		      NFP_NET_CFG_RSS_IPV6_TCP |
		      FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc) |
		      NFP_NET_CFG_RSS_MASK;
}

/**
 * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters
 * @nn: NFP Net device to reconfigure
 */
static void nfp_net_irqmod_init(struct nfp_net *nn)
{
	nn->rx_coalesce_usecs      = 50;
	nn->rx_coalesce_max_frames = 64;
	nn->tx_coalesce_usecs      = 50;
	nn->tx_coalesce_max_frames = 64;
}

static void nfp_net_netdev_init(struct nfp_net *nn)
{
	struct net_device *netdev = nn->dp.netdev;

	nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);

	netdev->mtu = nn->dp.mtu;

	/* Advertise/enable offloads based on capabilities
	 *
	 * Note: netdev->features show the currently enabled features
	 * and netdev->hw_features advertises which features are
	 * supported. By default we enable most features.
	 */
	if (nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)
		netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	netdev->hw_features = NETIF_F_HIGHDMA;
	if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY) {
		netdev->hw_features |= NETIF_F_RXCSUM;
		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
		netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
		netdev->hw_features |= NETIF_F_SG;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER;
	}
	if ((nn->cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
	    nn->cap & NFP_NET_CFG_CTRL_LSO2) {
		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
					 NFP_NET_CFG_CTRL_LSO;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) {
		if (nn->cap & NFP_NET_CFG_CTRL_LSO)
			netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
		if (nn->cap & NFP_NET_CFG_CTRL_LSO)
			netdev->hw_features |= NETIF_F_GSO_GRE;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_NVGRE;
	}
	if (nn->cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE))
		netdev->hw_enc_features = netdev->hw_features;

	netdev->vlan_features = netdev->hw_features;

	if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
		if (nn->cap & NFP_NET_CFG_CTRL_LSO2) {
			nn_warn(nn, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
		} else {
			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
			nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
		}
	}
	if (nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER) {
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
	}

	netdev->features = netdev->hw_features;

	if (nfp_app_has_tc(nn->app) && nn->port)
		netdev->hw_features |= NETIF_F_HW_TC;

	/* Advertise but disable TSO by default. */
	netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;

	/* Finalise the netdev setup */
	netdev->netdev_ops = &nfp_net_netdev_ops;
	netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);

	/* MTU range: 68 - hw-specific max */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = nn->max_mtu;

	netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;

	netif_carrier_off(netdev);

	nfp_net_set_ethtool_ops(netdev);
}

static int nfp_net_read_caps(struct nfp_net *nn)
{
	/* Get some of the read-only fields from the BAR */
	nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
	nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);

	/* ABI 4.x and ctrl vNIC always use chained metadata, in other cases
	 * we allow use of non-chained metadata if RSS(v1) is the only
	 * advertised capability requiring metadata.
	 */
	nn->dp.chained_metadata_format = nn->fw_ver.major == 4 ||
					 !nn->dp.netdev ||
					 !(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
					 nn->cap & NFP_NET_CFG_CTRL_CHAIN_META;
	/* RSS(v1) uses non-chained metadata format, except in ABI 4.x where
	 * it has the same meaning as RSSv2.
	 */
	if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4)
		nn->cap &= ~NFP_NET_CFG_CTRL_RSS;

	/* Determine RX packet/metadata boundary offset */
	if (nn->fw_ver.major >= 2) {
		u32 reg;

		reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
		if (reg > NFP_NET_MAX_PREPEND) {
			nn_err(nn, "Invalid rx offset: %d\n", reg);
			return -EINVAL;
		}
		nn->dp.rx_offset = reg;
	} else {
		nn->dp.rx_offset = NFP_NET_RX_OFFSET;
	}

	/* For control vNICs mask out the capabilities app doesn't want. */
	if (!nn->dp.netdev)
		nn->cap &= nn->app->type->ctrl_cap_mask;

	return 0;
}

/**
 * nfp_net_init() - Initialise/finalise the nfp_net structure
 * @nn: NFP Net device structure
 *
 * Return: 0 on success or negative errno on error.
 */
int nfp_net_init(struct nfp_net *nn)
{
	int err;

	nn->dp.rx_dma_dir = DMA_FROM_DEVICE;

	err = nfp_net_read_caps(nn);
	if (err)
		return err;

	/* Set default MTU and Freelist buffer size */
	if (!nfp_net_is_data_vnic(nn) && nn->app->ctrl_mtu) {
		if (nn->app->ctrl_mtu <= nn->max_mtu) {
			nn->dp.mtu = nn->app->ctrl_mtu;
		} else {
			if (nn->app->ctrl_mtu != NFP_APP_CTRL_MTU_MAX)
				nn_warn(nn, "app requested MTU above max supported %u > %u\n",
					nn->app->ctrl_mtu, nn->max_mtu);
			nn->dp.mtu = nn->max_mtu;
		}
	} else if (nn->max_mtu < NFP_NET_DEFAULT_MTU) {
		nn->dp.mtu = nn->max_mtu;
	} else {
		nn->dp.mtu = NFP_NET_DEFAULT_MTU;
	}
	nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);

	if (nfp_app_ctrl_uses_data_vnics(nn->app))
		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_CMSG_DATA;

	if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) {
		nfp_net_rss_init(nn);
		nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?:
					 NFP_NET_CFG_CTRL_RSS;
	}

	/* Allow L2 Broadcast and Multicast through by default, if supported */
	if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;

	/* Allow IRQ moderation, if supported */
	if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
		nfp_net_irqmod_init(nn);
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
	}

	/* Stash the re-configuration queue away. First odd queue in TX Bar */
	nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;

	/* Make sure the FW knows the netdev is supposed to be disabled here */
	nn_writel(nn, NFP_NET_CFG_CTRL, 0);
	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING |
				   NFP_NET_CFG_UPDATE_GEN);
	if (err)
		return err;

	if (nn->dp.netdev)
		nfp_net_netdev_init(nn);

	nfp_net_vecs_init(nn);

	if (!nn->dp.netdev)
		return 0;
	return register_netdev(nn->dp.netdev);
}

/**
 * nfp_net_clean() - Undo what nfp_net_init() did.
 * @nn: NFP Net device structure
 */
void nfp_net_clean(struct nfp_net *nn)
{
	if (!nn->dp.netdev)
		return;

	unregister_netdev(nn->dp.netdev);
	nfp_net_reconfig_wait_posted(nn);