// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2019 Netronome Systems, Inc. */

#include <linux/bpf_trace.h>
#include <linux/netdevice.h>
#include <linux/overflow.h>
#include <linux/sizes.h>
#include <linux/bitfield.h>

#include "../nfp_app.h"
#include "../nfp_net.h"
#include "../nfp_net_dp.h"
#include "../crypto/crypto.h"
#include "../crypto/fw.h"
#include "nfdk.h"
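/* The TX ring stop/wake thresholds below are deliberately asymmetric:
 * the ring is stopped once fewer than NFDK_TX_DESC_STOP_CNT slots are
 * free, but only woken again once twice that many are available. The
 * gap provides hysteresis so the queue does not flap between stopped
 * and started under steady load.
 */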
static int nfp_nfdk_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
{
	return !nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT * 2);
}

static int nfp_nfdk_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
{
	return nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT);
}

static void nfp_nfdk_tx_ring_stop(struct netdev_queue *nd_q,
				  struct nfp_net_tx_ring *tx_ring)
{
	netif_tx_stop_queue(nd_q);

	/* We can race with the TX completion out of NAPI so recheck */
	smp_mb();
	if (unlikely(nfp_nfdk_tx_ring_should_wake(tx_ring)))
		netif_tx_start_queue(nd_q);
}
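/* Build the raw 64-bit LSO descriptor for a GSO skb. Header length,
 * L3/L4 offsets, MSS and segment count are packed into a local
 * struct nfp_nfdk_tx_desc which is returned by value as its raw
 * representation, ready to be written into the descriptor ring.
 */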
static __le64
nfp_nfdk_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfdk_tx_buf *txbuf,
		struct sk_buff *skb)
{
	u32 segs, hdrlen, l3_offset, l4_offset;
	struct nfp_nfdk_tx_desc txd;
	u16 mss;

	if (!skb->encapsulation) {
		l3_offset = skb_network_offset(skb);
		l4_offset = skb_transport_offset(skb);
		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
	} else {
		l3_offset = skb_inner_network_offset(skb);
		l4_offset = skb_inner_transport_offset(skb);
		hdrlen = skb_inner_transport_header(skb) - skb->data +
			inner_tcp_hdrlen(skb);
	}

	segs = skb_shinfo(skb)->gso_segs;
	mss = skb_shinfo(skb)->gso_size & NFDK_DESC_TX_MSS_MASK;

	/* Note: TSO of a packet with metadata prepended to the skb is not
	 * supported yet; in that case l3_offset/l4_offset and lso_hdrlen
	 * would need to be handled correctly here.
	 * Concern:
	 * The driver doesn't have md_bytes easily available at this point.
	 * The PCI.IN PD ME won't have md_bytes bytes to add to lso_hdrlen,
	 * so it needs the full length there. The app MEs might prefer
	 * l3_offset and l4_offset relative to the start of packet data,
	 * but could probably cope with it being relative to the CTM buf
	 * data offset.
	 */
	txd.l3_offset = l3_offset;
	txd.l4_offset = l4_offset;
	txd.lso_meta_res = 0;
	txd.mss = cpu_to_le16(mss);
	txd.lso_hdrlen = hdrlen;
	txd.lso_totsegs = segs;

	txbuf->pkt_cnt = segs;
	txbuf->real_len = skb->len + hdrlen * (txbuf->pkt_cnt - 1);

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_lso++;
	u64_stats_update_end(&r_vec->tx_sync);

	return txd.raw;
}
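/* Compute the checksum offload flags for the packet's metadata
 * descriptor. L4 checksum offload is requested for any
 * CHECKSUM_PARTIAL skb; the L3 flag is added only for IPv4 since IPv6
 * has no header checksum.
 */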
static __le64
nfp_nfdk_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
		 unsigned int pkt_cnt, struct sk_buff *skb, u64 flags)
{
	struct ipv6hdr *ipv6h;
	struct iphdr *iph;

	if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
		return flags;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return flags;

	flags |= NFDK_DESC_TX_L4_CSUM;

	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);

	/* L3 checksum offloading flag is not required for ipv6 */
	if (iph->version == 4) {
		flags |= NFDK_DESC_TX_L3_CSUM;
	} else if (ipv6h->version != 6) {
		nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
		return flags;
	}

	u64_stats_update_begin(&r_vec->tx_sync);
	if (!skb->encapsulation) {
		r_vec->hw_csum_tx += pkt_cnt;
	} else {
		flags |= NFDK_DESC_TX_ENCAP;
		r_vec->hw_csum_tx_inner += pkt_cnt;
	}
	u64_stats_update_end(&r_vec->tx_sync);

	return flags;
}
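/* NFDK descriptors are grouped into blocks of NFDK_TX_DESC_BLOCK_CNT
 * entries and a packet's descriptor chain must not cross a block
 * boundary. If the next packet would straddle a block, or would push
 * the block over NFDK_TX_MAX_DATA_PER_BLOCK bytes of data, the rest of
 * the current block is padded with zeroed NOP descriptors (and a NULL
 * skb pointer so the completion path can recognize the padding).
 */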
static int
nfp_nfdk_tx_maybe_close_block(struct nfp_net_tx_ring *tx_ring,
			      unsigned int nr_frags, struct sk_buff *skb)
{
	unsigned int n_descs, wr_p, nop_slots;
	const skb_frag_t *frag, *fend;
	struct nfp_nfdk_tx_desc *txd;
	unsigned int wr_idx;
	int err;

recount_descs:
	n_descs = nfp_nfdk_headlen_to_segs(skb_headlen(skb));

	frag = skb_shinfo(skb)->frags;
	fend = frag + nr_frags;
	for (; frag < fend; frag++)
		n_descs += DIV_ROUND_UP(skb_frag_size(frag),
					NFDK_TX_MAX_DATA_PER_DESC);

	if (unlikely(n_descs > NFDK_TX_DESC_GATHER_MAX)) {
		if (skb_is_nonlinear(skb)) {
			err = skb_linearize(skb);
			if (err)
				return err;
			goto recount_descs;
		}
		return -EINVAL;
	}

	/* Under count by 1 (don't count meta) for the round down to work out */
	n_descs += !!skb_is_gso(skb);

	if (round_down(tx_ring->wr_p, NFDK_TX_DESC_BLOCK_CNT) !=
	    round_down(tx_ring->wr_p + n_descs, NFDK_TX_DESC_BLOCK_CNT))
		goto close_block;

	if ((u32)tx_ring->data_pending + skb->len > NFDK_TX_MAX_DATA_PER_BLOCK)
		goto close_block;

	return 0;

close_block:
	wr_p = tx_ring->wr_p;
	nop_slots = D_BLOCK_CPL(wr_p);

	wr_idx = D_IDX(tx_ring, wr_p);
	tx_ring->ktxbufs[wr_idx].skb = NULL;
	txd = &tx_ring->ktxds[wr_idx];

	memset(txd, 0, array_size(nop_slots, sizeof(struct nfp_nfdk_tx_desc)));

	tx_ring->data_pending = 0;
	tx_ring->wr_p += nop_slots;
	tx_ring->wr_ptr_add += nop_slots;

	return 0;
}
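/* If the skb carries a HW port mux metadata destination, prepend the
 * 32-bit port id to the packet data. Returns the number of bytes
 * pushed, 0 when no port id is required, or a negative errno.
 */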
static int nfp_nfdk_prep_port_id(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);
	unsigned char *data;

	if (unlikely(!md_dst))
		return 0;
	if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))
		return 0;

	/* Note: TSO of an skb with metadata prepended is an unsupported
	 * case. See the comments in `nfp_nfdk_tx_tso` for details.
	 */
	if (unlikely(md_dst && skb_is_gso(skb)))
		return -EOPNOTSUPP;

	if (unlikely(skb_cow_head(skb, sizeof(md_dst->u.port_info.port_id))))
		return -ENOMEM;

	data = skb_push(skb, sizeof(md_dst->u.port_info.port_id));
	put_unaligned_be32(md_dst->u.port_info.port_id, data);

	return sizeof(md_dst->u.port_info.port_id);
}
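/* Build the TX metadata prepend. The port id pushed by
 * nfp_nfdk_prep_port_id() is preceded by a 32-bit word encoding the
 * metadata length and field type so the firmware can walk the chained
 * metadata. Returns NFDK_DESC_TX_CHAIN_META, 0 when there is no
 * metadata, or a negative errno.
 */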
static int
nfp_nfdk_prep_tx_meta(struct nfp_app *app, struct sk_buff *skb,
		      struct nfp_net_r_vector *r_vec)
{
	unsigned char *data;
	u32 meta_id = 0;
	int md_bytes;
	int res;

	res = nfp_nfdk_prep_port_id(skb);
	if (unlikely(res <= 0))
		return res;

	md_bytes = res;
	meta_id = NFP_NET_META_PORTID;

	if (unlikely(skb_cow_head(skb, sizeof(meta_id))))
		return -ENOMEM;

	md_bytes += sizeof(meta_id);

	meta_id = FIELD_PREP(NFDK_META_LEN, md_bytes) |
		  FIELD_PREP(NFDK_META_FIELDS, meta_id);

	data = skb_push(skb, sizeof(meta_id));
	put_unaligned_be32(meta_id, data);

	return NFDK_DESC_TX_CHAIN_META;
}
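/* On the transmit path below each packet consumes a chain of
 * descriptors: a head descriptor covering (the start of) the linear
 * area, gather descriptors for the remainder and any frags, a metadata
 * descriptor, and for GSO packets one further LSO descriptor.
 */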
/**
 * nfp_nfdk_tx() - Main transmit entry point
 * @skb:    SKB to transmit
 * @netdev: netdev structure
 *
 * Return: NETDEV_TX_OK on success.
 */
netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_nfdk_tx_buf *txbuf, *etxbuf;
	u32 cnt, tmp_dlen, dlen_type = 0;
	struct nfp_net_tx_ring *tx_ring;
	struct nfp_net_r_vector *r_vec;
	const skb_frag_t *frag, *fend;
	struct nfp_nfdk_tx_desc *txd;
	unsigned int real_len, qidx;
	unsigned int dma_len, type;
	struct netdev_queue *nd_q;
	struct nfp_net_dp *dp;
	int nr_frags, wr_idx;
	dma_addr_t dma_addr;
	u64 metadata;

	dp = &nn->dp;
	qidx = skb_get_queue_mapping(skb);
	tx_ring = &dp->tx_rings[qidx];
	r_vec = tx_ring->r_vec;
	nd_q = netdev_get_tx_queue(dp->netdev, qidx);

	/* Don't bother counting frags, assume the worst */
	if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
		nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
			   qidx, tx_ring->wr_p, tx_ring->rd_p);
		netif_tx_stop_queue(nd_q);
		nfp_net_tx_xmit_more_flush(tx_ring);
		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tx_busy++;
		u64_stats_update_end(&r_vec->tx_sync);
		return NETDEV_TX_BUSY;
	}

	metadata = nfp_nfdk_prep_tx_meta(nn->app, skb, r_vec);
	if (unlikely((int)metadata < 0))
		goto err_flush;

	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nfp_nfdk_tx_maybe_close_block(tx_ring, nr_frags, skb))
		goto err_flush;

	/* DMA map all */
	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
	txd = &tx_ring->ktxds[wr_idx];
	txbuf = &tx_ring->ktxbufs[wr_idx];

	dma_len = skb_headlen(skb);
	if (skb_is_gso(skb))
		type = NFDK_DESC_TX_TYPE_TSO;
	else if (!nr_frags && dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
		type = NFDK_DESC_TX_TYPE_SIMPLE;
	else
		type = NFDK_DESC_TX_TYPE_GATHER;

	dma_addr = dma_map_single(dp->dev, skb->data, dma_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dp->dev, dma_addr))
		goto err_warn_dma;

	txbuf->skb = skb;
	txbuf++;

	txbuf->dma_addr = dma_addr;
	txbuf++;

	/* FIELD_PREP() implicitly truncates to chunk */
	dma_len -= 1;
	dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) |
		    FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);

	txd->dma_len_type = cpu_to_le16(dlen_type);
	nfp_desc_set_dma_addr(txd, dma_addr);

	/* starts at bit 0 */
	BUILD_BUG_ON(!(NFDK_DESC_TX_DMA_LEN_HEAD & 1));

	/* Preserve the original dlen_type, this way below the EOP logic
	 * can use dlen_type.
	 */
	tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
	dma_len -= tmp_dlen;
	dma_addr += tmp_dlen + 1;
	txd++;

	/* The rest of the data (if any) will be in larger DMA descriptors
	 * and is handled with the fragment loop.
	 */
	frag = skb_shinfo(skb)->frags;
	fend = frag + nr_frags;

	while (true) {
		while (dma_len > 0) {
			dma_len -= 1;
			dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);

			txd->dma_len_type = cpu_to_le16(dlen_type);
			nfp_desc_set_dma_addr(txd, dma_addr);

			dma_len -= dlen_type;
			dma_addr += dlen_type + 1;
			txd++;
		}

		if (frag >= fend)
			break;

		dma_len = skb_frag_size(frag);
		dma_addr = skb_frag_dma_map(dp->dev, frag, 0, dma_len,
					    DMA_TO_DEVICE);
		if (dma_mapping_error(dp->dev, dma_addr))
			goto err_unmap;

		txbuf->dma_addr = dma_addr;
		txbuf++;

		frag++;
	}

	(txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);

	if (!skb_is_gso(skb)) {
		real_len = skb->len;
		/* Metadata desc */
		metadata = nfp_nfdk_tx_csum(dp, r_vec, 1, skb, metadata);
		txd->raw = cpu_to_le64(metadata);
		txd++;
	} else {
		/* lso desc should be placed after metadata desc */
		(txd + 1)->raw = nfp_nfdk_tx_tso(r_vec, txbuf, skb);
		real_len = txbuf->real_len;
		/* Metadata desc */
		metadata = nfp_nfdk_tx_csum(dp, r_vec, txbuf->pkt_cnt, skb, metadata);
		txd->raw = cpu_to_le64(metadata);
		txd += 2;
		txbuf++;
	}

	cnt = txd - tx_ring->ktxds - wr_idx;
	if (unlikely(round_down(wr_idx, NFDK_TX_DESC_BLOCK_CNT) !=
		     round_down(wr_idx + cnt - 1, NFDK_TX_DESC_BLOCK_CNT)))
		goto err_warn_overflow;

	skb_tx_timestamp(skb);

	tx_ring->wr_p += cnt;
	if (tx_ring->wr_p % NFDK_TX_DESC_BLOCK_CNT)
		tx_ring->data_pending += skb->len;
	else
		tx_ring->data_pending = 0;

	if (nfp_nfdk_tx_ring_should_stop(tx_ring))
		nfp_nfdk_tx_ring_stop(nd_q, tx_ring);

	tx_ring->wr_ptr_add += cnt;
	if (__netdev_tx_sent_queue(nd_q, real_len, netdev_xmit_more()))
		nfp_net_tx_xmit_more_flush(tx_ring);

	return NETDEV_TX_OK;

err_warn_overflow:
	WARN_ONCE(1, "unable to fit packet into a descriptor wr_idx:%d head:%d frags:%d cnt:%d",
		  wr_idx, skb_headlen(skb), nr_frags, cnt);
	if (skb_is_gso(skb))
		txbuf--;
err_unmap:
	/* txbuf pointed to the next-to-use */
	etxbuf = txbuf;
	/* first txbuf holds the skb */
	txbuf = &tx_ring->ktxbufs[wr_idx + 1];
	if (txbuf < etxbuf) {
		dma_unmap_single(dp->dev, txbuf->dma_addr,
				 skb_headlen(skb), DMA_TO_DEVICE);
		txbuf->raw = 0;
		txbuf++;
	}
	frag = skb_shinfo(skb)->frags;
	while (txbuf < etxbuf) {
		dma_unmap_page(dp->dev, txbuf->dma_addr,
			       skb_frag_size(frag), DMA_TO_DEVICE);
		txbuf->raw = 0;
		frag++;
		txbuf++;
	}
err_warn_dma:
	nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
err_flush:
	nfp_net_tx_xmit_more_flush(tx_ring);
	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_errors++;
	u64_stats_update_end(&r_vec->tx_sync);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
/**
 * nfp_nfdk_tx_complete() - Handle completed TX packets
 * @tx_ring:	TX ring structure
 * @budget:	NAPI budget (only used as bool to determine if in NAPI context)
 */
static void nfp_nfdk_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	u32 done_pkts = 0, done_bytes = 0;
	struct nfp_nfdk_tx_buf *ktxbufs;
	struct device *dev = dp->dev;
	struct netdev_queue *nd_q;
	u32 rd_p, qcp_rd_p;
	int todo;

	rd_p = tx_ring->rd_p;
	if (tx_ring->wr_p == rd_p)
		return;

	/* Work out how many descriptors have been transmitted */
	qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);

	if (qcp_rd_p == tx_ring->qcp_rd_p)
		return;

	todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
	ktxbufs = tx_ring->ktxbufs;

	while (todo > 0) {
		const skb_frag_t *frag, *fend;
		unsigned int size, n_descs = 1;
		struct nfp_nfdk_tx_buf *txbuf;
		struct sk_buff *skb;

		txbuf = &ktxbufs[D_IDX(tx_ring, rd_p)];
		skb = txbuf->skb;
		txbuf++;

		/* Closed block */
		if (!skb) {
			n_descs = D_BLOCK_CPL(rd_p);
			goto next;
		}

		/* Unmap head */
		size = skb_headlen(skb);
		n_descs += nfp_nfdk_headlen_to_segs(size);
		dma_unmap_single(dev, txbuf->dma_addr, size, DMA_TO_DEVICE);
		txbuf++;

		/* Unmap frags */
		frag = skb_shinfo(skb)->frags;
		fend = frag + skb_shinfo(skb)->nr_frags;
		for (; frag < fend; frag++) {
			size = skb_frag_size(frag);
			n_descs += DIV_ROUND_UP(size,
						NFDK_TX_MAX_DATA_PER_DESC);
			dma_unmap_page(dev, txbuf->dma_addr,
				       skb_frag_size(frag), DMA_TO_DEVICE);
			txbuf++;
		}

		if (!skb_is_gso(skb)) {
			done_bytes += skb->len;
			done_pkts++;
		} else {
			done_bytes += txbuf->real_len;
			done_pkts += txbuf->pkt_cnt;
			/* The LSO descriptor follows the metadata descriptor */
			n_descs++;
		}

		napi_consume_skb(skb, budget);
next:
		rd_p += n_descs;
		todo -= n_descs;
	}

	tx_ring->rd_p = rd_p;
	tx_ring->qcp_rd_p = qcp_rd_p;

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_bytes += done_bytes;
	r_vec->tx_pkts += done_pkts;
	u64_stats_update_end(&r_vec->tx_sync);

	if (!dp->netdev)
		return;

	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
	netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
	if (nfp_nfdk_tx_ring_should_wake(tx_ring)) {
		/* Make sure TX thread will see updated tx_ring->rd_p */
		smp_mb();

		if (unlikely(netif_tx_queue_stopped(nd_q)))
			netif_tx_wake_queue(nd_q);
	}

	WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
		  "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
		  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
}
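/* Reclaim descriptors on the XDP TX ring, called from the RX path when
 * the XDP ring has completions outstanding.
 */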
static bool nfp_nfdk_xdp_complete(struct nfp_net_tx_ring *tx_ring)
{
	/* XDP TX ring completion handling is elided in this listing;
	 * report completion so the caller does not force another poll.
	 */
	return true;
}
/* Receive processing */
static void *
nfp_nfdk_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
	void *frag;

	if (!dp->xdp_prog) {
		frag = napi_alloc_frag(dp->fl_bufsz);
		if (unlikely(!frag))
			return NULL;
	} else {
		struct page *page;

		page = dev_alloc_page();
		if (unlikely(!page))
			return NULL;
		frag = page_address(page);
	}

	*dma_addr = nfp_net_dma_map_rx(dp, frag);
	if (dma_mapping_error(dp->dev, *dma_addr)) {
		nfp_net_free_frag(frag, dp->xdp_prog);
		nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
		return NULL;
	}

	return frag;
}
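/* RX buffers are replaced rather than recycled: a freshly mapped
 * fragment takes over the consumed freelist slot, so the ring below
 * always stays fully populated.
 */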
/**
 * nfp_nfdk_rx_give_one() - Put mapped skb on the software and hardware rings
 * @dp:		NFP Net data path struct
 * @rx_ring:	RX ring structure
 * @frag:	page fragment buffer
 * @dma_addr:	DMA address of skb mapping
 */
static void
nfp_nfdk_rx_give_one(const struct nfp_net_dp *dp,
		     struct nfp_net_rx_ring *rx_ring,
		     void *frag, dma_addr_t dma_addr)
{
	unsigned int wr_idx;

	wr_idx = D_IDX(rx_ring, rx_ring->wr_p);

	nfp_net_dma_sync_dev_rx(dp, dma_addr);

	/* Stash SKB and DMA address away */
	rx_ring->rxbufs[wr_idx].frag = frag;
	rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;

	/* Fill freelist descriptor */
	rx_ring->rxds[wr_idx].fld.reserved = 0;
	rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
	nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
			      dma_addr + dp->rx_dma_off);

	rx_ring->wr_p++;
	if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
		/* Update write pointer of the freelist queue. Make
		 * sure all writes are flushed before telling the hardware.
		 */
		wmb();
		nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
	}
}
/**
 * nfp_nfdk_rx_ring_fill_freelist() - Give buffers from the ring to FW
 * @dp:	     NFP Net data path struct
 * @rx_ring: RX ring to fill
 */
void nfp_nfdk_rx_ring_fill_freelist(struct nfp_net_dp *dp,
				    struct nfp_net_rx_ring *rx_ring)
{
	unsigned int i;

	for (i = 0; i < rx_ring->cnt - 1; i++)
		nfp_nfdk_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
				     rx_ring->rxbufs[i].dma_addr);
}
/**
 * nfp_nfdk_rx_csum_has_errors() - group check if rxd has any csum errors
 * @flags: RX descriptor flags field in CPU byte order
 */
static int nfp_nfdk_rx_csum_has_errors(u16 flags)
{
	u16 csum_all_checked, csum_all_ok;

	csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
	csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;

	/* The shift lines the OK bits up with their CSUM bits, so any
	 * checksum that was checked but not reported OK is an error.
	 */
	return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
}
/**
 * nfp_nfdk_rx_csum() - set SKB checksum field based on RX descriptor flags
 * @dp:	   NFP Net data path struct
 * @r_vec: per-ring structure
 * @rxd:   Pointer to RX descriptor
 * @meta:  Parsed metadata prepend
 * @skb:   Pointer to SKB
 */
static void
nfp_nfdk_rx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
		 struct nfp_net_rx_desc *rxd, struct nfp_meta_parsed *meta,
		 struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	if (!(dp->netdev->features & NETIF_F_RXCSUM))
		return;

	if (meta->csum_type) {
		skb->ip_summed = meta->csum_type;
		skb->csum = meta->csum;
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_complete++;
		u64_stats_update_end(&r_vec->rx_sync);
		return;
	}

	if (nfp_nfdk_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_error++;
		u64_stats_update_end(&r_vec->rx_sync);
		return;
	}

	/* Assume that the firmware will never report inner CSUM_OK unless outer
	 * L4 headers were successfully parsed. FW will always report zero UDP
	 * checksum as CSUM_OK.
	 */
	if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
	    rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
		__skb_incr_checksum_unnecessary(skb);
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_ok++;
		u64_stats_update_end(&r_vec->rx_sync);
	}

	if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
	    rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
		__skb_incr_checksum_unnecessary(skb);
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_inner_ok++;
		u64_stats_update_end(&r_vec->rx_sync);
	}
}
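/* Record the RSS hash and its type in the parsed metadata. Pure L3
 * hash types map to PKT_HASH_TYPE_L3; everything else is assumed to
 * include an L4 component.
 */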
static void
nfp_nfdk_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
		  unsigned int type, __be32 *hash)
{
	if (!(netdev->features & NETIF_F_RXHASH))
		return;

	switch (type) {
	case NFP_NET_RSS_IPV4:
	case NFP_NET_RSS_IPV6:
	case NFP_NET_RSS_IPV6_EX:
		meta->hash_type = PKT_HASH_TYPE_L3;
		break;
	default:
		meta->hash_type = PKT_HASH_TYPE_L4;
		break;
	}

	meta->hash = get_unaligned_be32(hash);
}
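/* Walk the chained metadata prepend. The leading 32-bit word holds a
 * sequence of NFP_NET_META_FIELD_SIZE-bit field type ids, consumed low
 * bits first, with each field's payload following in the buffer.
 * Returns true if the metadata is malformed, i.e. if the walk does not
 * end exactly at the packet start.
 */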
static bool
nfp_nfdk_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
		    void *data, void *pkt, unsigned int pkt_len, int meta_len)
{
	u32 meta_info;

	meta_info = get_unaligned_be32(data);
	data += 4;

	while (meta_info) {
		switch (meta_info & NFP_NET_META_FIELD_MASK) {
		case NFP_NET_META_HASH:
			meta_info >>= NFP_NET_META_FIELD_SIZE;
			nfp_nfdk_set_hash(netdev, meta,
					  meta_info & NFP_NET_META_FIELD_MASK,
					  (__be32 *)data);
			data += 4;
			break;
		case NFP_NET_META_MARK:
			meta->mark = get_unaligned_be32(data);
			data += 4;
			break;
		case NFP_NET_META_PORTID:
			meta->portid = get_unaligned_be32(data);
			data += 4;
			break;
		case NFP_NET_META_CSUM:
			meta->csum_type = CHECKSUM_COMPLETE;
			meta->csum =
				(__force __wsum)__get_unaligned_cpu32(data);
			data += 4;
			break;
		case NFP_NET_META_RESYNC_INFO:
			if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
						      pkt_len))
				return false;
			data += sizeof(struct nfp_net_tls_resync_req);
			break;
		default:
			return true;
		}

		meta_info >>= NFP_NET_META_FIELD_SIZE;
	}

	return data != pkt;
}
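/* Shared RX drop path: account the drop, return the buffer to the
 * freelist when possible, and free any skb already built around it.
 */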
static void
nfp_nfdk_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
		 struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
		 struct sk_buff *skb)
{
	u64_stats_update_begin(&r_vec->rx_sync);
	r_vec->rx_drops++;
	/* If we have both skb and rxbuf the replacement buffer allocation
	 * must have failed, count this as an alloc failure.
	 */
	if (skb && rxbuf)
		r_vec->rx_replace_buf_alloc_fail++;
	u64_stats_update_end(&r_vec->rx_sync);

	/* The skb is built on top of the frag, and freeing the skb would
	 * free the frag, so to be able to reuse it we need an extra ref.
	 */
	if (skb && rxbuf && skb->head == rxbuf->frag)
		page_ref_inc(virt_to_head_page(rxbuf->frag));
	if (rxbuf)
		nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
	if (skb)
		dev_kfree_skb_any(skb);
}
/**
 * nfp_nfdk_rx() - receive up to @budget packets on @rx_ring
 * @rx_ring:   RX ring to receive from
 * @budget:    NAPI budget
 *
 * Note, this function is separated out from the napi poll function to
 * more cleanly separate packet receive code from other bookkeeping
 * functions performed in the napi poll function.
 *
 * Return: Number of packets received.
 */
static int nfp_nfdk_rx(struct nfp_net_rx_ring *rx_ring, int budget)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	struct nfp_net_tx_ring *tx_ring;
	struct bpf_prog *xdp_prog;
	bool xdp_tx_cmpl = false;
	unsigned int true_bufsz;
	struct sk_buff *skb;
	int pkts_polled = 0;
	struct xdp_buff xdp;
	int idx;

	xdp_prog = READ_ONCE(dp->xdp_prog);
	true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
	xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
		      &rx_ring->xdp_rxq);
	tx_ring = r_vec->xdp_ring;

	while (pkts_polled < budget) {
		unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
		struct nfp_net_rx_buf *rxbuf;
		struct nfp_net_rx_desc *rxd;
		struct nfp_meta_parsed meta;
		bool redir_egress = false;
		struct net_device *netdev;
		dma_addr_t new_dma_addr;
		u32 meta_len_xdp = 0;
		void *new_frag;

		idx = D_IDX(rx_ring, rx_ring->rd_p);

		rxd = &rx_ring->rxds[idx];
		if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
			break;

		/* Memory barrier to ensure that we won't do other reads
		 * before the DD bit.
		 */
		dma_rmb();

		memset(&meta, 0, sizeof(meta));

		rx_ring->rd_p++;
		pkts_polled++;

		rxbuf =	&rx_ring->rxbufs[idx];
		/*	     < meta_len >
		 *  <-- [rx_offset] -->
		 *  ---------------------------------------------------------
		 * | [XX] |  metadata  |             packet           | XXXX |
		 *  ---------------------------------------------------------
		 *         <---------------- data_len --------------->
		 *
		 * The rx_offset is fixed for all packets, the meta_len can vary
		 * on a packet by packet basis. If rx_offset is set to zero
		 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
		 * buffer and is immediately followed by the packet (no [XX]).
		 */
		meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
		data_len = le16_to_cpu(rxd->rxd.data_len);
		pkt_len = data_len - meta_len;

		pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
		if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
			pkt_off += meta_len;
		else
			pkt_off += dp->rx_offset;
		meta_off = pkt_off - meta_len;

		/* Stats update */
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->rx_pkts++;
		r_vec->rx_bytes += pkt_len;
		u64_stats_update_end(&r_vec->rx_sync);

		if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
			     (dp->rx_offset && meta_len > dp->rx_offset))) {
			nn_dp_warn(dp, "oversized RX packet metadata %u\n",
				   meta_len);
			nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
			continue;
		}

		nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
					data_len);

		if (meta_len) {
			if (unlikely(nfp_nfdk_parse_meta(dp->netdev, &meta,
							 rxbuf->frag + meta_off,
							 rxbuf->frag + pkt_off,
							 pkt_len, meta_len))) {
				nn_dp_warn(dp, "invalid RX packet metadata\n");
				nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf,
						 NULL);
				continue;
			}
		}

		if (xdp_prog && !meta.portid) {
			void *orig_data = rxbuf->frag + pkt_off;
			int act;

			xdp_prepare_buff(&xdp,
					 rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
					 pkt_off - NFP_NET_RX_BUF_HEADROOM,
					 pkt_len, true);

			act = bpf_prog_run_xdp(xdp_prog, &xdp);

			pkt_len = xdp.data_end - xdp.data;
			pkt_off += xdp.data - orig_data;

			switch (act) {
			case XDP_PASS:
				meta_len_xdp = xdp.data - xdp.data_meta;
				break;
			default:
				bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
				fallthrough;
			case XDP_ABORTED:
				trace_xdp_exception(dp->netdev, xdp_prog, act);
				fallthrough;
			case XDP_DROP:
				nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag,
						     rxbuf->dma_addr);
				continue;
			}
		}

		if (likely(!meta.portid)) {
			netdev = dp->netdev;
		} else if (meta.portid == NFP_META_PORT_ID_CTRL) {
			struct nfp_net *nn = netdev_priv(dp->netdev);

			nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
					    pkt_len);
			nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag,
					     rxbuf->dma_addr);
			continue;
		} else {
			struct nfp_net *nn;

			nn = netdev_priv(dp->netdev);
			netdev = nfp_app_dev_get(nn->app, meta.portid,
						 &redir_egress);
			if (unlikely(!netdev)) {
				nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf,
						 NULL);
				continue;
			}

			if (nfp_netdev_is_nfp_repr(netdev))
				nfp_repr_inc_rx_stats(netdev, pkt_len);
		}

		skb = build_skb(rxbuf->frag, true_bufsz);
		if (unlikely(!skb)) {
			nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
			continue;
		}
		new_frag = nfp_nfdk_napi_alloc_one(dp, &new_dma_addr);
		if (unlikely(!new_frag)) {
			nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
			continue;
		}

		nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);

		nfp_nfdk_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);

		skb_reserve(skb, pkt_off);
		skb_put(skb, pkt_len);

		skb->mark = meta.mark;
		skb_set_hash(skb, meta.hash, meta.hash_type);

		skb_record_rx_queue(skb, rx_ring->idx);
		skb->protocol = eth_type_trans(skb, netdev);

		nfp_nfdk_rx_csum(dp, r_vec, rxd, &meta, skb);

		if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(rxd->rxd.vlan));
		if (meta_len_xdp)
			skb_metadata_set(skb, meta_len_xdp);

		if (likely(!redir_egress)) {
			napi_gro_receive(&rx_ring->r_vec->napi, skb);
		} else {
			skb->dev = netdev;
			skb_reset_network_header(skb);
			__skb_push(skb, ETH_HLEN);
			dev_queue_xmit(skb);
		}
	}

	if (xdp_prog) {
		if (tx_ring->wr_ptr_add)
			nfp_net_tx_xmit_more_flush(tx_ring);
		else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
			 !xdp_tx_cmpl)
			if (!nfp_nfdk_xdp_complete(tx_ring))
				pkts_polled = budget;
	}

	return pkts_polled;
}
/**
 * nfp_nfdk_poll() - napi poll function
 * @napi:    NAPI structure
 * @budget:  NAPI budget
 *
 * Return: number of packets polled.
 */
int nfp_nfdk_poll(struct napi_struct *napi, int budget)
{
	struct nfp_net_r_vector *r_vec =
		container_of(napi, struct nfp_net_r_vector, napi);
	unsigned int pkts_polled = 0;

	if (r_vec->tx_ring)
		nfp_nfdk_tx_complete(r_vec->tx_ring, budget);
	if (r_vec->rx_ring)
		pkts_polled = nfp_nfdk_rx(r_vec->rx_ring, budget);

	if (pkts_polled < budget)
		if (napi_complete_done(napi, pkts_polled))
			nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);

	if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
		struct dim_sample dim_sample = {};
		unsigned int start;
		u64 pkts, bytes;

		/* Take a consistent snapshot of the RX counters for DIM */
		do {
			start = u64_stats_fetch_begin(&r_vec->rx_sync);
			pkts = r_vec->rx_pkts;
			bytes = r_vec->rx_bytes;
		} while (u64_stats_fetch_retry(&r_vec->rx_sync, start));

		dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
		net_dim(&r_vec->rx_dim, dim_sample);
	}

	if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
		struct dim_sample dim_sample = {};
		unsigned int start;
		u64 pkts, bytes;

		/* Take a consistent snapshot of the TX counters for DIM */
		do {
			start = u64_stats_fetch_begin(&r_vec->tx_sync);
			pkts = r_vec->tx_pkts;
			bytes = r_vec->tx_bytes;
		} while (u64_stats_fetch_retry(&r_vec->tx_sync, start));

		dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
		net_dim(&r_vec->tx_dim, dim_sample);
	}

	return pkts_polled;
}
/* Control device data path
 */

static int
nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
		     struct sk_buff *skb, bool old)
{
	u32 cnt, tmp_dlen, dlen_type = 0;
	struct nfp_net_tx_ring *tx_ring;
	struct nfp_nfdk_tx_buf *txbuf;
	struct nfp_nfdk_tx_desc *txd;
	unsigned int dma_len, type;
	struct nfp_net_dp *dp;
	dma_addr_t dma_addr;
	u64 metadata = 0;
	int wr_idx;

	dp = &r_vec->nfp_net->dp;
	tx_ring = r_vec->tx_ring;

	if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
		nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
		goto err_free;
	}

	/* Don't bother counting frags, assume the worst */
	if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tx_busy++;
		u64_stats_update_end(&r_vec->tx_sync);
		if (!old)
			__skb_queue_tail(&r_vec->queue, skb);
		else
			__skb_queue_head(&r_vec->queue, skb);
		return NETDEV_TX_BUSY;
	}

	if (nfp_app_ctrl_has_meta(nn->app)) {
		if (unlikely(skb_headroom(skb) < 8)) {
			nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
			goto err_free;
		}
		metadata = NFDK_DESC_TX_CHAIN_META;
		put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
		put_unaligned_be32(FIELD_PREP(NFDK_META_LEN, 8) |
				   FIELD_PREP(NFDK_META_FIELDS,
					      NFP_NET_META_PORTID),
				   skb_push(skb, 4));
	}

	if (nfp_nfdk_tx_maybe_close_block(tx_ring, 0, skb))
		goto err_free;

	/* DMA map all */
	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
	txd = &tx_ring->ktxds[wr_idx];
	txbuf = &tx_ring->ktxbufs[wr_idx];

	dma_len = skb_headlen(skb);
	if (dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
		type = NFDK_DESC_TX_TYPE_SIMPLE;
	else
		type = NFDK_DESC_TX_TYPE_GATHER;

	dma_addr = dma_map_single(dp->dev, skb->data, dma_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dp->dev, dma_addr))
		goto err_warn_dma;

	txbuf->skb = skb;
	txbuf++;

	txbuf->dma_addr = dma_addr;
	txbuf++;

	dma_len -= 1;
	dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) |
		    FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);

	txd->dma_len_type = cpu_to_le16(dlen_type);
	nfp_desc_set_dma_addr(txd, dma_addr);

	tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
	dma_len -= tmp_dlen;
	dma_addr += tmp_dlen + 1;
	txd++;

	while (dma_len > 0) {
		dma_len -= 1;
		dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
		txd->dma_len_type = cpu_to_le16(dlen_type);
		nfp_desc_set_dma_addr(txd, dma_addr);

		dlen_type &= NFDK_DESC_TX_DMA_LEN;
		dma_len -= dlen_type;
		dma_addr += dlen_type + 1;
		txd++;
	}

	(txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);

	/* Metadata desc */
	txd->raw = cpu_to_le64(metadata);
	txd++;

	cnt = txd - tx_ring->ktxds - wr_idx;
	if (unlikely(round_down(wr_idx, NFDK_TX_DESC_BLOCK_CNT) !=
		     round_down(wr_idx + cnt - 1, NFDK_TX_DESC_BLOCK_CNT)))
		goto err_warn_overflow;

	tx_ring->wr_p += cnt;
	if (tx_ring->wr_p % NFDK_TX_DESC_BLOCK_CNT)
		tx_ring->data_pending += skb->len;
	else
		tx_ring->data_pending = 0;

	tx_ring->wr_ptr_add += cnt;
	nfp_net_tx_xmit_more_flush(tx_ring);

	return NETDEV_TX_OK;

err_warn_overflow:
	WARN_ONCE(1, "unable to fit packet into a descriptor wr_idx:%d head:%d frags:%d cnt:%d",
		  wr_idx, skb_headlen(skb), 0, cnt);
	txbuf--;
	dma_unmap_single(dp->dev, txbuf->dma_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);
	txbuf->raw = 0;
err_warn_dma:
	nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
err_free:
	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_errors++;
	u64_stats_update_end(&r_vec->tx_sync);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
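/* Drain control messages that were queued while the ring was busy,
 * stopping at the first message that still does not fit so ordering is
 * preserved.
 */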
static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&r_vec->queue)))
		if (nfp_nfdk_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
			return;
}
static bool
nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
{
	u32 meta_type, meta_tag;

	if (!nfp_app_ctrl_has_meta(nn->app))
		return !meta_len;

	if (meta_len != 8)
		return false;

	meta_type = get_unaligned_be32(data);
	meta_tag = get_unaligned_be32(data + 4);

	return (meta_type == NFP_NET_META_PORTID &&
		meta_tag == NFP_META_PORT_ID_CTRL);
}
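/* Receive one control message. Returns true if a descriptor was
 * consumed (even if the message had to be dropped), false once the
 * ring is empty.
 */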
static bool
nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
		struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
{
	unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
	struct nfp_net_rx_buf *rxbuf;
	struct nfp_net_rx_desc *rxd;
	dma_addr_t new_dma_addr;
	struct sk_buff *skb;
	void *new_frag;
	int idx;

	idx = D_IDX(rx_ring, rx_ring->rd_p);

	rxd = &rx_ring->rxds[idx];
	if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
		return false;

	/* Memory barrier to ensure that we won't do other reads
	 * before the DD bit.
	 */
	dma_rmb();

	rx_ring->rd_p++;

	rxbuf =	&rx_ring->rxbufs[idx];
	meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
	data_len = le16_to_cpu(rxd->rxd.data_len);
	pkt_len = data_len - meta_len;

	pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
	if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
		pkt_off += meta_len;
	else
		pkt_off += dp->rx_offset;
	meta_off = pkt_off - meta_len;

	/* Stats update */
	u64_stats_update_begin(&r_vec->rx_sync);
	r_vec->rx_pkts++;
	r_vec->rx_bytes += pkt_len;
	u64_stats_update_end(&r_vec->rx_sync);

	nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);

	if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
		nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
			   meta_len);
		nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
		return true;
	}

	skb = build_skb(rxbuf->frag, dp->fl_bufsz);
	if (unlikely(!skb)) {
		nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
		return true;
	}
	new_frag = nfp_nfdk_napi_alloc_one(dp, &new_dma_addr);
	if (unlikely(!new_frag)) {
		nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
		return true;
	}

	nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);

	nfp_nfdk_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);

	skb_reserve(skb, pkt_off);
	skb_put(skb, pkt_len);

	nfp_app_ctrl_rx(nn->app, skb);

	return true;
}
static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
{
	struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
	struct nfp_net *nn = r_vec->nfp_net;
	struct nfp_net_dp *dp = &nn->dp;
	unsigned int budget = 512;

	while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
		continue;

	return budget;
}
void nfp_nfdk_ctrl_poll(struct tasklet_struct *t)
{
	struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);

	spin_lock(&r_vec->lock);
	nfp_nfdk_tx_complete(r_vec->tx_ring, 0);
	__nfp_ctrl_tx_queued(r_vec);
	spin_unlock(&r_vec->lock);

	if (nfp_ctrl_rx(r_vec)) {
		nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
	} else {
		tasklet_schedule(&r_vec->tasklet);
		nn_dp_warn(&r_vec->nfp_net->dp,
			   "control message budget exceeded!\n");
	}
}