// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <linux/stringify.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "i40e.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"
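
/* The zero-copy Rx path keeps one xdp_buff pointer per Rx descriptor in
 * rx_ring->rx_bi_zc. The helpers below allocate, clear and index that
 * array; the buffers themselves come from the attached xsk_buff_pool.
 */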
int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
{
	unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;

	rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
	return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
}

void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
{
	memset(rx_ring->rx_bi_zc, 0,
	       sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);
}

static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
{
	return &rx_ring->rx_bi_zc[idx];
}

/**
 * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
 * certain ring/qid
 * @vsi: Current VSI
 * @pool: buffer pool
 * @qid: Rx ring to associate buffer pool with
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
				struct xsk_buff_pool *pool,
				u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	bool if_running;
	int err;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	if (qid >= vsi->num_queue_pairs)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;

		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;

		/* Kick start the NAPI context so that receiving will start */
		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
 * certain ring/qid
 * @vsi: Current VSI
 * @qid: Rx ring to associate buffer pool with
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	struct xsk_buff_pool *pool;
	bool if_running;
	int err;

	pool = xsk_get_pool_from_qid(netdev, qid);
	if (!pool)
		return -EINVAL;

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;
	}

	clear_bit(qid, vsi->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);

	if (if_running) {
		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
 * a ring/qid
 * @vsi: Current VSI
 * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
 * @qid: Rx ring to (dis)associate buffer pool (from)to
 *
 * This function enables or disables a buffer pool to a certain ring.
 *
 * Returns 0 on success, <0 on failure
 **/
int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
			u16 qid)
{
	return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
		i40e_xsk_pool_disable(vsi, qid);
}

/**
 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 *
 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
 **/
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
{
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	struct bpf_prog *xdp_prog;
	u32 act;

	/* NB! xdp_prog will always be !NULL, due to the fact that
	 * this path is enabled by setting an XDP program.
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);
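
	/* XDP_REDIRECT is the common verdict in the AF_XDP case (the frame
	 * is redirected to the user-space socket), so treat it as the
	 * likely path ahead of the switch below.
	 */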
	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
		return result;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		result = I40E_XDP_CONSUMED;
		break;
	}
	return result;
}

bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct xdp_buff **bi, *xdp;
	dma_addr_t dma;
	bool ok = true;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = i40e_rx_bi(rx_ring, ntu);
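
	/* Refill up to @count descriptors with buffers from the pool,
	 * wrapping back to the start of the ring when the end is reached,
	 * and stop early if the pool runs out of buffers.
	 */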
	do {
		xdp = xsk_buff_alloc(rx_ring->xsk_pool);
		if (!xdp) {
			ok = false;
			goto no_buffers;
		}
		*bi = xdp;
		dma = xsk_buff_xdp_get_dma(xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->read.hdr_addr = 0;

		rx_desc++;
		bi++;
		ntu++;

		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = i40e_rx_bi(rx_ring, 0);
			ntu = 0;
		}
	} while (--count);

no_buffers:
	if (rx_ring->next_to_use != ntu) {
		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.qword1.status_error_len = 0;
		i40e_release_rx_desc(rx_ring, ntu);
	}

	return ok;
}

/**
 * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
 * @rx_ring: Rx ring
 * @xdp: xdp_buff
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb, or NULL on failure.
 **/
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
					     struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb)) {
		xsk_buff_free(xdp);
		return NULL;
	}
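
	/* Copy path: the frame contents are copied out of the pool buffer
	 * into the new skb so that the xdp_buff can be handed back to the
	 * pool below.
	 */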
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	xsk_buff_free(xdp);
	return skb;
}

static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
				      struct xdp_buff *xdp_buff,
				      union i40e_rx_desc *rx_desc,
				      unsigned int *rx_packets,
				      unsigned int *rx_bytes,
				      unsigned int size,
				      unsigned int xdp_res)
{
	struct sk_buff *skb;

	*rx_packets = 1;
	*rx_bytes = size;
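
	/* For I40E_XDP_TX and I40E_XDP_REDIR the buffer has already been
	 * handed off (queued on the XDP Tx ring or consumed by
	 * xdp_do_redirect()), so there is nothing left to do here.
	 */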
	if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
		return;

	if (xdp_res == I40E_XDP_CONSUMED) {
		xsk_buff_free(xdp_buff);
		return;
	}

	if (xdp_res == I40E_XDP_PASS) {
		/* NB! We are not checking for errors using
		 * i40e_test_staterr with
		 * BIT(I40E_RXD_QW1_ERROR_SHIFT), because SBP is *not*
		 * set in PRT_SBPVSI (it is not set by default).
		 */
		skb = i40e_construct_skb_zc(rx_ring, xdp_buff);
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			*rx_packets = 0;
			*rx_bytes = 0;
			return;
		}

		if (eth_skb_pad(skb)) {
			*rx_packets = 0;
			*rx_bytes = 0;
			return;
		}

		*rx_bytes = skb->len;
		i40e_process_skb_fields(rx_ring, rx_desc, skb);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
		return;
	}

	/* Should never get here, as all valid cases have been handled already.
	 */
	WARN_ON_ONCE(1);
}

/**
 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
 * @rx_ring: Rx ring
 * @budget: NAPI budget
 *
 * Returns amount of work completed
 **/
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	u16 next_to_clean = rx_ring->next_to_clean;
	u16 count_mask = rx_ring->count - 1;
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
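
	/* Ring indexes are advanced with "& count_mask" below, which acts
	 * as a cheap modulo and relies on the descriptor count being a
	 * power of two: e.g. with count == 512, count_mask == 0x1ff and
	 * (511 + 1) & 0x1ff wraps back to 0.
	 */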
	while (likely(total_rx_packets < (unsigned int)budget)) {
		union i40e_rx_desc *rx_desc;
		unsigned int rx_packets;
		unsigned int rx_bytes;
		struct xdp_buff *bi;
		unsigned int size;
		u64 qword;

		rx_desc = I40E_RX_DESC(rx_ring, next_to_clean);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring,
						      rx_desc->raw.qword[0],
						      qword);
			bi = *i40e_rx_bi(rx_ring, next_to_clean);
			xsk_buff_free(bi);
			next_to_clean = (next_to_clean + 1) & count_mask;
			continue;
		}

		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		if (!size)
			break;

		bi = *i40e_rx_bi(rx_ring, next_to_clean);
		bi->data_end = bi->data + size;
		xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);

		xdp_res = i40e_run_xdp_zc(rx_ring, bi);
		i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
					  &rx_bytes, size, xdp_res);
		total_rx_packets += rx_packets;
		total_rx_bytes += rx_bytes;
		xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
		next_to_clean = (next_to_clean + 1) & count_mask;
	}

	rx_ring->next_to_clean = next_to_clean;
	cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
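
	/* The "- 1" keeps one descriptor's worth of slack between
	 * next_to_use and next_to_clean; only refill once a full
	 * I40E_RX_BUFFER_WRITE batch can be posted.
	 */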
	if (cleaned_count >= I40E_RX_BUFFER_WRITE)
		failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);

	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}

	return failure ? budget : (int)total_rx_packets;
}

static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
			  unsigned int *total_bytes)
{
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;

	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);

	tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC | I40E_TX_DESC_CMD_EOP,
						  0, desc->len, 0);

	*total_bytes += desc->len;
}

static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
				unsigned int *total_bytes)
{
	u16 ntu = xdp_ring->next_to_use;
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;
	u32 i;
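
	/* loop_unrolled_for is a driver macro (defined next to
	 * PKTS_PER_BATCH) that expands to a plain for loop annotated with
	 * a compiler unroll pragma, so the fixed number of iterations
	 * below is unrolled at build time.
	 */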
	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len);

		tx_desc = I40E_TX_DESC(xdp_ring, ntu++);
		tx_desc->buffer_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC |
							  I40E_TX_DESC_CMD_EOP,
							  0, desc[i].len, 0);

		*total_bytes += desc[i].len;
	}

	xdp_ring->next_to_use = ntu;
}

static void i40e_fill_tx_hw_ring(struct i40e_ring *xdp_ring, struct xdp_desc *descs, u32 nb_pkts,
				 unsigned int *total_bytes)
{
	u32 batched, leftover, i;
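
	/* Split nb_pkts into full PKTS_PER_BATCH chunks plus a remainder;
	 * since PKTS_PER_BATCH is a power of two, masking does the job.
	 * For example, with PKTS_PER_BATCH == 4 and nb_pkts == 10 this
	 * gives batched == 8 and leftover == 2.
	 */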
	batched = nb_pkts & ~(PKTS_PER_BATCH - 1);
	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
	for (i = 0; i < batched; i += PKTS_PER_BATCH)
		i40e_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
	for (i = batched; i < batched + leftover; i++)
		i40e_xmit_pkt(xdp_ring, &descs[i], total_bytes);
}
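
/* Set the RS (report status) bit on the last filled Tx descriptor so the
 * hardware reports completion for the whole burst; next_to_use has already
 * been advanced past it, hence the step back by one, wrapping to count - 1
 * when next_to_use is 0.
 */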
static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
{
	u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
	struct i40e_tx_desc *tx_desc;

	tx_desc = I40E_TX_DESC(xdp_ring, ntu);
	tx_desc->cmd_type_offset_bsz |= cpu_to_le64(I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
}

/**
 * i40e_xmit_zc - Performs zero-copy Tx AF_XDP
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if the work is finished.
 **/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
	struct xdp_desc *descs = xdp_ring->xsk_descs;
	u32 nb_pkts, nb_processed = 0;
	unsigned int total_bytes = 0;

	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget);
	if (!nb_pkts)
		return true;
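
	/* Fill the hardware ring in at most two passes: if the batch would
	 * run past the end of the ring, fill up to the end first, wrap
	 * next_to_use to 0, and let the second call handle the rest.
	 */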
	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
		i40e_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
		xdp_ring->next_to_use = 0;
	}

	i40e_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
			     &total_bytes);

	/* Request an interrupt for the last frame and bump tail ptr. */
	i40e_set_rs_bit(xdp_ring);
	i40e_xdp_ring_update_tail(xdp_ring);

	i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);

	return nb_pkts < budget;
}

/**
 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
 * @tx_ring: XDP Tx ring
 * @tx_bi: Tx buffer info to clean
 **/
static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
				     struct i40e_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	tx_ring->xdp_tx_active--;
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

/**
 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
 * @vsi: Current VSI
 * @tx_ring: XDP Tx ring
 *
 * Returns true if cleanup/transmission is done.
 **/
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
{
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	u32 i, completed_frames, xsk_frames = 0;
	u32 head_idx = i40e_get_head(tx_ring);
	struct i40e_tx_buffer *tx_bi;
	unsigned int ntc;
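
	/* head_idx comes from the head write-back area and reports how far
	 * the hardware has progressed; if it has wrapped past
	 * next_to_clean, unwrap it so the subtraction yields the number of
	 * completed descriptors.
	 */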
	if (head_idx < tx_ring->next_to_clean)
		head_idx += tx_ring->count;
	completed_frames = head_idx - tx_ring->next_to_clean;

	if (completed_frames == 0)
		goto out_xmit;

	if (likely(!tx_ring->xdp_tx_active)) {
		xsk_frames = completed_frames;
		goto skip;
	}

	ntc = tx_ring->next_to_clean;

	for (i = 0; i < completed_frames; i++) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf) {
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
			tx_bi->xdpf = NULL;
		} else {
			xsk_frames++;
		}

		if (++ntc >= tx_ring->count)
			ntc = 0;
	}

skip:
	tx_ring->next_to_clean += completed_frames;
	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
		tx_ring->next_to_clean -= tx_ring->count;

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);

	i40e_arm_wb(tx_ring, vsi, completed_frames);

out_xmit:
	if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
		xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
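
	/* Cleaning has freed up descriptors, so use the same NAPI pass to
	 * transmit any frames waiting on the XSK Tx ring.
	 */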
	return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
}

/**
 * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
 * @dev: the netdevice
 * @queue_id: queue id to wake up
 * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
 *
 * Returns <0 for errors, 0 otherwise.
 **/
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
		return -EAGAIN;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi))
		return -ENXIO;

	if (queue_id >= vsi->num_queue_pairs)
		return -ENXIO;

	if (!vsi->xdp_rings[queue_id]->xsk_pool)
		return -ENXIO;

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
		i40e_force_wb(vsi, ring->q_vector);

	return 0;
}

void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 count_mask = rx_ring->count - 1;
	u16 ntc = rx_ring->next_to_clean;
	u16 ntu = rx_ring->next_to_use;
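
	/* Only the buffers between next_to_clean and next_to_use are still
	 * owned by the driver; hand each of them back to the pool.
	 */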
	for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
		struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);

		xsk_buff_free(rx_bi);
	}
}

/**
 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
 * @tx_ring: XDP Tx ring
 **/
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	struct i40e_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc >= tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);
}

/**
 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
 * buffer pool attached
 * @vsi: vsi
 *
 * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
 **/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (xsk_get_pool_from_qid(netdev, i))
			return true;
	}

	return false;
}