// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_type.h"
#include "ice_xsk.h"
#include "ice_txrx.h"
#include "ice_txrx_lib.h"
#include "ice_lib.h"

/**
 * ice_qp_reset_stats - Resets all stats for rings of given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
{
        memset(&vsi->rx_rings[q_idx]->rx_stats, 0,
               sizeof(vsi->rx_rings[q_idx]->rx_stats));
        memset(&vsi->tx_rings[q_idx]->stats, 0,
               sizeof(vsi->tx_rings[q_idx]->stats));
        if (ice_is_xdp_ena_vsi(vsi))
                memset(&vsi->xdp_rings[q_idx]->stats, 0,
                       sizeof(vsi->xdp_rings[q_idx]->stats));
}

/**
 * ice_qp_clean_rings - Cleans all the rings of a given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
        ice_clean_tx_ring(vsi->tx_rings[q_idx]);
        if (ice_is_xdp_ena_vsi(vsi))
                ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
        ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}

/**
 * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
 * @vsi: VSI that has netdev
 * @q_vector: q_vector that has NAPI context
 * @enable: true for enable, false for disable
 */
static void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
                     bool enable)
{
        if (!vsi->netdev || !q_vector)
                return;

        if (enable)
                napi_enable(&q_vector->napi);
        else
                napi_disable(&q_vector->napi);
}

/**
 * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring
 * @vsi: the VSI that contains queue vector being un-configured
 * @rx_ring: Rx ring that will have its IRQ disabled
 * @q_vector: queue vector
 */
static void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring,
                 struct ice_q_vector *q_vector)
{
        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;
        int base = vsi->base_vector;
        u16 reg;
        u32 val;

        /* QINT_TQCTL is being cleared in ice_vsi_stop_tx_ring(), so only
         * QINT_RQCTL needs to be handled here
         */
        reg = rx_ring->reg_idx;
        val = rd32(hw, QINT_RQCTL(reg));
        val &= ~QINT_RQCTL_CAUSE_ENA_M;
        wr32(hw, QINT_RQCTL(reg), val);

        if (q_vector) {
                u16 v_idx = q_vector->v_idx;

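                /* Disarm the vector via GLINT_DYN_CTL, flush the write, and
                 * then wait for any in-flight handler on this MSI-X entry to
                 * finish before the queue is torn down.
                 */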
                wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
                ice_flush(hw);
                synchronize_irq(pf->msix_entries[v_idx + base].vector);
        }
}

/**
 * ice_qvec_cfg_msix - Configure MSI-X settings for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
        u16 reg_idx = q_vector->reg_idx;
        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;
        struct ice_ring *ring;

        ice_cfg_itr(hw, q_vector);

        ice_for_each_ring(ring, q_vector->tx)
                ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx,
                                      q_vector->tx.itr_idx);

        ice_for_each_ring(ring, q_vector->rx)
                ice_cfg_rxq_interrupt(vsi, ring->reg_idx, reg_idx,
                                      q_vector->rx.itr_idx);

        ice_flush(hw);
}

/**
 * ice_qvec_ena_irq - Enable IRQ for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;

        ice_irq_dynamic_ena(hw, vsi, q_vector);

        ice_flush(hw);
}

/**
 * ice_qp_dis - Disables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
{
        struct ice_txq_meta txq_meta = { };
        struct ice_ring *tx_ring, *rx_ring;
        struct ice_q_vector *q_vector;
        int timeout = 50;
        int err;

        if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
                return -EINVAL;

        tx_ring = vsi->tx_rings[q_idx];
        rx_ring = vsi->rx_rings[q_idx];
        q_vector = rx_ring->q_vector;

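        /* Serialize with any other (re)configuration of this VSI: poll the
         * ICE_CFG_BUSY bit for up to 50 iterations (~50-100 ms with the
         * usleep_range() below) before giving up with -EBUSY.
         */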
        while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) {
                timeout--;
                if (!timeout)
                        return -EBUSY;
                usleep_range(1000, 2000);
        }
        netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));

        ice_qvec_dis_irq(vsi, rx_ring, q_vector);

        ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
        err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
        if (err)
                return err;
        if (ice_is_xdp_ena_vsi(vsi)) {
                struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];

                memset(&txq_meta, 0, sizeof(txq_meta));
                ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
                err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
                                           &txq_meta);
                if (err)
                        return err;
        }
        err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
        if (err)
                return err;

        ice_qvec_toggle_napi(vsi, q_vector, false);
        ice_qp_clean_rings(vsi, q_idx);
        ice_qp_reset_stats(vsi, q_idx);

        return 0;
}

/**
 * ice_qp_ena - Enables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
        struct ice_aqc_add_tx_qgrp *qg_buf;
        struct ice_ring *tx_ring, *rx_ring;
        struct ice_q_vector *q_vector;
        u16 size;
        int err;

        if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
                return -EINVAL;

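        /* struct ice_aqc_add_tx_qgrp ends in a flexible array of queue
         * entries; size the buffer for the header plus exactly one entry.
         */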
        size = struct_size(qg_buf, txqs, 1);
        qg_buf = kzalloc(size, GFP_KERNEL);
        if (!qg_buf)
                return -ENOMEM;

        qg_buf->num_txqs = 1;

        tx_ring = vsi->tx_rings[q_idx];
        rx_ring = vsi->rx_rings[q_idx];
        q_vector = rx_ring->q_vector;

        err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
        if (err)
                goto free_buf;

        if (ice_is_xdp_ena_vsi(vsi)) {
                struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];

                memset(qg_buf, 0, size);
                qg_buf->num_txqs = 1;
                err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
                if (err)
                        goto free_buf;
                ice_set_ring_xdp(xdp_ring);
                xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
        }

        err = ice_vsi_cfg_rxq(rx_ring);
        if (err)
                goto free_buf;

        ice_qvec_cfg_msix(vsi, q_vector);

        err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
        if (err)
                goto free_buf;

        clear_bit(ICE_CFG_BUSY, vsi->state);
        ice_qvec_toggle_napi(vsi, q_vector, true);
        ice_qvec_ena_irq(vsi, q_vector);

        netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
free_buf:
        kfree(qg_buf);
        return err;
}

/**
 * ice_xsk_pool_disable - disable a buffer pool region
 * @vsi: Current VSI
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
        struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);

        if (!pool)
                return -EINVAL;

        clear_bit(qid, vsi->af_xdp_zc_qps);
        xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);

        return 0;
}

/**
 * ice_xsk_pool_enable - enable a buffer pool region
 * @vsi: Current VSI
 * @pool: pointer to a requested buffer pool region
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
        int err;

        if (vsi->type != ICE_VSI_PF)
                return -EINVAL;

        if (qid >= vsi->netdev->real_num_rx_queues ||
            qid >= vsi->netdev->real_num_tx_queues)
                return -EINVAL;

        err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back),
                               ICE_RX_DMA_ATTR);
        if (err)
                return err;

        set_bit(qid, vsi->af_xdp_zc_qps);

        return 0;
}

/**
 * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
 * @vsi: Current VSI
 * @pool: buffer pool to enable/associate to a ring, NULL to disable
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
        bool if_running, pool_present = !!pool;
        int ret = 0, pool_failure = 0;

        if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);

        if (if_running) {
                ret = ice_qp_dis(vsi, qid);
                if (ret) {
                        netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
                        goto xsk_pool_if_up;
                }
        }

        pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
                                      ice_xsk_pool_disable(vsi, qid);

xsk_pool_if_up:
        if (if_running) {
                ret = ice_qp_ena(vsi, qid);
                if (!ret && pool_present)
                        napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
                else if (ret)
                        netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
        }

        if (pool_failure) {
                netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
                           pool_present ? "en" : "dis", pool_failure);
                return pool_failure;
        }

        return ret;
}
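
/* Usage sketch (illustrative only): pool setup/teardown requests reach
 * ice_xsk_pool_setup() through the driver's ndo_bpf handler when user
 * space binds or unbinds an AF_XDP socket, roughly:
 *
 *        case XDP_SETUP_XSK_POOL:
 *                return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
 *                                          xdp->xsk.queue_id);
 *
 * where "xdp" is the struct netdev_bpf request and a NULL pool means
 * disable.
 */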

/**
 * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * This function allocates a number of Rx buffers from the fill ring
 * or the internal recycle mechanism and places them on the Rx ring.
 *
 * Returns true if all allocations were successful, false if any fail.
 */
bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
{
        union ice_32b_rx_flex_desc *rx_desc;
        u16 ntu = rx_ring->next_to_use;
        struct ice_rx_buf *rx_buf;
        bool ok = true;
        dma_addr_t dma;

        if (!count)
                return true;

        rx_desc = ICE_RX_DESC(rx_ring, ntu);
        rx_buf = &rx_ring->rx_buf[ntu];

        do {
                rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
                if (!rx_buf->xdp) {
                        ok = false;
                        break;
                }

                dma = xsk_buff_xdp_get_dma(rx_buf->xdp);
                rx_desc->read.pkt_addr = cpu_to_le64(dma);
                rx_desc->wb.status_error0 = 0;

                rx_desc++;
                rx_buf++;
                ntu++;

                if (unlikely(ntu == rx_ring->count)) {
                        rx_desc = ICE_RX_DESC(rx_ring, 0);
                        rx_buf = rx_ring->rx_buf;
                        ntu = 0;
                }
        } while (--count);

        if (rx_ring->next_to_use != ntu) {
                /* clear the status bits for the next_to_use descriptor */
                rx_desc->wb.status_error0 = 0;
                ice_release_rx_desc(rx_ring, ntu);
        }

        return ok;
}

/**
 * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
 * @rx_ring: Rx ring
 */
static void ice_bump_ntc(struct ice_ring *rx_ring)
{
        int ntc = rx_ring->next_to_clean + 1;

        ntc = (ntc < rx_ring->count) ? ntc : 0;
        rx_ring->next_to_clean = ntc;
        prefetch(ICE_RX_DESC(rx_ring, ntc));
}

/**
 * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
 * @rx_ring: Rx ring
 * @rx_buf: zero-copy Rx buffer
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb on success, NULL on failure.
 */
static struct sk_buff *
ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
        unsigned int metasize = rx_buf->xdp->data - rx_buf->xdp->data_meta;
        unsigned int datasize = rx_buf->xdp->data_end - rx_buf->xdp->data;
        unsigned int datasize_hard = rx_buf->xdp->data_end -
                                     rx_buf->xdp->data_hard_start;
        struct sk_buff *skb;

        skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
                               GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!skb))
                return NULL;

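        /* The frame must be copied out of the UMEM so that the zero-copy
         * buffer can be returned to the pool right away; the skb gets its
         * own copy of the payload (and of any XDP metadata in front of it).
         */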
        skb_reserve(skb, rx_buf->xdp->data - rx_buf->xdp->data_hard_start);
        memcpy(__skb_put(skb, datasize), rx_buf->xdp->data, datasize);
        if (metasize)
                skb_metadata_set(skb, metasize);

        xsk_buff_free(rx_buf->xdp);
        rx_buf->xdp = NULL;
        return skb;
}

/**
 * ice_run_xdp_zc - Executes an XDP program in zero-copy path
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
{
        int err, result = ICE_XDP_PASS;
        struct bpf_prog *xdp_prog;
        struct ice_ring *xdp_ring;
        u32 act;

        /* The ZC path is enabled only when an XDP program is set,
         * so xdp_prog cannot be NULL here
         */
        xdp_prog = READ_ONCE(rx_ring->xdp_prog);

        act = bpf_prog_run_xdp(xdp_prog, xdp);

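        /* For AF_XDP, XDP_REDIRECT (into the user-space socket) is the hot
         * path, so test for it up front instead of going through the switch.
         */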
        if (likely(act == XDP_REDIRECT)) {
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
                if (err)
                        goto out_failure;
                return ICE_XDP_REDIR;
        }

        switch (act) {
        case XDP_PASS:
                break;
        case XDP_TX:
                xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
                result = ice_xmit_xdp_buff(xdp, xdp_ring);
                if (result == ICE_XDP_CONSUMED)
                        goto out_failure;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
                result = ICE_XDP_CONSUMED;
                break;
        }

        return result;
}

/**
 * ice_clean_rx_irq_zc - consumes packets from the hardware ring
 * @rx_ring: AF_XDP Rx ring
 * @budget: NAPI budget
 *
 * Returns number of processed packets on success, remaining budget on failure.
 */
int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
{
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
        unsigned int xdp_xmit = 0;
        bool failure = false;

        while (likely(total_rx_packets < (unsigned int)budget)) {
                union ice_32b_rx_flex_desc *rx_desc;
                unsigned int size, xdp_res = 0;
                struct ice_rx_buf *rx_buf;
                struct sk_buff *skb;
                u16 stat_err_bits;
                u16 vlan_tag = 0;
                u16 rx_ptype;

                rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

                stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
                if (!ice_test_staterr(rx_desc, stat_err_bits))
                        break;

                /* This memory barrier is needed to keep us from reading
                 * any other fields out of the rx_desc until we have
                 * verified the descriptor has been written back.
                 */
                dma_rmb();

                size = le16_to_cpu(rx_desc->wb.pkt_len) &
                                   ICE_RX_FLX_DESC_PKT_LEN_M;
                if (!size)
                        break;

                rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
                rx_buf->xdp->data_end = rx_buf->xdp->data + size;
                xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rx_ring->xsk_pool);

                xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp);
                if (xdp_res) {
                        if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
                                xdp_xmit |= xdp_res;
                        else
                                xsk_buff_free(rx_buf->xdp);

                        rx_buf->xdp = NULL;
                        total_rx_bytes += size;
                        total_rx_packets++;
                        cleaned_count++;

                        ice_bump_ntc(rx_ring);
                        continue;
                }

                /* XDP_PASS path */
                skb = ice_construct_skb_zc(rx_ring, rx_buf);
                if (!skb) {
                        rx_ring->rx_stats.alloc_buf_failed++;
                        break;
                }

                cleaned_count++;
                ice_bump_ntc(rx_ring);

                if (eth_skb_pad(skb)) {
                        skb = NULL;
                        continue;
                }

                total_rx_bytes += skb->len;
                total_rx_packets++;

                stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
                if (ice_test_staterr(rx_desc, stat_err_bits))
                        vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

                rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
                                       ICE_RX_FLEX_DESC_PTYPE_M;

                ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
                ice_receive_skb(rx_ring, skb, vlan_tag);
        }

        if (cleaned_count >= ICE_RX_BUF_WRITE)
                failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count);

        ice_finalize_xdp_rx(rx_ring, xdp_xmit);
        ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);

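        /* If the socket negotiated the need_wakeup protocol, tell user space
         * whether it must kick Rx processing again: set the flag when the
         * refill failed or the ring is fully drained, clear it while the
         * driver can keep making progress on its own.
         */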
        if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
                if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
                        xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
                else
                        xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

                return (int)total_rx_packets;
        }

        return failure ? budget : (int)total_rx_packets;
}

/**
 * ice_xmit_zc - Transmit frames from the AF_XDP Tx ring
 * @xdp_ring: XDP Tx ring
 * @budget: max number of frames to xmit
 *
 * Returns true if transmission is done (budget not exhausted, ring not full).
 */
static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
{
        struct ice_tx_desc *tx_desc = NULL;
        bool work_done = true;
        struct xdp_desc desc;
        dma_addr_t dma;

        while (likely(budget-- > 0)) {
                struct ice_tx_buf *tx_buf;

                if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
                        xdp_ring->tx_stats.tx_busy++;
                        work_done = false;
                        break;
                }

                tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];

                if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
                        break;

                dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
                xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
                                                 desc.len);

                tx_buf->bytecount = desc.len;

                tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
                tx_desc->buf_addr = cpu_to_le64(dma);
                tx_desc->cmd_type_offset_bsz =
                        ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, desc.len, 0);

                xdp_ring->next_to_use++;
                if (xdp_ring->next_to_use == xdp_ring->count)
                        xdp_ring->next_to_use = 0;
        }

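        /* Hit the doorbell only once per batch, and hand the slots consumed
         * via xsk_tx_peek_desc() back to the socket's Tx ring so user space
         * can reuse them.
         */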
        if (tx_desc) {
                ice_xdp_ring_update_tail(xdp_ring);
                xsk_tx_release(xdp_ring->xsk_pool);
        }

        return budget > 0 && work_done;
}

/**
 * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
 * @xdp_ring: XDP Tx ring
 * @tx_buf: Tx buffer to clean
 */
static void
ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf)
{
        xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
        dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
                         dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
        dma_unmap_len_set(tx_buf, len, 0);
}

/**
 * ice_clean_tx_irq_zc - Completes AF_XDP entries, and cleans XDP entries
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if cleanup/transmission is done.
 */
bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
{
        int total_packets = 0, total_bytes = 0;
        s16 ntc = xdp_ring->next_to_clean;
        struct ice_tx_desc *tx_desc;
        struct ice_tx_buf *tx_buf;
        u32 xsk_frames = 0;
        bool xmit_done;

        tx_desc = ICE_TX_DESC(xdp_ring, ntc);
        tx_buf = &xdp_ring->tx_buf[ntc];
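        /* Track ntc as a negative offset from the end of the ring; it
         * reaches zero exactly at the wrap point, which makes the wrap
         * check in the loop below a cheap test against zero.
         */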
        ntc -= xdp_ring->count;

        do {
                if (!(tx_desc->cmd_type_offset_bsz &
                      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
                        break;

                total_bytes += tx_buf->bytecount;
                total_packets++;

                if (tx_buf->raw_buf) {
                        ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
                        tx_buf->raw_buf = NULL;
                } else {
                        xsk_frames++;
                }

                tx_desc->cmd_type_offset_bsz = 0;
                tx_buf++;
                tx_desc++;
                ntc++;

                if (unlikely(!ntc)) {
                        ntc -= xdp_ring->count;
                        tx_buf = xdp_ring->tx_buf;
                        tx_desc = ICE_TX_DESC(xdp_ring, 0);
                }

                prefetch(tx_desc);

        } while (likely(--budget));

        ntc += xdp_ring->count;
        xdp_ring->next_to_clean = ntc;

        if (xsk_frames)
                xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);

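        /* Leave the Tx need_wakeup flag set: NAPI may stop polling, so
         * further transmission always requires an explicit kick from
         * user space.
         */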
        if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
                xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);

        ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
        xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);

        return budget > 0 && xmit_done;
}

/**
 * ice_xsk_wakeup - Implements ndo_xsk_wakeup
 * @netdev: net_device
 * @queue_id: queue to wake up
 * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
 *
 * Returns negative on error, zero otherwise.
 */
int
ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
               u32 __always_unused flags)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_q_vector *q_vector;
        struct ice_vsi *vsi = np->vsi;
        struct ice_ring *ring;

        if (test_bit(ICE_DOWN, vsi->state))
                return -ENETDOWN;

        if (!ice_is_xdp_ena_vsi(vsi))
                return -ENXIO;

        if (queue_id >= vsi->num_txq)
                return -ENXIO;

        if (!vsi->xdp_rings[queue_id]->xsk_pool)
                return -ENXIO;

        ring = vsi->xdp_rings[queue_id];

        /* The idea here is that if NAPI is running, mark a miss, so
         * it will run again. If not, trigger an interrupt and
         * schedule the NAPI from interrupt context. If NAPI would be
         * scheduled here, the interrupt affinity would not be
         * honored.
         */
        q_vector = ring->q_vector;
        if (!napi_if_scheduled_mark_missed(&q_vector->napi))
                ice_trigger_sw_intr(&vsi->back->hw, q_vector);

        return 0;
}

/**
 * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP buff pool attached
 * @vsi: VSI to be checked
 *
 * Returns true if any of the Rx rings has an AF_XDP buff pool attached
 */
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
        int i;

        ice_for_each_rxq(vsi, i) {
                if (xsk_get_pool_from_qid(vsi->netdev, i))
                        return true;
        }

        return false;
}

/**
 * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
 * @rx_ring: ring to be cleaned
 */
void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
{
        u16 i;

        for (i = 0; i < rx_ring->count; i++) {
                struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

                if (!rx_buf->xdp)
                        continue;

                rx_buf->xdp = NULL;
        }
}

/**
 * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
 * @xdp_ring: XDP_Tx ring
 */
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
{
        u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
        u32 xsk_frames = 0;

        while (ntc != ntu) {
                struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];

                if (tx_buf->raw_buf)
                        ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
                else
                        xsk_frames++;

                tx_buf->raw_buf = NULL;

                ntc++;
                if (ntc >= xdp_ring->count)
                        ntc = 0;
        }

        if (xsk_frames)
                xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}