drivers/net/ethernet/intel/ice/ice_xsk.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_type.h"
#include "ice_xsk.h"
#include "ice_txrx.h"
#include "ice_txrx_lib.h"
#include "ice_lib.h"

/**
 * ice_qp_reset_stats - Resets all stats for rings of given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
{
        memset(&vsi->rx_rings[q_idx]->rx_stats, 0,
               sizeof(vsi->rx_rings[q_idx]->rx_stats));
        memset(&vsi->tx_rings[q_idx]->stats, 0,
               sizeof(vsi->tx_rings[q_idx]->stats));
        if (ice_is_xdp_ena_vsi(vsi))
                memset(&vsi->xdp_rings[q_idx]->stats, 0,
                       sizeof(vsi->xdp_rings[q_idx]->stats));
}

/**
 * ice_qp_clean_rings - Cleans all the rings of a given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
        ice_clean_tx_ring(vsi->tx_rings[q_idx]);
        if (ice_is_xdp_ena_vsi(vsi))
                ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
        ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}

/**
 * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
 * @vsi: VSI that has netdev
 * @q_vector: q_vector that has NAPI context
 * @enable: true for enable, false for disable
 */
static void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
                     bool enable)
{
        if (!vsi->netdev || !q_vector)
                return;

        if (enable)
                napi_enable(&q_vector->napi);
        else
                napi_disable(&q_vector->napi);
}

/**
 * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring
 * @vsi: the VSI that contains queue vector being un-configured
 * @rx_ring: Rx ring that will have its IRQ disabled
 * @q_vector: queue vector
 */
static void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring,
                 struct ice_q_vector *q_vector)
{
        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;
        int base = vsi->base_vector;
        u16 reg;
        u32 val;

        /* QINT_TQCTL is already cleared in ice_vsi_stop_tx_ring, so only
         * QINT_RQCTL needs to be handled here.
         */
        reg = rx_ring->reg_idx;
        val = rd32(hw, QINT_RQCTL(reg));
        val &= ~QINT_RQCTL_CAUSE_ENA_M;
        wr32(hw, QINT_RQCTL(reg), val);

        if (q_vector) {
                u16 v_idx = q_vector->v_idx;

                wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
                ice_flush(hw);
                synchronize_irq(pf->msix_entries[v_idx + base].vector);
        }
}

/**
 * ice_qvec_cfg_msix - Configure MSI-X interrupt settings for a given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
        u16 reg_idx = q_vector->reg_idx;
        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;
        struct ice_ring *ring;

        ice_cfg_itr(hw, q_vector);

        ice_for_each_ring(ring, q_vector->tx)
                ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx,
                                      q_vector->tx.itr_idx);

        ice_for_each_ring(ring, q_vector->rx)
                ice_cfg_rxq_interrupt(vsi, ring->reg_idx, reg_idx,
                                      q_vector->rx.itr_idx);

        ice_flush(hw);
}

/**
 * ice_qvec_ena_irq - Enable IRQ for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;

        ice_irq_dynamic_ena(hw, vsi, q_vector);

        ice_flush(hw);
}

/**
 * ice_qp_dis - Disables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
{
        struct ice_txq_meta txq_meta = { };
        struct ice_ring *tx_ring, *rx_ring;
        struct ice_q_vector *q_vector;
        int timeout = 50;
        int err;

        if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
                return -EINVAL;

        tx_ring = vsi->tx_rings[q_idx];
        rx_ring = vsi->rx_rings[q_idx];
        q_vector = rx_ring->q_vector;

        while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) {
                timeout--;
                if (!timeout)
                        return -EBUSY;
                usleep_range(1000, 2000);
        }
        netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));

        ice_qvec_dis_irq(vsi, rx_ring, q_vector);

        ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
        err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
        if (err)
                return err;
        if (ice_is_xdp_ena_vsi(vsi)) {
                struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];

                memset(&txq_meta, 0, sizeof(txq_meta));
                ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
                err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
                                           &txq_meta);
                if (err)
                        return err;
        }
        err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
        if (err)
                return err;

        ice_qvec_toggle_napi(vsi, q_vector, false);
        ice_qp_clean_rings(vsi, q_idx);
        ice_qp_reset_stats(vsi, q_idx);

        return 0;
}

/**
 * ice_qp_ena - Enables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
        struct ice_aqc_add_tx_qgrp *qg_buf;
        struct ice_ring *tx_ring, *rx_ring;
        struct ice_q_vector *q_vector;
        u16 size;
        int err;

        if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
                return -EINVAL;

        size = struct_size(qg_buf, txqs, 1);
        qg_buf = kzalloc(size, GFP_KERNEL);
        if (!qg_buf)
                return -ENOMEM;

        qg_buf->num_txqs = 1;

        tx_ring = vsi->tx_rings[q_idx];
        rx_ring = vsi->rx_rings[q_idx];
        q_vector = rx_ring->q_vector;

        err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
        if (err)
                goto free_buf;

        if (ice_is_xdp_ena_vsi(vsi)) {
                struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];

                memset(qg_buf, 0, size);
                qg_buf->num_txqs = 1;
                err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
                if (err)
                        goto free_buf;
                ice_set_ring_xdp(xdp_ring);
                xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
        }

        err = ice_setup_rx_ctx(rx_ring);
        if (err)
                goto free_buf;

        ice_qvec_cfg_msix(vsi, q_vector);

        err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
        if (err)
                goto free_buf;

        clear_bit(ICE_CFG_BUSY, vsi->state);
        ice_qvec_toggle_napi(vsi, q_vector, true);
        ice_qvec_ena_irq(vsi, q_vector);

        netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
free_buf:
        kfree(qg_buf);
        return err;
}
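
/*
 * Editor's note: ice_qp_dis()/ice_qp_ena() are used by ice_xsk_pool_setup()
 * below to quiesce and then restart a single Tx/Rx queue pair around
 * attaching or detaching an XSK buffer pool, so that only the affected
 * queue pair is touched while the interface stays up.
 */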

/**
 * ice_xsk_pool_disable - disable a buffer pool region
 * @vsi: Current VSI
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
        struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);

        if (!pool)
                return -EINVAL;

        clear_bit(qid, vsi->af_xdp_zc_qps);
        xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);

        return 0;
}

/**
 * ice_xsk_pool_enable - enable a buffer pool region
 * @vsi: Current VSI
 * @pool: pointer to a requested buffer pool region
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
        int err;

        if (vsi->type != ICE_VSI_PF)
                return -EINVAL;

        if (qid >= vsi->netdev->real_num_rx_queues ||
            qid >= vsi->netdev->real_num_tx_queues)
                return -EINVAL;

        err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back),
                               ICE_RX_DMA_ATTR);
        if (err)
                return err;

        set_bit(qid, vsi->af_xdp_zc_qps);

        return 0;
}

/**
 * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
 * @vsi: Current VSI
 * @pool: buffer pool to enable/associate to a ring, NULL to disable
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
        bool if_running, pool_present = !!pool;
        int ret = 0, pool_failure = 0;

        if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);

        if (if_running) {
                ret = ice_qp_dis(vsi, qid);
                if (ret) {
                        netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
                        goto xsk_pool_if_up;
                }
        }

        pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
                                      ice_xsk_pool_disable(vsi, qid);

xsk_pool_if_up:
        if (if_running) {
                ret = ice_qp_ena(vsi, qid);
                if (!ret && pool_present)
                        napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
                else if (ret)
                        netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
        }

        if (pool_failure) {
                netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
                           pool_present ? "en" : "dis", pool_failure);
                return pool_failure;
        }

        return ret;
}
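
/*
 * Editor's note: ice_xsk_pool_setup() is the driver entry point for the
 * XDP_SETUP_XSK_POOL command. A rough sketch of how it is reached from the
 * driver's .ndo_bpf callback in ice_main.c (exact code may differ between
 * kernel versions):
 *
 *      static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 *      {
 *              ...
 *              case XDP_SETUP_XSK_POOL:
 *                      return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
 *                                                xdp->xsk.queue_id);
 *              ...
 *      }
 */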

/**
 * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * This function allocates a number of Rx buffers from the fill ring
 * or the internal recycle mechanism and places them on the Rx ring.
 *
 * Returns true if all allocations were successful, false if any fail.
 */
bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
{
        union ice_32b_rx_flex_desc *rx_desc;
        u16 ntu = rx_ring->next_to_use;
        struct ice_rx_buf *rx_buf;
        bool ok = true;
        dma_addr_t dma;

        if (!count)
                return true;

        rx_desc = ICE_RX_DESC(rx_ring, ntu);
        rx_buf = &rx_ring->rx_buf[ntu];

        do {
                rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
                if (!rx_buf->xdp) {
                        ok = false;
                        break;
                }

                dma = xsk_buff_xdp_get_dma(rx_buf->xdp);
                rx_desc->read.pkt_addr = cpu_to_le64(dma);
                rx_desc->wb.status_error0 = 0;

                rx_desc++;
                rx_buf++;
                ntu++;

                if (unlikely(ntu == rx_ring->count)) {
                        rx_desc = ICE_RX_DESC(rx_ring, 0);
                        rx_buf = rx_ring->rx_buf;
                        ntu = 0;
                }
        } while (--count);

        if (rx_ring->next_to_use != ntu) {
                /* clear the status bits for the next_to_use descriptor */
                rx_desc->wb.status_error0 = 0;
                ice_release_rx_desc(rx_ring, ntu);
        }

        return ok;
}
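
/*
 * Editor's note: a false return from ice_alloc_rx_bufs_zc() means the XSK
 * fill queue ran dry. ice_clean_rx_irq_zc() below treats that as "failure"
 * and, when the pool uses the need_wakeup feature, sets the Rx need_wakeup
 * flag so that user space is expected to refill the fill queue and kick the
 * driver again.
 */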

/**
 * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
 * @rx_ring: Rx ring
 */
static void ice_bump_ntc(struct ice_ring *rx_ring)
{
        int ntc = rx_ring->next_to_clean + 1;

        ntc = (ntc < rx_ring->count) ? ntc : 0;
        rx_ring->next_to_clean = ntc;
        prefetch(ICE_RX_DESC(rx_ring, ntc));
}

/**
 * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
 * @rx_ring: Rx ring
 * @rx_buf: zero-copy Rx buffer
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb on success, NULL on failure.
 */
static struct sk_buff *
ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
        unsigned int metasize = rx_buf->xdp->data - rx_buf->xdp->data_meta;
        unsigned int datasize = rx_buf->xdp->data_end - rx_buf->xdp->data;
        unsigned int datasize_hard = rx_buf->xdp->data_end -
                                     rx_buf->xdp->data_hard_start;
        struct sk_buff *skb;

        skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
                               GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!skb))
                return NULL;

        skb_reserve(skb, rx_buf->xdp->data - rx_buf->xdp->data_hard_start);
        memcpy(__skb_put(skb, datasize), rx_buf->xdp->data, datasize);
        if (metasize)
                skb_metadata_set(skb, metasize);

        xsk_buff_free(rx_buf->xdp);
        rx_buf->xdp = NULL;
        return skb;
}

/**
 * ice_run_xdp_zc - Executes an XDP program in zero-copy path
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
{
        int err, result = ICE_XDP_PASS;
        struct bpf_prog *xdp_prog;
        struct ice_ring *xdp_ring;
        u32 act;

        rcu_read_lock();
        /* The ZC path is enabled only when an XDP program is set,
         * so xdp_prog cannot be NULL here.
         */
        xdp_prog = READ_ONCE(rx_ring->xdp_prog);

        act = bpf_prog_run_xdp(xdp_prog, xdp);

        if (likely(act == XDP_REDIRECT)) {
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
                if (err)
                        goto out_failure;
                rcu_read_unlock();
                return ICE_XDP_REDIR;
        }

        switch (act) {
        case XDP_PASS:
                break;
        case XDP_TX:
                xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
                result = ice_xmit_xdp_buff(xdp, xdp_ring);
                if (result == ICE_XDP_CONSUMED)
                        goto out_failure;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
                result = ICE_XDP_CONSUMED;
                break;
        }

        rcu_read_unlock();
        return result;
}
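
/*
 * Editor's note: the ICE_XDP_TX/ICE_XDP_REDIR results returned above are
 * OR-ed into xdp_xmit by ice_clean_rx_irq_zc() and flushed once per NAPI
 * poll via ice_finalize_xdp_rx() (implemented in ice_txrx_lib.c), which
 * bumps the XDP Tx tail and flushes any pending redirects.
 */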

/**
 * ice_clean_rx_irq_zc - consumes packets from the hardware ring
 * @rx_ring: AF_XDP Rx ring
 * @budget: NAPI budget
 *
 * Returns number of processed packets on success, remaining budget on failure.
 */
int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
{
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
        unsigned int xdp_xmit = 0;
        bool failure = false;

        while (likely(total_rx_packets < (unsigned int)budget)) {
                union ice_32b_rx_flex_desc *rx_desc;
                unsigned int size, xdp_res = 0;
                struct ice_rx_buf *rx_buf;
                struct sk_buff *skb;
                u16 stat_err_bits;
                u16 vlan_tag = 0;
                u8 rx_ptype;

                rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

                stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
                if (!ice_test_staterr(rx_desc, stat_err_bits))
                        break;

                /* This memory barrier is needed to keep us from reading
                 * any other fields out of the rx_desc until we have
                 * verified the descriptor has been written back.
                 */
                dma_rmb();

                size = le16_to_cpu(rx_desc->wb.pkt_len) &
                                   ICE_RX_FLX_DESC_PKT_LEN_M;
                if (!size)
                        break;

                rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
                rx_buf->xdp->data_end = rx_buf->xdp->data + size;
                xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rx_ring->xsk_pool);

                xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp);
                if (xdp_res) {
                        if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
                                xdp_xmit |= xdp_res;
                        else
                                xsk_buff_free(rx_buf->xdp);

                        rx_buf->xdp = NULL;
                        total_rx_bytes += size;
                        total_rx_packets++;
                        cleaned_count++;

                        ice_bump_ntc(rx_ring);
                        continue;
                }

                /* XDP_PASS path */
                skb = ice_construct_skb_zc(rx_ring, rx_buf);
                if (!skb) {
                        rx_ring->rx_stats.alloc_buf_failed++;
                        break;
                }

                cleaned_count++;
                ice_bump_ntc(rx_ring);

                if (eth_skb_pad(skb)) {
                        skb = NULL;
                        continue;
                }

                total_rx_bytes += skb->len;
                total_rx_packets++;

                stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
                if (ice_test_staterr(rx_desc, stat_err_bits))
                        vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

                rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
                                       ICE_RX_FLEX_DESC_PTYPE_M;

                ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
                ice_receive_skb(rx_ring, skb, vlan_tag);
        }

        if (cleaned_count >= ICE_RX_BUF_WRITE)
                failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count);

        ice_finalize_xdp_rx(rx_ring, xdp_xmit);
        ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);

        if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
                if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
                        xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
                else
                        xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

                return (int)total_rx_packets;
        }

        return failure ? budget : (int)total_rx_packets;
}
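
/*
 * Editor's note: once the Rx need_wakeup flag is set above, user space is
 * expected to notice it on the fill ring and kick the kernel, which ends up
 * in ice_xsk_wakeup() below. An illustrative libbpf/libxdp-style user-space
 * fragment (assuming an xsk_socket already bound to this queue; names are
 * hypothetical):
 *
 *      if (xsk_ring_prod__needs_wakeup(&fill_q))
 *              recvfrom(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT,
 *                       NULL, NULL);
 */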

/**
 * ice_xmit_zc - Transmit AF_XDP frames pulled from the XSK Tx ring
 * @xdp_ring: XDP Tx ring
 * @budget: max number of frames to xmit
 *
 * Returns true if cleanup/transmission is done.
 */
static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
{
        struct ice_tx_desc *tx_desc = NULL;
        bool work_done = true;
        struct xdp_desc desc;
        dma_addr_t dma;

        while (likely(budget-- > 0)) {
                struct ice_tx_buf *tx_buf;

                if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
                        xdp_ring->tx_stats.tx_busy++;
                        work_done = false;
                        break;
                }

                tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];

                if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
                        break;

                dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
                xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
                                                 desc.len);

                tx_buf->bytecount = desc.len;

                tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
                tx_desc->buf_addr = cpu_to_le64(dma);
                tx_desc->cmd_type_offset_bsz =
                        ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, desc.len, 0);

                xdp_ring->next_to_use++;
                if (xdp_ring->next_to_use == xdp_ring->count)
                        xdp_ring->next_to_use = 0;
        }

        if (tx_desc) {
                ice_xdp_ring_update_tail(xdp_ring);
                xsk_tx_release(xdp_ring->xsk_pool);
        }

        return budget > 0 && work_done;
}

/**
 * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
 * @xdp_ring: XDP Tx ring
 * @tx_buf: Tx buffer to clean
 */
static void
ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf)
{
        xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
        dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
                         dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
        dma_unmap_len_set(tx_buf, len, 0);
}

/**
 * ice_clean_tx_irq_zc - Completes AF_XDP entries, and cleans XDP entries
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if cleanup/transmission is done.
 */
bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
{
        int total_packets = 0, total_bytes = 0;
        s16 ntc = xdp_ring->next_to_clean;
        struct ice_tx_desc *tx_desc;
        struct ice_tx_buf *tx_buf;
        u32 xsk_frames = 0;
        bool xmit_done;

        tx_desc = ICE_TX_DESC(xdp_ring, ntc);
        tx_buf = &xdp_ring->tx_buf[ntc];
        ntc -= xdp_ring->count;

        do {
                if (!(tx_desc->cmd_type_offset_bsz &
                      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
                        break;

                total_bytes += tx_buf->bytecount;
                total_packets++;

                if (tx_buf->raw_buf) {
                        ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
                        tx_buf->raw_buf = NULL;
                } else {
                        xsk_frames++;
                }

                tx_desc->cmd_type_offset_bsz = 0;
                tx_buf++;
                tx_desc++;
                ntc++;

                if (unlikely(!ntc)) {
                        ntc -= xdp_ring->count;
                        tx_buf = xdp_ring->tx_buf;
                        tx_desc = ICE_TX_DESC(xdp_ring, 0);
                }

                prefetch(tx_desc);

        } while (likely(--budget));

        ntc += xdp_ring->count;
        xdp_ring->next_to_clean = ntc;

        if (xsk_frames)
                xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);

        if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
                xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);

        ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
        xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);

        return budget > 0 && xmit_done;
}
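
/*
 * Editor's note: ice_clean_tx_irq_zc() is invoked from the driver's NAPI
 * poll routine (ice_napi_poll() in ice_txrx.c) for rings that have an
 * xsk_pool attached. Completed descriptors are returned to user space via
 * xsk_tx_completed(), and new ones are then pulled in through ice_xmit_zc()
 * above.
 */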

/**
 * ice_xsk_wakeup - Implements ndo_xsk_wakeup
 * @netdev: net_device
 * @queue_id: queue to wake up
 * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
 *
 * Returns negative on error, zero otherwise.
 */
int
ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
               u32 __always_unused flags)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_q_vector *q_vector;
        struct ice_vsi *vsi = np->vsi;
        struct ice_ring *ring;

        if (test_bit(ICE_DOWN, vsi->state))
                return -ENETDOWN;

        if (!ice_is_xdp_ena_vsi(vsi))
                return -ENXIO;

        if (queue_id >= vsi->num_txq)
                return -ENXIO;

        if (!vsi->xdp_rings[queue_id]->xsk_pool)
                return -ENXIO;

        ring = vsi->xdp_rings[queue_id];

        /* The idea here is that if NAPI is running, mark a miss, so
         * it will run again. If not, trigger an interrupt and
         * schedule the NAPI from interrupt context. If NAPI would be
         * scheduled here, the interrupt affinity would not be
         * honored.
         */
        q_vector = ring->q_vector;
        if (!napi_if_scheduled_mark_missed(&q_vector->napi))
                ice_trigger_sw_intr(&vsi->back->hw, q_vector);

        return 0;
}
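
/*
 * Editor's note: the Tx-side kick that lands in ice_xsk_wakeup() typically
 * comes from a sendto() on the AF_XDP socket. An illustrative libbpf/libxdp-
 * style user-space fragment (assuming an xsk_socket bound to this queue;
 * names are hypothetical):
 *
 *      xsk_ring_prod__submit(&tx_q, nb_frames);
 *      if (xsk_ring_prod__needs_wakeup(&tx_q))
 *              sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
 */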

/**
 * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP buff pool attached
 * @vsi: VSI to be checked
 *
 * Returns true if any of the Rx rings has an AF_XDP buff pool attached
 */
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
        int i;

        ice_for_each_rxq(vsi, i) {
                if (xsk_get_pool_from_qid(vsi->netdev, i))
                        return true;
        }

        return false;
}

/**
 * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
 * @rx_ring: ring to be cleaned
 */
void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
{
        u16 i;

        for (i = 0; i < rx_ring->count; i++) {
                struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

                if (!rx_buf->xdp)
                        continue;

                rx_buf->xdp = NULL;
        }
}

/**
 * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
 * @xdp_ring: XDP_Tx ring
 */
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
{
        u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
        u32 xsk_frames = 0;

        while (ntc != ntu) {
                struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];

                if (tx_buf->raw_buf)
                        ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
                else
                        xsk_frames++;

                tx_buf->raw_buf = NULL;

                ntc++;
                if (ntc >= xdp_ring->count)
                        ntc = 0;
        }

        if (xsk_frames)
                xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}