/*
 * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include "en/xdp.h"
#include "en/params.h"

int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
{
	int hr = mlx5e_get_linear_rq_headroom(params, xsk);

	/* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
	 * The condition checked in mlx5e_rx_is_linear_skb is:
	 *   SKB_DATA_ALIGN(sw_mtu + hard_mtu + hr) + S <= PAGE_SIZE         (1)
	 *   (Note that hw_mtu == sw_mtu + hard_mtu.)
	 * What is returned from this function is:
	 *   max_mtu = PAGE_SIZE - S - hr - hard_mtu                         (2)
	 * After assigning sw_mtu := max_mtu, the left side of (1) turns to
	 * SKB_DATA_ALIGN(PAGE_SIZE - S) + S, which is equal to PAGE_SIZE,
	 * because both PAGE_SIZE and S are already aligned. Any number greater
	 * than max_mtu would make the left side of (1) greater than PAGE_SIZE,
	 * so max_mtu is the maximum MTU allowed.
	 */

	return MLX5E_HW2SW_MTU(params, SKB_MAX_HEAD(hr));
}

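/* Worked example (illustrative values, not derived from any particular setup):
 * on a 4K-page system with SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) ==
 * 320 and hr == 256 (the usual XDP headroom), SKB_MAX_HEAD(hr) == 4096 - 320 -
 * 256 == 3520; MLX5E_HW2SW_MTU() then subtracts hard_mtu (Ethernet overhead)
 * to produce max_mtu. Substituting that value back into (1) yields exactly
 * PAGE_SIZE, so the linear-SKB condition still holds.
 */
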
static inline bool
mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
		    struct page *page, struct xdp_buff *xdp)
{
	struct mlx5e_xmit_data xdptxd;
	struct mlx5e_xdp_info xdpi;
	struct xdp_frame *xdpf;
	dma_addr_t dma_addr;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return false;

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		xdp_return_frame(xdpf);
		return false;
	}

	xdptxd.data = xdpf->data;
	xdptxd.len  = xdpf->len;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
		/* The xdp_buff was in the UMEM and was copied into a newly
		 * allocated page. The UMEM page was returned via the ZCA, and
		 * this new page has to be mapped at this point and has to be
		 * unmapped and returned via xdp_return_frame on completion.
		 */

		/* Prevent double recycling of the UMEM page. Even in case this
		 * function returns false, the xdp_buff shouldn't be recycled,
		 * as it was already done in xdp_convert_zc_to_xdp_frame.
		 */
		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */

		xdpi.mode = MLX5E_XDP_XMIT_MODE_FRAME;

		dma_addr = dma_map_single(sq->pdev, xdptxd.data, xdptxd.len,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(sq->pdev, dma_addr)) {
			xdp_return_frame(xdpf);
			return false;
		}

		xdptxd.dma_addr     = dma_addr;
		xdpi.frame.xdpf     = xdpf;
		xdpi.frame.dma_addr = dma_addr;

		if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
					      mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, 0)))
			return false;

		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
		return true;
	}

	/* Driver assumes that xdp_convert_buff_to_frame returns an xdp_frame
	 * that points to the same memory region as the original xdp_buff. It
	 * allows to map the memory only once and to use the DMA_BIDIRECTIONAL
	 * mode.
	 */

	xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;

	dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
	dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_TO_DEVICE);

	xdptxd.dma_addr = dma_addr;
	xdpi.page.rq   = rq;
	xdpi.page.page = page;

	if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
				      mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, 0)))
		return false;

	mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
	return true;
}

/* returns true if packet was consumed by xdp */
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
		      struct bpf_prog *prog, struct xdp_buff *xdp)
{
	u32 act;
	int err;

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_PASS:
		return false;
	case XDP_TX:
		if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, page, xdp)))
			goto xdp_abort;
		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
		return true;
	case XDP_REDIRECT:
		/* When XDP enabled then page-refcnt==1 here */
		err = xdp_do_redirect(rq->netdev, xdp, prog);
		if (unlikely(err))
			goto xdp_abort;
		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
		__set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
		if (xdp->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL)
			mlx5e_page_dma_unmap(rq, page);
		rq->stats->xdp_redirect++;
		return true;
	default:
		bpf_warn_invalid_xdp_action(rq->netdev, prog, act);
		fallthrough;
	case XDP_ABORTED:
xdp_abort:
		trace_xdp_exception(rq->netdev, prog, act);
		fallthrough;
	case XDP_DROP:
		rq->stats->xdp_drop++;
		return true;
	}
}

static u16 mlx5e_xdpsq_get_next_pi(struct mlx5e_xdpsq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_xdp_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_xdp_wqe_info) {
				.num_wqebbs = 1,
				.num_pkts = 0,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}
		sq->stats->nops += contig_wqebbs;

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

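/* Example (illustrative): if the producer index is 2 WQEBBs away from the end
 * of the current SQ fragment and size is 4, the loop above posts 2 NOP WQEs,
 * so the 4-WQEBB descriptor that follows starts at the fragment boundary and
 * never wraps.
 */
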
static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5e_xdpsq_stats *stats = sq->stats;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	pi = mlx5e_xdpsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
	net_prefetchw(wqe->data);

	*session = (struct mlx5e_tx_mpwqe) {
		.wqe = wqe,
		.bytes_count = 0,
		.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
		.pkt_count = 0,
		.inline_on = mlx5e_xdp_get_inline_state(sq, session->inline_on),
	};

	stats->mpwqe++;
}

void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5_wqe_ctrl_seg *cseg = &session->wqe->ctrl;
	u16 ds_count = session->ds_count;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi];

	cseg->opmod_idx_opcode =
		cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);

	/* Each WQEBB carries MLX5_SEND_WQEBB_NUM_DS data segments. */
	wi->num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS);
	wi->num_pkts   = session->pkt_count;

	sq->pc += wi->num_wqebbs;

	sq->doorbell_cseg = cseg;

	session->wqe = NULL; /* Close session */
}

enum {
	MLX5E_XDP_CHECK_OK = 1,
	MLX5E_XDP_CHECK_START_MPWQE = 2,
};

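/* Contract of the check helpers below: a negative value means the SQ has no
 * room and the frame must be dropped; MLX5E_XDP_CHECK_OK means the frame can
 * be posted right away; MLX5E_XDP_CHECK_START_MPWQE means there is room, but
 * a new MPWQE session must be opened first.
 */
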
INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
{
	if (unlikely(!sq->mpwqe.wqe)) {
		if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
						     sq->stop_room))) {
			/* SQ is full, ring doorbell */
			mlx5e_xmit_xdp_doorbell(sq);
			sq->stats->full++;
			return -EBUSY;
		}

		return MLX5E_XDP_CHECK_START_MPWQE;
	}

	return MLX5E_XDP_CHECK_OK;
}

INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
		     struct skb_shared_info *sinfo, int check_result);

INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
			   struct skb_shared_info *sinfo, int check_result)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5e_xdpsq_stats *stats = sq->stats;

	if (unlikely(sinfo)) {
		/* MPWQE is enabled, but a multi-buffer packet is queued for
		 * transmission. MPWQE can't send fragmented packets, so close
		 * the current session and fall back to a regular WQE.
		 */
		if (unlikely(sq->mpwqe.wqe))
			mlx5e_xdp_mpwqe_complete(sq);
		return mlx5e_xmit_xdp_frame(sq, xdptxd, sinfo, 0);
	}

	if (unlikely(xdptxd->len > sq->hw_mtu)) {
		stats->err++;
		return false;
	}

	if (!check_result)
		check_result = mlx5e_xmit_xdp_frame_check_mpwqe(sq);
	if (unlikely(check_result < 0))
		return false;

	if (check_result == MLX5E_XDP_CHECK_START_MPWQE) {
		/* Start the session when nothing can fail, so it's guaranteed
		 * that if there is an active session, it has at least one dseg,
		 * and it's safe to complete it at any time.
		 */
		mlx5e_xdp_mpwqe_session_start(sq);
	}

	mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);

	if (unlikely(mlx5e_xdp_mpqwe_is_full(session, sq->max_sq_mpw_wqebbs)))
		mlx5e_xdp_mpwqe_complete(sq);

	stats->xmit++;
	return true;
}

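/* Session lifetime note: a session opened by mlx5e_xdp_mpwqe_session_start()
 * is closed either right above, when mlx5e_xdp_mpqwe_is_full() reports that no
 * more descriptors fit, or on the flush paths below (mlx5e_xdp_rx_poll_complete()
 * and mlx5e_xdp_xmit() with XDP_XMIT_FLUSH), so a partially filled MPWQE is
 * normally flushed by the end of the poll cycle that produced it.
 */
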
static int mlx5e_xmit_xdp_frame_check_stop_room(struct mlx5e_xdpsq *sq, int stop_room)
{
	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, stop_room))) {
		/* SQ is full, ring doorbell */
		mlx5e_xmit_xdp_doorbell(sq);
		sq->stats->full++;
		return -EBUSY;
	}

	return MLX5E_XDP_CHECK_OK;
}

INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
{
	return mlx5e_xmit_xdp_frame_check_stop_room(sq, 1);
}

INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
		     struct skb_shared_info *sinfo, int check_result)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5_wqe_eth_seg *eseg;
	struct mlx5e_tx_wqe *wqe;

	dma_addr_t dma_addr = xdptxd->dma_addr;
	u32 dma_len = xdptxd->len;
	u16 ds_cnt, inline_hdr_sz;
	u8 num_wqebbs = 1;
	int num_frags = 0;
	u16 pi;

	struct mlx5e_xdpsq_stats *stats = sq->stats;

	if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) {
		stats->err++;
		return false;
	}

	ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE)
		ds_cnt++;

	/* check_result must be 0 if sinfo is passed. */
	if (!check_result) {
		int stop_room = 1;

		if (unlikely(sinfo)) {
			ds_cnt += sinfo->nr_frags;
			num_frags = sinfo->nr_frags;
			num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
			/* Assuming MLX5_CAP_GEN(mdev, max_wqe_sz_sq) is big
			 * enough to hold all fragments.
			 */
			stop_room = MLX5E_STOP_ROOM(num_wqebbs);
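			/* Illustrative sizing: a frame with 2 frags and inlining
			 * enabled needs MLX5E_TX_WQE_EMPTY_DS_COUNT + 1 (linear
			 * part) + 1 (inline header) + 2 (frags) data segments;
			 * with MLX5_SEND_WQEBB_NUM_DS == 4 that rounds up to the
			 * num_wqebbs computed above, and stop_room reserves room
			 * for that WQE plus the NOP padding that
			 * mlx5e_xdpsq_get_next_pi() may have to add.
			 */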
		}

		check_result = mlx5e_xmit_xdp_frame_check_stop_room(sq, stop_room);
	}
	if (unlikely(check_result < 0))
		return false;

	pi = mlx5e_xdpsq_get_next_pi(sq, num_wqebbs);
	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	net_prefetchw(wqe);

	cseg = &wqe->ctrl;
	eseg = &wqe->eth;
	dseg = wqe->data;

	inline_hdr_sz = 0;

	/* copy the inline part if required */
	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
		memcpy(eseg->inline_hdr.start, xdptxd->data, sizeof(eseg->inline_hdr.start));
		memcpy(dseg, xdptxd->data + sizeof(eseg->inline_hdr.start),
		       MLX5E_XDP_MIN_INLINE - sizeof(eseg->inline_hdr.start));
		dma_len  -= MLX5E_XDP_MIN_INLINE;
		dma_addr += MLX5E_XDP_MIN_INLINE;
		inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
		dseg++;
	}

	/* write the dma part */
	dseg->addr       = cpu_to_be64(dma_addr);
	dseg->byte_count = cpu_to_be32(dma_len);

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);

	if (unlikely(test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state))) {
		u8 num_pkts = 1 + num_frags;
		int i;

		memset(&cseg->signature, 0, sizeof(*cseg) -
		       sizeof(cseg->opmod_idx_opcode) - sizeof(cseg->qpn_ds));
		memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));

		eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
		dseg->lkey = sq->mkey_be;

		for (i = 0; i < num_frags; i++) {
			skb_frag_t *frag = &sinfo->frags[i];
			dma_addr_t addr;

			addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
				skb_frag_off(frag);

			dseg++;
			dseg->addr = cpu_to_be64(addr);
			dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
			dseg->lkey = sq->mkey_be;
		}

		cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);

		sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
			.num_wqebbs = num_wqebbs,
			.num_pkts = num_pkts,
		};

		sq->pc += num_wqebbs;
	} else {
		/* Fixed WQE fields (qpn_ds, inline_hdr.sz, lkey) are
		 * pre-initialized at SQ creation for the single-buffer case.
		 */
		cseg->fm_ce_se = 0;

		sq->pc++;
	}

	sq->doorbell_cseg = cseg;

	stats->xmit++;
	return true;
}

static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
				  struct mlx5e_xdp_wqe_info *wi,
				  u32 *xsk_frames,
				  bool recycle,
				  struct xdp_frame_bulk *bq)
{
	struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
	u16 i;

	for (i = 0; i < wi->num_pkts; i++) {
		struct mlx5e_xdp_info xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);

		switch (xdpi.mode) {
		case MLX5E_XDP_XMIT_MODE_FRAME:
			/* XDP_TX from the XSK RQ and XDP_REDIRECT */
			dma_unmap_single(sq->pdev, xdpi.frame.dma_addr,
					 xdpi.frame.xdpf->len, DMA_TO_DEVICE);
			xdp_return_frame_bulk(xdpi.frame.xdpf, bq);
			break;
		case MLX5E_XDP_XMIT_MODE_PAGE:
			/* XDP_TX from the regular RQ */
			mlx5e_page_release_dynamic(xdpi.page.rq, xdpi.page.page, recycle);
			break;
		case MLX5E_XDP_XMIT_MODE_XSK:
			/* AF_XDP send */
			(*xsk_frames)++;
			break;
		default:
			WARN_ON_ONCE(true);
		}
	}
}

bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
{
	struct xdp_frame_bulk bq;
	struct mlx5e_xdpsq *sq;
	struct mlx5_cqe64 *cqe;
	u32 xsk_frames = 0;
	u16 sqcc;
	int i;

	xdp_frame_bulk_init(&bq);

	sq = container_of(cq, struct mlx5e_xdpsq, cq);

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	i = 0;
	do {
		struct mlx5e_xdp_wqe_info *wi;
		u16 wqe_counter, ci;
		bool last_wqe;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			last_wqe = (sqcc == wqe_counter);
			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
			wi = &sq->db.wqe_info[ci];

			sqcc += wi->num_wqebbs;

			mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true, &bq);
		} while (!last_wqe);

		if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
			netdev_WARN_ONCE(sq->channel->netdev,
					 "Bad OP in XDPSQ CQE: 0x%x\n",
					 get_cqe_opcode(cqe));
			mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
					     (struct mlx5_err_cqe *)cqe);
			mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
		}
	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	xdp_flush_frame_bulk(&bq);

	if (xsk_frames)
		xsk_tx_completed(sq->xsk_pool, xsk_frames);

	sq->stats->cqes += i;

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->cc = sqcc;

	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
{
	struct xdp_frame_bulk bq;
	u32 xsk_frames = 0;

	xdp_frame_bulk_init(&bq);

	rcu_read_lock(); /* need for xdp_return_frame_bulk */

	while (sq->cc != sq->pc) {
		struct mlx5e_xdp_wqe_info *wi;
		u16 ci;

		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
		wi = &sq->db.wqe_info[ci];

		sq->cc += wi->num_wqebbs;

		mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, false, &bq);
	}

	xdp_flush_frame_bulk(&bq);
	rcu_read_unlock();

	if (xsk_frames)
		xsk_tx_completed(sq->xsk_pool, xsk_frames);
}

int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		   u32 flags)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_xdpsq *sq;
	int nxmit = 0;
	int sq_num;
	int i;

	/* this flag is sufficient, no need to test internal sq state */
	if (unlikely(!mlx5e_xdp_tx_is_enabled(priv)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	sq_num = smp_processor_id();

	if (unlikely(sq_num >= priv->channels.num))
		return -ENXIO;

	sq = &priv->channels.c[sq_num]->xdpsq;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		struct mlx5e_xmit_data xdptxd;
		struct mlx5e_xdp_info xdpi;
		bool ret;

		xdptxd.data = xdpf->data;
		xdptxd.len = xdpf->len;
		xdptxd.dma_addr = dma_map_single(sq->pdev, xdptxd.data,
						 xdptxd.len, DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(sq->pdev, xdptxd.dma_addr)))
			break;

		xdpi.mode = MLX5E_XDP_XMIT_MODE_FRAME;
		xdpi.frame.xdpf = xdpf;
		xdpi.frame.dma_addr = xdptxd.dma_addr;

		ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
				      mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, 0);
		if (unlikely(!ret)) {
			dma_unmap_single(sq->pdev, xdptxd.dma_addr,
					 xdptxd.len, DMA_TO_DEVICE);
			break;
		}
		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
		nxmit++;
	}

	if (flags & XDP_XMIT_FLUSH) {
		if (sq->mpwqe.wqe)
			mlx5e_xdp_mpwqe_complete(sq);
		mlx5e_xmit_xdp_doorbell(sq);
	}

	return nxmit;
}

void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
{
	struct mlx5e_xdpsq *xdpsq = rq->xdpsq;

	if (xdpsq->mpwqe.wqe)
		mlx5e_xdp_mpwqe_complete(xdpsq);

	mlx5e_xmit_xdp_doorbell(xdpsq);

	if (test_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags)) {
		xdp_do_flush_map();
		__clear_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
	}
}

void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw)
{
	sq->xmit_xdp_frame_check = is_mpw ?
		mlx5e_xmit_xdp_frame_check_mpwqe : mlx5e_xmit_xdp_frame_check;
	sq->xmit_xdp_frame = is_mpw ?
		mlx5e_xmit_xdp_frame_mpwqe : mlx5e_xmit_xdp_frame;
}